gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The functions from this module are shared by other components.
"""
from contracts import contract
from neat.contracts_extra import *
import os
import time
import json
import re
import numpy
import subprocess
from neat.config import *
from neat.db_utils import *
import logging
log = logging.getLogger(__name__)
@contract
def start(init_state, execute, config, time_interval, iterations=-1):
    """ Start the processing loop.

    Runs ``execute`` repeatedly -- forever when ``iterations`` is -1,
    otherwise exactly ``iterations`` times -- sleeping ``time_interval``
    seconds after every call.

    :param init_state: A function accepting a config and
                       returning a state dictionary.
    :type init_state: function

    :param execute: A function performing the processing at each iteration.
    :type execute: function

    :param config: A config dictionary.
    :type config: dict(str: *)

    :param time_interval: The time interval to wait between iterations.
    :type time_interval: int

    :param iterations: The number of iterations to perform, -1 for infinite.
    :type iterations: int

    :return: The final state.
    :rtype: dict(str: *)
    """
    state = init_state(config)
    completed = 0
    # -1 means "loop forever"; otherwise stop after the requested count.
    while iterations == -1 or completed < iterations:
        state = execute(config, state)
        time.sleep(time_interval)
        completed += 1
    return state
@contract
def build_local_vm_path(local_data_directory):
    """ Build the path to the local VM data directory.

    :param local_data_directory: The base local data path.
    :type local_data_directory: str

    :return: The path to the local VM data directory.
    :rtype: str
    """
    # VM data always lives in a fixed 'vms' subdirectory of the base path.
    vm_subdirectory = 'vms'
    return os.path.join(local_data_directory, vm_subdirectory)
@contract
def build_local_host_path(local_data_directory):
    """ Build the path to the local host data file.

    :param local_data_directory: The base local data path.
    :type local_data_directory: str

    :return: The path to the local host data file.
    :rtype: str
    """
    # Host data is a single 'host' file directly under the base path.
    host_file_name = 'host'
    return os.path.join(local_data_directory, host_file_name)
@contract
def physical_cpu_count(vir_connection):
    """ Get the number of physical CPUs using libvirt.

    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect

    :return: The number of physical CPUs.
    :rtype: int
    """
    # virConnect.getInfo() returns a list of host facts; index 2 holds
    # the number of active CPUs.
    return vir_connection.getInfo()[2]
@contract
def physical_cpu_mhz(vir_connection):
    """ Get the CPU frequency in MHz using libvirt.

    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect

    :return: The CPU frequency in MHz.
    :rtype: int
    """
    # virConnect.getInfo() returns a list of host facts; index 3 holds
    # the expected CPU frequency in MHz.
    return vir_connection.getInfo()[3]
@contract
def physical_cpu_mhz_total(vir_connection):
    """ Get the sum of the core CPU frequencies in MHz using libvirt.

    :param vir_connection: A libvirt connection object.
    :type vir_connection: virConnect

    :return: The total CPU frequency in MHz.
    :rtype: int
    """
    # Total capacity is the per-core frequency multiplied by the core count.
    core_count = physical_cpu_count(vir_connection)
    core_frequency = physical_cpu_mhz(vir_connection)
    return core_count * core_frequency
@contract
def frange(start, end, step):
    """ A range generator for floats.

    Yields start, start + step, start + 2 * step, ... for as long as the
    value does not exceed end (the end value itself may be yielded).

    :param start: The starting value.
    :type start: number

    :param end: The end value.
    :type end: number

    :param step: The step.
    :type step: number
    """
    current = start
    while current <= end:
        yield current
        current += step
@contract
def init_logging(log_directory, log_file, log_level):
    """ Initialize the logging system.

    :param log_directory: The directory to store log files.
    :type log_directory: str

    :param log_file: The file name to store log messages.
    :type log_file: str

    :param log_level: The level of emitted log messages.
    :type log_level: int

    :return: Whether the logging system has been initialized.
    :rtype: bool
    """
    # Level 0 disables logging entirely; no handler setup is needed.
    if log_level == 0:
        logging.disable(logging.CRITICAL)
        return True
    # NOTE(review): os.access(log_file, ...) checks log_file relative to
    # the current working directory, while the handler below opens it
    # inside log_directory -- presumably the two coincide in deployment;
    # verify against callers.
    if not os.access(log_file, os.F_OK):
        if not os.access(log_directory, os.F_OK):
            # Create the log directory on first use.
            os.makedirs(log_directory)
        elif not os.access(log_directory, os.W_OK):
            raise IOError(
                'Cannot write to the log directory: ' + log_directory)
    elif not os.access(log_file, os.W_OK):
        raise IOError('Cannot write to the log file: ' + log_file)
    # Map the numeric config level onto standard logging levels:
    # 3 -> DEBUG, 2 -> INFO, anything else (i.e. 1) -> WARNING.
    if log_level == 3:
        level = logging.DEBUG
    elif log_level == 2:
        level = logging.INFO
    else:
        level = logging.WARNING
    # Reset the root logger so exactly one file handler is attached.
    logger = logging.root
    logger.handlers = []
    logger.filters = []
    logger.setLevel(level)
    handler = logging.FileHandler(
        os.path.join(log_directory, log_file))
    handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)-8s %(name)s %(message)s'))
    logger.addHandler(handler)
    return True
@contract
def call_function_by_name(name, args):
    """ Call a function specified by a fully qualified name.

    :param name: A fully qualified name of a function.
    :type name: str

    :param args: A list of positional arguments of the function.
    :type args: list

    :return: The return value of the function call.
    :rtype: *
    """
    # Split 'package.module.function' into the module path and the
    # function name.
    fragments = name.split('.')
    module = '.'.join(fragments[:-1])
    # A non-empty fromlist makes __import__ return the leaf module rather
    # than the top-level package.
    fromlist = fragments[-2]
    function = fragments[-1]
    m = __import__(module, fromlist=fromlist)
    return getattr(m, function)(*args)
@contract
def parse_parameters(params):
    """ Parse algorithm parameters from the config file.

    :param params: JSON encoded parameters.
    :type params: str

    :return: A dict of parameters.
    :rtype: dict(str: *)
    """
    # JSON object keys decode as unicode; coerce each key to str.
    decoded = json.loads(params)
    return {str(key): value for key, value in decoded.items()}
@contract
def parse_compute_hosts(compute_hosts):
    """ Transform a comma-separated list of host names into a list.

    Any run of non-word characters (commas, whitespace, etc.) acts as a
    separator; empty fragments are dropped.

    :param compute_hosts: A comma-separated list of host names.
    :type compute_hosts: str

    :return: A list of host names.
    :rtype: list(str)
    """
    # Use a raw string for the regex so the backslash is not subject to
    # string escape processing ('\W' only works by accident otherwise).
    return filter(None, re.split(r'\W+', compute_hosts))
@contract
def calculate_migration_time(vms, bandwidth):
    """ Calculate the mean migration time from VM RAM usage data.

    :param vms: A map of VM UUIDs to the corresponding maximum RAM in MB.
    :type vms: dict(str: int)

    :param bandwidth: The network bandwidth in MB/s.
    :type bandwidth: float,>0

    :return: The mean VM migration time in seconds.
    :rtype: float
    """
    # Migration time is approximated as RAM size divided by the network
    # bandwidth, averaged over all VMs.
    return float(numpy.mean(vms.values()) / bandwidth)
@contract
def execute_on_hosts(hosts, commands):
    """ Execute Shell command on hosts over SSH.

    :param hosts: A list of host names.
    :type hosts: list(str)

    :param commands: A list of Shell commands.
    :type commands: list(str)
    """
    # Prefix every command with an echo of itself so the captured output
    # shows what was executed.
    commands_merged = ''
    for command in commands:
        commands_merged += 'echo $ ' + command + ';'
        commands_merged += command + ';'
    for host in hosts:
        print 'Host: ' + host
        # NOTE(review): the command string is interpolated into a shell
        # invocation (shell=True) without any quoting/escaping -- hosts and
        # commands must come from trusted configuration only.
        print subprocess.Popen(
            'ssh ' + host + ' "' + commands_merged + '"',
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True).communicate()[0]
| |
import logging
import time
from logging.handlers import SMTPHandler
import stripe
from flask import g, request, render_template
from werkzeug.contrib.fixers import ProxyFix
from jinja2 import ChoiceLoader, FileSystemLoader
from catwatch.lib.http_method_override_middleware import \
HTTPMethodOverrideMiddleware
from catwatch.blueprints.admin import admin
from catwatch.blueprints.page import page
from catwatch.blueprints.user import user
from catwatch.blueprints.issue import issue
from catwatch.blueprints.stream import stream
from catwatch.blueprints.billing.views.billing import billing
from catwatch.blueprints.billing.views.stripe_webhook import stripe_webhook
from catwatch.extensions import (
db,
bcrypt,
mail,
csrf,
login_manager,
bouncer,
babel,
cache,
webpack,
debug_toolbar
)
from catwatch.blueprints.billing.template_processors import format_currency
# Blueprints to register on the application, in registration order.
FLASK_BLUEPRINTS = [
    admin,
    page,
    user,
    issue,
    billing,
    stream,
    stripe_webhook
]

# HTTP status codes that get a custom error template ("<code>.html").
CUSTOM_ERROR_PAGES = [404, 500, 502]
def api_keys(app):
    """
    Register 0 or more API keys.

    :param app: Flask application instance
    :return: None
    """
    # Configure the global stripe client with the key from app config.
    secret_key = app.config.get('STRIPE_SECRET_KEY')
    stripe.api_key = secret_key
    return None
def middleware(app):
    """
    Register 0 or more middleware (mutates the app passed in).

    :param app: Flask application instance
    :return: None
    """
    wsgi = app.wsgi_app
    # Swap request.remote_addr with the real IP address even if behind a proxy.
    wsgi = ProxyFix(wsgi)
    # Allow modern HTTP verbs such as PATCH and DELETE.
    wsgi = HTTPMethodOverrideMiddleware(wsgi)
    app.wsgi_app = wsgi
    return None
def blueprints(app):
    """
    Register 0 or more blueprints (mutates the app passed in).

    :param app: Flask application instance
    :return: None
    """
    # Registration order follows FLASK_BLUEPRINTS.
    for bp in FLASK_BLUEPRINTS:
        app.register_blueprint(bp)
    return None
def extensions(app):
    """
    Register 0 or more extensions (mutates the app passed in).

    :param app: Flask application instance
    :return: None
    """
    # Initialize each extension against the app, in a fixed order.
    registered_extensions = (db, bcrypt, mail, csrf, login_manager,
                             bouncer, babel, cache, webpack, debug_toolbar)
    for extension in registered_extensions:
        extension.init_app(app)
    return None
def template_processors(app):
    """
    Register 0 or more custom template processors (mutates the app passed in).

    :param app: Flask application instance
    :return: App jinja environment
    """
    build_path = app.config.get('PUBLIC_BUILD_PATH')
    if build_path:
        # Let templates resolve from the webpack build output as well as
        # the app's normal template folder.
        app.jinja_loader = ChoiceLoader([
            app.jinja_loader,
            FileSystemLoader([build_path]),
        ])
    app.jinja_env.add_extension('jinja2.ext.do')
    app.jinja_env.filters['format_currency'] = format_currency
    return app.jinja_env
def logging_handler(app):
    """
    Register 0 or more logger handles (mutates the app passed in).

    :param app: Flask application instance
    :return: None
    """
    @app.before_request
    def before_request():
        """
        Save time when the request started.

        :return: None
        """
        # Stash the start time on Flask's per-request global so
        # after_request can compute the elapsed time.
        g.start = time.time()
        return None

    @app.after_request
    def after_request(response):
        """
        Write out a log entry for the request.

        :return: Flask response
        """
        # Guard against requests where before_request did not run.
        if 'start' in g:
            response_time = (time.time() - g.start)
        else:
            response_time = 0
        response_time_in_ms = int(response_time * 1000)
        params = {
            'method': request.method,
            'in': response_time_in_ms,
            'url': request.path,
            'ip': request.remote_addr
        }
        # Lazy %-style formatting: the logger substitutes params itself.
        app.logger.info('%(method)s "%(url)s" in %(in)sms for %(ip)s', params)
        return response
    return None
def exception_handler(app):
    """
    Register 0 or more exception handlers (mutates the app passed in).

    Attaches an SMTP handler to the app logger so ERROR-level records
    (5xx responses) are e-mailed to the configured address.

    :param app: Flask application instance
    :return: None
    """
    # This will not execute when debug is set to True.
    mail_handler = SMTPHandler((app.config.get('MAIL_SERVER'),
                                app.config.get('MAIL_PORT')),
                               'bugs-noreply@catwatch.com',
                               [app.config.get('MAIL_USERNAME')],
                               '[Exception handler] A 5xx was thrown',
                               (app.config.get('MAIL_USERNAME'),
                                app.config.get('MAIL_PASSWORD')),
                               secure=())
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(logging.Formatter('''
    Time:               %(asctime)s
    Message type:       %(levelname)s


    Message:

    %(message)s
    '''))
    app.logger.addHandler(mail_handler)
    return None
def error_templates(app):
    """
    Register 0 or more error handlers (mutates the app passed in).

    :param app: Flask application instance
    :return: None
    """
    def render_status(status):
        """
        Render a custom template for a specific status.
          Source: http://stackoverflow.com/a/30108946

        :param status: Status as a written name
        :type status: str
        :return: None
        """
        # Get the status code from the status, default to a 500 so that we
        # catch all types of errors and treat them as a 500.
        code = getattr(status, 'code', 500)
        return render_template('{0}.html'.format(code)), code

    for status_code in CUSTOM_ERROR_PAGES:
        app.errorhandler(status_code)(render_status)
    return None
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.utils import excutils
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
from sahara.tests.integration.tests import cinder
from sahara.tests.integration.tests import cluster_configs
from sahara.tests.integration.tests import edp
from sahara.tests.integration.tests import map_reduce
from sahara.tests.integration.tests import scaling
from sahara.tests.integration.tests import swift
from sahara.utils import edp as utils_edp
class VanillaGatingTest(cinder.CinderVolumeTest,
                        cluster_configs.ClusterConfigTest,
                        map_reduce.MapReduceTest, swift.SwiftTest,
                        scaling.ScalingTest, edp.EDPTest):
    """Integration gating test for the Vanilla (Hadoop v1) Sahara plugin.

    Creates node group templates, a cluster template and a cluster, then
    exercises Cinder volumes, cluster configs, EDP jobs, Map Reduce, Swift
    and cluster scaling, deleting all created objects on failure and at
    the end of the run.
    """
    config = cfg.ITConfig().vanilla_config

    # Per-feature skip flags come straight from the integration test config.
    SKIP_CINDER_TEST = config.SKIP_CINDER_TEST
    SKIP_CLUSTER_CONFIG_TEST = config.SKIP_CLUSTER_CONFIG_TEST
    SKIP_EDP_TEST = config.SKIP_EDP_TEST
    SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST
    SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST
    SKIP_SCALING_TEST = config.SKIP_SCALING_TEST

    @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN,
                     'All tests for Vanilla plugin were skipped')
    @testcase.attr('vanilla1')
    def test_vanilla_plugin_gating(self):
        """End-to-end gating scenario for the Vanilla plugin (v1)."""
        self.vanilla_config.IMAGE_ID, self.vanilla_config.SSH_USERNAME = (
            self.get_image_id_and_ssh_username(self.vanilla_config))

        # Default value of self.common_config.FLOATING_IP_POOL is None
        floating_ip_pool = self.common_config.FLOATING_IP_POOL
        internal_neutron_net = None
        # If Neutron enabled then get ID of floating IP pool and ID of internal
        # Neutron network
        if self.common_config.NEUTRON_ENABLED:
            floating_ip_pool = self.get_floating_ip_pool_id_for_neutron_net()
            internal_neutron_net = self.get_internal_neutron_net_id()

        # --------------------"tt-dn" node group template creation---------------------
        node_group_template_id_list = []
        try:
            node_group_template_tt_dn_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-tt-dn',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                node_processes=['tasktracker', 'datanode'],
                node_configs={
                    'HDFS': cluster_configs.DN_CONFIG,
                    'MapReduce': cluster_configs.TT_CONFIG
                },
                floating_ip_pool=floating_ip_pool
            )
            node_group_template_id_list.append(node_group_template_tt_dn_id)
        except Exception as e:
            # save_and_reraise_exception logs/cleans up, then re-raises the
            # original exception on exiting the with block.
            with excutils.save_and_reraise_exception():
                message = ('Failure while \'tt-dn\' node group '
                           'template creation: ')
                self.print_error_log(message, e)

        # ----------------------"tt" node group template creation----------------------
        # Attach Cinder volumes to the "tt" nodes only when the Cinder test
        # is enabled.
        if not self.vanilla_config.SKIP_CINDER_TEST:
            volumes_per_node = 2
            volumes_size = 2
        else:
            volumes_per_node = 0
            volumes_size = 0
        try:
            node_group_template_tt_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-tt',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                node_processes=['tasktracker'],
                node_configs={
                    'MapReduce': cluster_configs.TT_CONFIG
                },
                floating_ip_pool=floating_ip_pool
            )
            node_group_template_id_list.append(node_group_template_tt_id)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while \'tt\' node group template creation: '
                self.print_error_log(message, e)

        # ---------------------"dn" node group template creation-----------------------
        try:
            node_group_template_dn_id = self.create_node_group_template(
                name='test-node-group-template-vanilla-dn',
                plugin_config=self.vanilla_config,
                description='test node group template for Vanilla plugin',
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                node_processes=['datanode'],
                node_configs={
                    'HDFS': cluster_configs.DN_CONFIG
                },
                floating_ip_pool=floating_ip_pool
            )
            node_group_template_id_list.append(node_group_template_dn_id)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while \'dn\' node group template creation: '
                self.print_error_log(message, e)

        # --------------------------Cluster template creation--------------------------
        try:
            cluster_template_id = self.create_cluster_template(
                name='test-cluster-template-vanilla',
                plugin_config=self.vanilla_config,
                description='test cluster template for Vanilla plugin',
                cluster_configs={
                    'HDFS': cluster_configs.CLUSTER_HDFS_CONFIG,
                    'MapReduce': cluster_configs.CLUSTER_MR_CONFIG,
                    'general': {'Enable Swift': True}
                },
                node_groups=[
                    dict(
                        name='master-node-jt-nn',
                        flavor_id=self.flavor_id,
                        node_processes=['namenode', 'jobtracker'],
                        node_configs={
                            'HDFS': cluster_configs.NN_CONFIG,
                            'MapReduce': cluster_configs.JT_CONFIG
                        },
                        floating_ip_pool=floating_ip_pool,
                        count=1),
                    dict(
                        name='master-node-sec-nn-oz',
                        flavor_id=self.flavor_id,
                        node_processes=['secondarynamenode', 'oozie'],
                        node_configs={
                            'HDFS': cluster_configs.SNN_CONFIG,
                            'JobFlow': cluster_configs.OOZIE_CONFIG
                        },
                        floating_ip_pool=floating_ip_pool,
                        count=1),
                    dict(
                        name='worker-node-tt-dn',
                        node_group_template_id=node_group_template_tt_dn_id,
                        count=3),
                    dict(
                        name='worker-node-dn',
                        node_group_template_id=node_group_template_dn_id,
                        count=1),
                    dict(
                        name='worker-node-tt',
                        node_group_template_id=node_group_template_tt_id,
                        count=1)
                ],
                net_id=internal_neutron_net
            )
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    node_group_template_id_list=node_group_template_id_list
                )
                message = 'Failure while cluster template creation: '
                self.print_error_log(message, e)

        # ------------------------------Cluster creation-------------------------------
        try:
            cluster_name = "%s-%s-v1" % (self.common_config.CLUSTER_NAME,
                                         self.vanilla_config.PLUGIN_NAME)
            cluster_id = self.create_cluster(
                name=cluster_name,
                plugin_config=self.vanilla_config,
                cluster_template_id=cluster_template_id,
                description='test cluster',
                cluster_configs={}
            )
            self.poll_cluster_state(cluster_id)
            cluster_info = self.get_cluster_info(self.vanilla_config)
            self.await_active_workers_for_namenode(cluster_info['node_info'],
                                                   self.vanilla_config)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                # NOTE(review): this handler passes self.cluster_id while all
                # later handlers use cluster_info['cluster_id'] -- presumably
                # create_cluster() stores the ID on self; verify, since an
                # AttributeError here would mask the original failure.
                self.delete_objects(
                    self.cluster_id, cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster creation: '
                self.print_error_log(message, e)

        # --------------------------------CINDER TESTING-------------------------------
        try:
            self.cinder_volume_testing(cluster_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Cinder testing: '
                self.print_error_log(message, e)

        # ---------------------------CLUSTER CONFIG TESTING----------------------------
        try:
            self.cluster_config_testing(cluster_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while cluster config testing: '
                self.print_error_log(message, e)

        # ---------------------------------EDP TESTING---------------------------------
        def edp_test():
            # Run one job of each supported type (Pig, MapReduce, streaming
            # MapReduce, Java) and wait for all of them to finish.
            pig_job_data = self.edp_info.read_pig_example_script()
            pig_lib_data = self.edp_info.read_pig_example_jar()
            mapreduce_jar_data = self.edp_info.read_mapreduce_example_jar()

            # This is a modified version of WordCount that takes swift configs
            java_lib_data = self.edp_info.read_java_example_lib()
            job_ids = []
            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_PIG,
                job_data_list=[{'pig': pig_job_data}],
                lib_data_list=[{'jar': pig_lib_data}],
                swift_binaries=True,
                hdfs_local_output=True)
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                job_data_list=[],
                lib_data_list=[{'jar': mapreduce_jar_data}],
                configs=self.edp_info.mapreduce_example_configs(),
                swift_binaries=True,
                hdfs_local_output=True)
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                job_data_list=[],
                lib_data_list=[],
                configs=self.edp_info.mapreduce_streaming_configs())
            job_ids.append(job_id)

            job_id = self.edp_testing(
                job_type=utils_edp.JOB_TYPE_JAVA,
                job_data_list=[],
                lib_data_list=[{'jar': java_lib_data}],
                configs=self.edp_info.java_example_configs(),
                pass_input_output_args=True)
            job_ids.append(job_id)

            self.poll_jobs_status(job_ids)

        edp_test()

        # -----------------------------MAP REDUCE TESTING------------------------------
        try:
            self.map_reduce_testing(cluster_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure while Map Reduce testing: '
                self.print_error_log(message, e)

        # --------------------------CHECK SWIFT AVAILABILITY---------------------------
        try:
            self.check_swift_availability(cluster_info)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                self.delete_objects(
                    cluster_info['cluster_id'], cluster_template_id,
                    node_group_template_id_list
                )
                message = 'Failure during check of Swift availability: '
                self.print_error_log(message, e)

        # -------------------------------CLUSTER SCALING-------------------------------
        if not self.vanilla_config.SKIP_SCALING_TEST:
            # Resize existing node groups and add two new ones, then re-run
            # every feature test against the scaled cluster.
            change_list = [
                {
                    'operation': 'resize',
                    'info': ['worker-node-tt-dn', 4]
                },
                {
                    'operation': 'resize',
                    'info': ['worker-node-dn', 0]
                },
                {
                    'operation': 'resize',
                    'info': ['worker-node-tt', 0]
                },
                {
                    'operation': 'add',
                    'info': [
                        'new-worker-node-tt', 1, node_group_template_tt_id
                    ]
                },
                {
                    'operation': 'add',
                    'info': [
                        'new-worker-node-dn', 1, node_group_template_dn_id
                    ]
                }
            ]
            try:
                new_cluster_info = self.cluster_scaling(cluster_info,
                                                        change_list)
                self.await_active_workers_for_namenode(
                    new_cluster_info['node_info'], self.vanilla_config)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = 'Failure while cluster scaling: '
                    self.print_error_log(message, e)

            # -------------------------CINDER TESTING AFTER SCALING------------------------
            try:
                self.cinder_volume_testing(new_cluster_info)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Cinder testing after cluster '
                               'scaling: ')
                    self.print_error_log(message, e)

            # --------------------CLUSTER CONFIG TESTING AFTER SCALING---------------------
            try:
                self.cluster_config_testing(new_cluster_info)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while cluster config testing after '
                               'cluster scaling: ')
                    self.print_error_log(message, e)

            # ----------------------MAP REDUCE TESTING AFTER SCALING-----------------------
            try:
                self.map_reduce_testing(new_cluster_info)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure while Map Reduce testing after '
                               'cluster scaling: ')
                    self.print_error_log(message, e)

            # -------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------
            try:
                self.check_swift_availability(new_cluster_info)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.delete_objects(
                        new_cluster_info['cluster_id'], cluster_template_id,
                        node_group_template_id_list
                    )
                    message = ('Failure during check of Swift availability '
                               'after cluster scaling: ')
                    self.print_error_log(message, e)

            # ----------------------------- EDP AFTER SCALING -----------------------------
            edp_test()

        # ---------------------------DELETE CREATED OBJECTS----------------------------
        self.delete_objects(
            cluster_info['cluster_id'], cluster_template_id,
            node_group_template_id_list
        )
| |
import unittest
import pysal
import numpy as np
from pysal.spreg import error_sp_het as HET
from pysal.common import RTOL
class TestBaseGMErrorHet(unittest.TestCase):
    """Regression tests for the low-level BaseGM_Error_Het estimator."""
    def setUp(self):
        """Build y (HOVAL), X (constant, INC, CRIME) and row-standardized
        rook weights from the columbus sample data."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        # The base (low-level) class expects the constant column to be
        # added by the caller.
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Fit the model and compare every published attribute against
        previously validated values."""
        reg = HET.BaseGM_Error_Het(self.y, self.X, self.w.sparse, step1c=True)
        betas = np.array([[ 47.99626638], [ 0.71048989], [ -0.55876126], [ 0.41178776]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 27.38122697])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        ef = np.array([ 32.29765975])
        np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL)
        predy = np.array([ 53.08577603])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 3
        np.testing.assert_allclose(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy)
        # Variance-covariance matrix of the estimates.
        vm = np.array([[ 1.31767529e+02, -3.58368748e+00, -1.65090647e+00,
                         0.00000000e+00],
                       [ -3.58368748e+00, 1.35513711e-01, 3.77539055e-02,
                         0.00000000e+00],
                       [ -1.65090647e+00, 3.77539055e-02, 2.61042702e-02,
                         0.00000000e+00],
                       [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                         2.82398517e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03],
                        [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04],
                        [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
        np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class TestGMErrorHet(unittest.TestCase):
    """Regression tests for the user-facing GM_Error_Het estimator."""
    def setUp(self):
        """Build y (HOVAL), X (INC, CRIME) and row-standardized rook
        weights; the user-facing class adds the constant itself."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Fit the model and compare every published attribute against
        previously validated values."""
        reg = HET.GM_Error_Het(self.y, self.X, self.w, step1c=True)
        betas = np.array([[ 47.99626638], [ 0.71048989], [ -0.55876126], [ 0.41178776]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 27.38122697])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        ef = np.array([ 32.29765975])
        np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL)
        predy = np.array([ 53.08577603])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 3
        np.testing.assert_allclose(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy)
        # Variance-covariance matrix of the estimates.
        vm = np.array([[ 1.31767529e+02, -3.58368748e+00, -1.65090647e+00,
                         0.00000000e+00],
                       [ -3.58368748e+00, 1.35513711e-01, 3.77539055e-02,
                         0.00000000e+00],
                       [ -1.65090647e+00, 3.77539055e-02, 2.61042702e-02,
                         0.00000000e+00],
                       [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
                         2.82398517e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        pr2 = 0.34951013222581306
        np.testing.assert_allclose(reg.pr2,pr2)
        stde = np.array([ 11.47900385, 0.36812187, 0.16156816, 0.16804717])
        np.testing.assert_allclose(reg.std_err,stde,RTOL)
        # Columns: z value, p value.
        z_stat = np.array([[ 4.18122226e+00, 2.89946274e-05],
                           [ 1.93003988e+00, 5.36018970e-02],
                           [ -3.45836247e+00, 5.43469673e-04],
                           [ 2.45042960e+00, 1.42685863e-02]])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
        xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03],
                        [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04],
                        [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
        np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class TestBaseGMEndogErrorHet(unittest.TestCase):
    """Regression tests for the low-level BaseGM_Endog_Error_Het estimator
    (CRIME treated as endogenous, DISCBD as instrument)."""
    def setUp(self):
        """Build y (HOVAL), exogenous X (constant, INC), endogenous yd
        (CRIME), instrument q (DISCBD) and row-standardized rook weights."""
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        # The base (low-level) class expects the constant column to be
        # added by the caller.
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        """Fit the model and compare every published attribute against
        previously validated values."""
        reg = HET.BaseGM_Endog_Error_Het(self.y, self.X, self.yd, self.q, self.w.sparse, step1c=True)
        betas = np.array([[ 55.39707924], [ 0.46563046], [ -0.67038326], [ 0.41135023]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        u = np.array([ 26.51812895])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        ef = np.array([ 31.46604707])
        np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL)
        predy = np.array([ 53.94887405])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 3
        np.testing.assert_allclose(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        yend = np.array([ 15.72598])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 5.03])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 5.03 ])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy)
        # Variance-covariance matrix of the estimates.
        vm = np.array([[ 8.34637805e+02, -2.16932259e+01, -1.33327894e+01,
                         1.65840848e+00],
                       [ -2.16932259e+01, 5.97683070e-01, 3.39503523e-01,
                         -3.90111107e-02],
                       [ -1.33327894e+01, 3.39503523e-01, 2.19008080e-01,
                         -2.81929695e-02],
                       [ 1.65840848e+00, -3.90111107e-02, -2.81929695e-02,
                         3.15686105e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        hth = np.array([[ 49. , 704.371999 , 139.75 ],
                        [ 704.371999 , 11686.67338121, 2246.12800625],
                        [ 139.75 , 2246.12800625, 498.5851 ]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
class TestGMEndogErrorHet(unittest.TestCase):
    """Regression tests for the user-facing HET.GM_Endog_Error_Het class.

    Fits the GM endogenous-error model with heteroskedasticity on the
    columbus sample data and compares every exposed attribute against
    previously computed reference values (tolerance RTOL).
    """
    def setUp(self):
        # columbus sample: HOVAL is the dependent variable, INC the
        # exogenous regressor, CRIME the endogenous variable and DISCBD
        # its instrument; W is row-standardized rook contiguity.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        reg = HET.GM_Endog_Error_Het(self.y, self.X, self.yd, self.q, self.w, step1c=True)
        # estimated coefficients
        betas = np.array([[ 55.39707924], [ 0.46563046], [ -0.67038326], [ 0.41135023]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        # first-observation residual and prediction
        u = np.array([ 26.51812895])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        predy = np.array([ 53.94887405])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        # model dimensions
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 3
        np.testing.assert_allclose(reg.k,k)
        # first rows of the data matrices the model stores
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        yend = np.array([ 15.72598])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 5.03])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        h = np.array([ 1. , 19.531, 5.03 ])
        np.testing.assert_allclose(reg.h[0],h,RTOL)
        # iteration diagnostics
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        # dependent-variable summary statistics
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy)
        # variance-covariance matrix of the estimates
        vm = np.array([[ 8.34637805e+02, -2.16932259e+01, -1.33327894e+01,
                         1.65840848e+00],
                       [ -2.16932259e+01, 5.97683070e-01, 3.39503523e-01,
                         -3.90111107e-02],
                       [ -1.33327894e+01, 3.39503523e-01, 2.19008080e-01,
                         -2.81929695e-02],
                       [ 1.65840848e+00, -3.90111107e-02, -2.81929695e-02,
                         3.15686105e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        # pseudo R-squared and inference
        pr2 = 0.34648011338954804
        np.testing.assert_allclose(reg.pr2,pr2,RTOL)
        std_err = np.array([ 28.89009873, 0.77309965, 0.46798299,
                             0.17767558])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        z_stat = np.array([(1.9175109006819244, 0.055173057472126787), (0.60229035155742305, 0.54698088217644414), (-1.4324949211864271, 0.15200223057569454), (2.3151759776869496, 0.020603303355572443)])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
        # H'H cross-product of the instrument matrix
        hth = np.array([[ 49. , 704.371999 , 139.75 ],
                        [ 704.371999 , 11686.67338121, 2246.12800625],
                        [ 139.75 , 2246.12800625, 498.5851 ]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
class TestBaseGMComboHet(unittest.TestCase):
    """Regression tests for HET.BaseGM_Combo_Het (the computational base
    class): spatial lag is constructed manually with set_endog and the
    constant is appended by hand before fitting.
    """
    def setUp(self):
        # columbus sample: HOVAL regressed on INC and CRIME.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        # Only spatial lag
        # build the spatially lagged dependent variable and its instruments
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        reg = HET.BaseGM_Combo_Het(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, step1c=True)
        # estimated coefficients
        betas = np.array([[ 57.7778574 ], [ 0.73034922], [ -0.59257362], [ -0.2230231 ], [ 0.56636724]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        # first-observation residual, filtered residual and prediction
        u = np.array([ 25.65156033])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        ef = np.array([ 31.87664403])
        np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL)
        predy = np.array([ 54.81544267])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        # model dimensions
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 4
        np.testing.assert_allclose(reg.k,k)
        # first rows of the stored data matrices
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        yend = np.array([ 35.4585005])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 18.594 , 24.7142675])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        # iteration diagnostics
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        # dependent-variable summary statistics
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy,RTOL)
        # variance-covariance matrix of the estimates
        vm = np.array([[ 4.86218274e+02, -2.77268729e+00, -1.59987770e+00,
                         -1.01969471e+01, 2.74302006e+00],
                       [ -2.77268729e+00, 1.04680972e-01, 2.51172238e-02,
                         1.95136385e-03, 3.70052723e-03],
                       [ -1.59987770e+00, 2.51172238e-02, 2.15655720e-02,
                         7.65868344e-03, -7.30173070e-03],
                       [ -1.01969471e+01, 1.95136385e-03, 7.65868344e-03,
                         2.78273684e-01, -6.89402590e-02],
                       [ 2.74302006e+00, 3.70052723e-03, -7.30173070e-03,
                         -6.89402590e-02, 7.12034037e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        # H'H cross-product of the instrument matrix
        hth = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03,
                          7.24743592e+02, 1.70735413e+03],
                        [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04,
                          1.10925200e+04, 2.23848036e+04],
                        [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04,
                          2.34796298e+04, 6.70145378e+04],
                        [ 7.24743592e+02, 1.10925200e+04, 2.34796298e+04,
                          1.16146226e+04, 2.30304624e+04],
                        [ 1.70735413e+03, 2.23848036e+04, 6.70145378e+04,
                          2.30304624e+04, 6.69879858e+04]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
class TestGMComboHet(unittest.TestCase):
    """Regression tests for the user-facing HET.GM_Combo_Het class, which
    builds the spatial lag and constant internally; additionally checks
    the prediction-error attributes (e_pred, predy_e) and inference.
    """
    def setUp(self):
        # columbus sample: HOVAL regressed on INC and CRIME.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
    def test_model(self):
        # Only spatial lag
        reg = HET.GM_Combo_Het(self.y, self.X, w=self.w, step1c=True)
        # estimated coefficients (must match TestBaseGMComboHet)
        betas = np.array([[ 57.7778574 ], [ 0.73034922], [ -0.59257362], [ -0.2230231 ], [ 0.56636724]])
        np.testing.assert_allclose(reg.betas,betas,RTOL)
        # first-observation residuals and predictions, including the
        # reduced-form prediction attributes specific to the combo model
        u = np.array([ 25.65156033])
        np.testing.assert_allclose(reg.u[0],u,RTOL)
        ef = np.array([ 31.87664403])
        np.testing.assert_allclose(reg.e_filtered[0],ef,RTOL)
        ep = np.array([ 28.30648145])
        np.testing.assert_allclose(reg.e_pred[0],ep,RTOL)
        pe = np.array([ 52.16052155])
        np.testing.assert_allclose(reg.predy_e[0],pe,RTOL)
        predy = np.array([ 54.81544267])
        np.testing.assert_allclose(reg.predy[0],predy,RTOL)
        # model dimensions
        n = 49
        np.testing.assert_allclose(reg.n,n)
        k = 4
        np.testing.assert_allclose(reg.k,k)
        # first rows of the stored data matrices
        y = np.array([ 80.467003])
        np.testing.assert_allclose(reg.y[0],y,RTOL)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_allclose(reg.x[0],x,RTOL)
        yend = np.array([ 35.4585005])
        np.testing.assert_allclose(reg.yend[0],yend,RTOL)
        q = np.array([ 18.594 , 24.7142675])
        np.testing.assert_allclose(reg.q[0],q,RTOL)
        z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_allclose(reg.z[0],z,RTOL)
        # iteration diagnostics
        i_s = 'Maximum number of iterations reached.'
        np.testing.assert_string_equal(reg.iter_stop,i_s)
        its = 1
        np.testing.assert_allclose(reg.iteration,its,RTOL)
        # dependent-variable summary statistics
        my = 38.436224469387746
        np.testing.assert_allclose(reg.mean_y,my)
        stdy = 18.466069465206047
        np.testing.assert_allclose(reg.std_y,stdy)
        # variance-covariance matrix of the estimates
        vm = np.array([[ 4.86218274e+02, -2.77268729e+00, -1.59987770e+00,
                         -1.01969471e+01, 2.74302006e+00],
                       [ -2.77268729e+00, 1.04680972e-01, 2.51172238e-02,
                         1.95136385e-03, 3.70052723e-03],
                       [ -1.59987770e+00, 2.51172238e-02, 2.15655720e-02,
                         7.65868344e-03, -7.30173070e-03],
                       [ -1.01969471e+01, 1.95136385e-03, 7.65868344e-03,
                         2.78273684e-01, -6.89402590e-02],
                       [ 2.74302006e+00, 3.70052723e-03, -7.30173070e-03,
                         -6.89402590e-02, 7.12034037e-02]])
        np.testing.assert_allclose(reg.vm,vm,RTOL)
        # pseudo R-squared, including the reduced-form variant, and inference
        pr2 = 0.3001582877472412
        np.testing.assert_allclose(reg.pr2,pr2,RTOL)
        pr2_e = 0.35613102283621967
        np.testing.assert_allclose(reg.pr2_e,pr2_e,RTOL)
        std_err = np.array([ 22.05035768, 0.32354439, 0.14685221, 0.52751653, 0.26683966])
        np.testing.assert_allclose(reg.std_err,std_err,RTOL)
        z_stat = np.array([(2.6202684885795335, 0.00878605635338265), (2.2573385444145524, 0.023986928627746887), (-4.0351698589183433, 5.456281036278686e-05), (-0.42277935292121521, 0.67245625315942159), (2.1225002455741895, 0.033795752094112265)])
        np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
        # H'H cross-product of the instrument matrix
        hth = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03,
                          7.24743592e+02, 1.70735413e+03],
                        [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04,
                          1.10925200e+04, 2.23848036e+04],
                        [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04,
                          2.34796298e+04, 6.70145378e+04],
                        [ 7.24743592e+02, 1.10925200e+04, 2.34796298e+04,
                          1.16146226e+04, 2.30304624e+04],
                        [ 1.70735413e+03, 2.23848036e+04, 6.70145378e+04,
                          2.30304624e+04, 6.69879858e+04]])
        np.testing.assert_allclose(reg.hth,hth,RTOL)
# Allow the test module to be executed directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
# This file is part of the MapProxy project.
# Copyright (C) 2013 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from mapproxy.test.http import (
MockServ, RequestsMismatchError, mock_httpd,
basic_auth_value, query_eq,
)
class TestMockServ(object):
    """Tests for MockServ, the scripted single-use HTTP mock server.

    Each test declares the requests the server should expect (and the
    responses it returns), then issues real HTTP requests against it;
    entering/leaving the ``with serv`` block verifies the expectations
    and raises RequestsMismatchError on any deviation.
    """
    def test_no_requests(self):
        serv = MockServ()
        with serv:
            pass
    def test_expects_get_no_body(self):
        serv = MockServ()
        serv.expects('/test')
        with serv:
            resp = requests.get('http://localhost:%d/test' % serv.port)
            assert resp.status_code == 200
            assert resp.content == b''
    def test_expects_w_header(self):
        serv = MockServ()
        serv.expects('/test', headers={'Accept': 'Coffee'})
        with serv:
            resp = requests.get('http://localhost:%d/test' % serv.port, headers={'Accept': 'Coffee'})
            assert resp.ok
    def test_expects_w_header_but_missing(self):
        # a request without the expected header is reported on __exit__
        serv = MockServ()
        serv.expects('/test', headers={'Accept': 'Coffee'})
        try:
            with serv:
                requests.get('http://localhost:%d/test' % serv.port)
        except RequestsMismatchError as ex:
            assert ex.assertions[0].expected == 'Accept: Coffee'
    def test_expects_post(self):
        # TODO POST handling in MockServ is hacky.
        # data just gets appended to URL
        serv = MockServ()
        serv.expects('/test?foo', method='POST')
        with serv:
            requests.post('http://localhost:%d/test' % serv.port, data=b'foo')
    def test_expects_post_but_get(self):
        # wrong HTTP method is reported with expected/actual values
        serv = MockServ()
        serv.expects('/test', method='POST')
        try:
            with serv:
                requests.get('http://localhost:%d/test' % serv.port)
        except RequestsMismatchError as ex:
            assert ex.assertions[0].expected == 'POST'
            assert ex.assertions[0].actual == 'GET'
        else:
            raise AssertionError('AssertionError expected')
    def test_returns(self):
        serv = MockServ()
        serv.expects('/test')
        serv.returns(body=b'hello')
        with serv:
            resp = requests.get('http://localhost:%d/test' % serv.port)
            # no Content-type header unless one was configured
            assert 'Content-type' not in resp.headers
            assert resp.content == b'hello'
    def test_returns_headers(self):
        serv = MockServ()
        serv.expects('/test')
        serv.returns(body=b'hello', headers={'content-type': 'text/plain'})
        with serv:
            resp = requests.get('http://localhost:%d/test' % serv.port)
            assert resp.headers['Content-type'] == 'text/plain'
            assert resp.content == b'hello'
    def test_returns_status(self):
        serv = MockServ()
        serv.expects('/test')
        serv.returns(body=b'hello', status_code=418)
        with serv:
            resp = requests.get('http://localhost:%d/test' % serv.port)
            assert resp.status_code == 418
            assert resp.content == b'hello'
    def test_multiple_requests(self):
        serv = MockServ()
        serv.expects('/test1').returns(body=b'hello1')
        serv.expects('/test2').returns(body=b'hello2')
        with serv:
            resp = requests.get('http://localhost:%d/test1' % serv.port)
            assert resp.content == b'hello1'
            resp = requests.get('http://localhost:%d/test2' % serv.port)
            assert resp.content == b'hello2'
    def test_too_many_requests(self):
        # requests beyond the scripted ones are refused at connection level
        serv = MockServ()
        serv.expects('/test1').returns(body=b'hello1')
        with serv:
            resp = requests.get('http://localhost:%d/test1' % serv.port)
            assert resp.content == b'hello1'
            try:
                requests.get('http://localhost:%d/test2' % serv.port)
            except requests.exceptions.RequestException:
                pass
            else:
                raise AssertionError('RequestException expected')
    def test_missing_requests(self):
        # leaving the with-block with un-consumed expectations fails
        serv = MockServ()
        serv.expects('/test1').returns(body=b'hello1')
        serv.expects('/test2').returns(body=b'hello2')
        try:
            with serv:
                resp = requests.get('http://localhost:%d/test1' % serv.port)
                assert resp.content == b'hello1'
        except RequestsMismatchError as ex:
            assert 'requests mismatch:\n - missing requests' in str(ex)
        else:
            raise AssertionError('AssertionError expected')
    def test_reset_unordered(self):
        # unordered mode accepts the expected requests in any order, and
        # reset() re-arms the same expectations for a second session
        serv = MockServ(unordered=True)
        serv.expects('/test1').returns(body=b'hello1')
        serv.expects('/test2').returns(body=b'hello2')
        with serv:
            resp = requests.get('http://localhost:%d/test1' % serv.port)
            assert resp.content == b'hello1'
            resp = requests.get('http://localhost:%d/test2' % serv.port)
            assert resp.content == b'hello2'
        serv.reset()
        with serv:
            resp = requests.get('http://localhost:%d/test2' % serv.port)
            assert resp.content == b'hello2'
            resp = requests.get('http://localhost:%d/test1' % serv.port)
            assert resp.content == b'hello1'
    def test_unexpected(self):
        # an unexpected URL in unordered mode is refused and then reported
        serv = MockServ(unordered=True)
        serv.expects('/test1').returns(body=b'hello1')
        serv.expects('/test2').returns(body=b'hello2')
        try:
            with serv:
                resp = requests.get('http://localhost:%d/test1' % serv.port)
                assert resp.content == b'hello1'
                try:
                    requests.get('http://localhost:%d/test3' % serv.port)
                except requests.exceptions.RequestException:
                    pass
                else:
                    raise AssertionError('RequestException expected')
                resp = requests.get('http://localhost:%d/test2' % serv.port)
                assert resp.content == b'hello2'
        except RequestsMismatchError as ex:
            assert 'unexpected request' in ex.assertions[0]
        else:
            raise AssertionError('AssertionError expected')
class TestMockHttpd(object):
    """Tests for the mock_httpd context manager, which takes all expected
    (request, response) pairs up front and serves them on a fixed port.
    """
    def test_no_requests(self):
        with mock_httpd(('localhost', 42423), []):
            pass
    def test_headers_status_body(self):
        with mock_httpd(('localhost', 42423), [
            ({'path':'/test', 'headers': {'Accept': 'Coffee'}},
             {'body': b'ok', 'status': 418})]):
            resp = requests.get('http://localhost:42423/test', headers={'Accept': 'Coffee'})
            assert resp.status_code == 418
    def test_auth(self):
        with mock_httpd(('localhost', 42423), [
            ({'path':'/test', 'headers': {'Accept': 'Coffee'}, 'require_basic_auth': True},
             {'body': b'ok', 'status': 418})]):
            # without credentials the server challenges with 401
            resp = requests.get('http://localhost:42423/test')
            assert resp.status_code == 401
            assert resp.content == b'no access'
            # with basic-auth credentials the scripted response is served
            resp = requests.get('http://localhost:42423/test', headers={
                'Authorization': basic_auth_value('foo', 'bar'), 'Accept': 'Coffee'}
            )
            assert resp.content == b'ok'
def test_query_eq():
    """query_eq ignores parameter order and tolerates tiny float deltas."""
    equal_pairs = [
        ('?baz=42&foo=bar', '?foo=bar&baz=42'),
        ('?baz=42.00&foo=bar', '?foo=bar&baz=42.0'),
        ('?baz=42.000000001&foo=bar', '?foo=bar&baz=42.0'),
        ('?baz=42.000000001,23.99999999999&foo=bar', '?foo=bar&baz=42.0,24.0'),
    ]
    unequal_pairs = [
        ('?baz=42.00000001&foo=bar', '?foo=bar&baz=42.0'),
        ('?baz=42.00000001&foo=bar', '?foo=bar&baz=42.0'),
    ]
    for left, right in equal_pairs:
        assert query_eq(left, right)
    for left, right in unequal_pairs:
        assert not query_eq(left, right)
| |
from collections import Counter
from ceph_medic import metadata, daemon_types
from ceph_medic.util import configuration, str_to_int
#
# Utilities
#
def get_fsid(data):
    """Return the cluster fsid parsed from this host's collected ceph.conf.

    Returns an empty string when the conf file was not collected or the
    [global] section carries no fsid.
    """
    # FIXME: might want to load this thing into ConfigParser so that we can fetch
    # information. ceph-deploy is a good example on how to do this. See:
    # https://github.com/ceph/ceph-deploy/blob/master/ceph_deploy/conf/ceph.py
    conf_path = '/etc/ceph/%s.conf' % metadata['cluster_name']
    try:
        etc_ceph_files = data['paths']['/etc/ceph']['files']
        contents = etc_ceph_files[conf_path]['contents']
    except KeyError:
        # the conf file (or the whole /etc/ceph listing) was not collected
        return ''
    parsed = configuration.load_string(contents)
    try:
        return parsed.get_safe('global', 'fsid', '')
    except IndexError:
        # malformed section data in the parsed configuration
        return ''
def get_common_fsid():
    """
    Determine the most common cluster FSID across all collected hosts.
    If every host agrees the answer is trivially that FSID; with a mix,
    the majority value serves as the baseline to compare against.
    Returns an empty string when no FSIDs were collected at all.
    """
    collected = []
    for daemon_type in daemon_types:
        for node_metadata in metadata[daemon_type].values():
            collected.extend(get_host_fsids(node_metadata))
    ranked = Counter(collected).most_common()
    if not ranked:
        return ''
    return ranked[0][0]
def get_host_fsids(node_metadata):
    """
    Return every cluster FSID reported by the admin sockets of one host.
    Sockets without a captured config, or whose config has no (truthy)
    fsid, are skipped.
    """
    socket_entries = node_metadata['ceph']['sockets'].values()
    return [
        entry['config']['fsid']
        for entry in socket_entries
        if entry.get('config', {}).get('fsid')
    ]
#
# Warning checks
#
def check_colocated_running_mons_osds(host, data):
    """Warn (WCOM1) when a host runs OSD daemons alongside MON daemons."""
    socket_names = list(data['ceph']['sockets'])
    mons = [name for name in socket_names if "mon." in name]
    # keep the original elif semantics: a name matching "mon." is never
    # also counted as an OSD
    osds = [name for name in socket_names
            if "mon." not in name and "osd." in name]
    if mons and osds:
        daemons = "\n %s" % ','.join(osds)
        return 'WCOM1', 'collocated OSDs with MONs running: %s' % daemons
#
# Error checks
#
def check_ceph_conf_exists(host, data):
    """Error (ECOM1) when the cluster conf file is absent from /etc/ceph."""
    expected_conf = '/etc/ceph/%s.conf' % metadata['cluster_name']
    collected_files = data['paths']['/etc/ceph']['files'].keys()
    if expected_conf not in collected_files:
        return 'ECOM1', "%s does not exist" % expected_conf
def check_ceph_executable_exists(host, data):
    """Error (ECOM2) when `which` could not locate a ceph executable."""
    installed = data['ceph']['installed']
    if installed is False:
        msg = 'ceph executable was not found in common paths when running `which`'
        return 'ECOM2', msg
def check_var_lib_ceph_dir(host, data):
    """Error (ECOM3) when collecting /var/lib/ceph raised an exception."""
    dir_info = data['paths']['/var/lib/ceph']['dirs']['/var/lib/ceph']
    error = dir_info['exception']
    if not error:
        return
    return 'ECOM3', '/var/lib/ceph could not be parsed: %s' % error['repr']
def check_var_lib_ceph_permissions(host, data):
    """Error (ECOM4) when /var/lib/ceph is not owned by ceph:ceph.

    Bug fix: the previous condition (``group == owner != 'ceph'``) only
    fired when owner and group were identical, silently accepting mixed
    ownership such as root:ceph or ceph:root - yet the error message
    states ownership should be ceph:ceph. Either side differing from
    'ceph' is now reported.
    """
    code = 'ECOM4'
    dir_info = data['paths']['/var/lib/ceph']['dirs']['/var/lib/ceph']
    group = dir_info['group']
    owner = dir_info['owner']
    if owner != 'ceph' or group != 'ceph':
        msg = '/var/lib/ceph has invalid ownership: %s:%s, should be ceph:ceph' % (owner, group)
        return code, msg
def check_cluster_fsid(host, data):
    """Error (ECOM5) when this host's fsid differs from other hosts'.

    Compares the fsid from this host's ceph.conf against the fsid parsed
    from every other node's collected metadata and reports the hosts
    that disagree.
    """
    code = 'ECOM5'
    msg = 'fsid "%s" is different than host(s): %s'
    mismatched_hosts = []
    current_fsid = get_fsid(data)
    # no fsid exists for the current host as defined in ceph.conf, let other
    # checks note about this instead of reporting an empty FSID
    if not current_fsid:
        return
    for daemon, hosts in metadata['nodes'].items():
        # use `node` so the function's own `host` parameter is not
        # shadowed inside the loop (it previously was)
        for node in hosts:
            hostname = node['host']
            host_fsid = get_fsid(metadata[daemon][hostname])
            if host_fsid and current_fsid != host_fsid:
                mismatched_hosts.append(hostname)
    if mismatched_hosts:
        return code, msg % (current_fsid, ','.join(mismatched_hosts))
def check_ceph_version_parity(host, data):
    """Error (ECOM6) when the installed ceph version differs across hosts.

    Compares this host's installed version string against the version
    recorded for every other collected node.
    """
    code = 'ECOM6'
    msg = '(installed) Ceph version "%s" is different than host(s): %s'
    mismatched_hosts = []
    host_version = data['ceph']['version']
    for daemon, hosts in metadata['nodes'].items():
        # use `node` so the function's own `host` parameter is not
        # shadowed inside the loop (it previously was)
        for node in hosts:
            hostname = node['host']
            version = metadata[daemon][hostname]['ceph']['version']
            if host_version != version:
                mismatched_hosts.append(hostname)
    if mismatched_hosts:
        return code, msg % (host_version, ','.join(mismatched_hosts))
def check_ceph_socket_and_installed_version_parity(host, data):
    """Error (ECOM7) when a running daemon reports a version that does
    not match the version installed on the host.
    """
    installed_version = data['ceph']['version']
    mismatches = []
    for sock_path, sock_data in data['ceph']['sockets'].items():
        reported = sock_data['version'].get('version')
        # substring test: the installed version string may carry extra
        # detail (e.g. release name) around the bare version number
        if reported and reported not in installed_version:
            mismatches.append("%s:%s" % (sock_path, reported))
    if not mismatches:
        return
    msg = '(installed) Ceph version "%s" is different than version from running socket(s): %s'
    return 'ECOM7', msg % (installed_version, ','.join(mismatches))
def check_rgw_num_rados_handles(host, data):
    """
    Warn (WCOM7) for any socket whose rgw_num_rados_handles is above 1,
    which can lead to memory leaks. Although this is an RGW setting, the
    way Ceph handles configurations can have this setting be different
    depending on the daemon, so every socket on every host is inspected.
    """
    offenders = []
    for sock_path, sock_data in data['ceph']['sockets'].items():
        config = sock_data.get('config', {})
        if not config:
            continue
        handles = str_to_int(config.get('rgw_num_rados_handles', 1))
        if handles > 1:
            # report the daemon name, not the full socket path
            offenders.append(sock_path.split('/var/run/ceph/')[-1])
    if offenders:
        msg = "rgw_num_rados_handles shouldn't be larger than 1, can lead to memory leaks: %s"
        return 'WCOM7', msg % ','.join(offenders)
def check_fsid_exists(host, data):
    """Error (ECOM8) when this host's ceph.conf defines no fsid at all."""
    if not get_fsid(data):
        return 'ECOM8', "'fsid' is missing in the ceph configuration"
def check_fsid_per_daemon(host, data):
    """Error (ECOM9) when a running daemon reports a cluster FSID other
    than the most common FSID across the deployment.

    In certain deployments types (hi rook!) the FSID will not be present in a
    ceph conf file - it will be passed in *directly* to the daemon as an
    argument. We aren't going to parse arguments, but the admin socket allows
    us to poke inside and check what cluster FSID the daemon is associated
    with.

    Fix: ``sockets`` was previously assigned twice from the same
    expression; a single lookup is kept.
    """
    code = 'ECOM9'
    msg = 'Found cluster FSIDs from running sockets different than: %s'
    common_fsid = get_common_fsid()
    if not common_fsid:  # is this even possible?
        return
    msg = msg % common_fsid
    sockets = data['ceph']['sockets']
    failed = False
    for socket, socket_data in sockets.items():
        config = socket_data.get('config', {})
        if not config:
            continue
        socket_fsid = config.get('fsid')
        if not socket_fsid:
            continue
        if socket_fsid != common_fsid:
            # append one line per deviating daemon to the report
            name = socket.split('/var/run/ceph/')[-1]
            msg += '\n %s : %s' % (name, socket_fsid)
            failed = True
    if failed:
        return code, msg
def check_multiple_running_mons(host, data):
    """Error (ECOM10) when more than one monitor daemon runs on one host."""
    mon_sockets = [
        name for name in data['ceph']['sockets'].keys() if "mon." in name
    ]
    if len(mon_sockets) > 1:
        return 'ECOM10', 'multiple running mons found: %s' % ','.join(mon_sockets)
| |
from __future__ import print_function
from acq4.util import Qt
import math
from acq4.util.HelpfulException import HelpfulException
import numpy as np
import pyqtgraph as pg
import acq4.analysis.tools.functions as fn
import scipy
Ui_Form = Qt.importTemplate('.SpatialCorrelatorCtrlTemplate')
class SpatialCorrelator(Qt.QWidget):
    """Qt control widget that scores photostimulation spots for evoked
    (non-spontaneous) responses.

    Wraps fn.bendelsSpatialCorrelationAlgorithm: for each stimulation site
    it computes the probability that nearby post-stimulus events arose by
    chance, and emits the annotated record array via sigOutputChanged.
    """
    # emitted with the processed record array whenever results change
    sigOutputChanged = Qt.Signal(object)
    def __init__(self):
        Qt.QWidget.__init__(self)
        self.ctrl = Ui_Form()
        self.ctrl.setupUi(self)
        # analysis parameters: post-stimulus window, spatial radius,
        # spontaneous rate and significance threshold
        self.ctrl.deltaTSpin.setOpts(suffix='s', value=50e-3, dec=True, step=0.1, siPrefix=True)
        self.ctrl.radiusSpin.setOpts(suffix='m', value=90e-6, dec=True, step=0.1, siPrefix=True)
        self.ctrl.spontSpin.setOpts(suffix='Hz', value=0, step=0.1, siPrefix=True)
        self.ctrl.thresholdSpin.setOpts(value=0.05)
        #self.outline = SpatialOutline()
        self.data = None ## will be a record array with 1 row per stimulation - needs to contain fields xpos, ypos, numOfPostEvents, significance
        self.ctrl.processBtn.hide()
        # any parameter change reruns the analysis
        self.ctrl.processBtn.clicked.connect(self.process)
        self.ctrl.deltaTSpin.sigValueChanged.connect(self.paramChanged)
        self.ctrl.radiusSpin.sigValueChanged.connect(self.paramChanged)
        self.ctrl.spontSpin.sigValueChanged.connect(self.paramChanged)
        self.ctrl.thresholdSpin.sigValueChanged.connect(self.paramChanged)
        self.ctrl.probabilityRadio.toggled.connect(self.paramChanged)
        self.ctrl.eventCombo.currentIndexChanged.connect(self.paramChanged)
    #def getOutline(self):
        #return self.outline
    def populateEventsCombo(self, arr):
        """Fill the event-field selector with the array's field names."""
        names = arr.dtype.names ## it would be nice to narrow this down to only include integer fields
        self.ctrl.eventCombo.updateList(names)
    def setData(self, arr=None, xPos=None, yPos=None, numOfPostEvents=None):
        """Load a record array of stimulation sites and run the analysis.

        arr must at least carry 'xPos' and 'yPos' fields; a 'prob' field is
        appended to hold the computed probabilities. Passing arr=None
        clears the stored data. The xPos/yPos/numOfPostEvents keyword
        arguments are currently unused when arr is given.
        """
        if arr is not None:
            self.checkArrayInput(arr)
            self.populateEventsCombo(arr)
            fields = arr.dtype.names
            if 'xPos' not in fields or 'yPos' not in fields:
                raise HelpfulException("Array input to Spatial correlator needs to have the following fields: 'xPos', 'yPos'")
        elif arr is None:
            self.data = None
            return
        # copy input into a new array extended with a 'prob' column
        self.data = np.zeros(len(arr), dtype=arr.dtype.descr + [('prob', float)])
        self.data[:] = arr
        if 'numOfPreEvents' in fields and 'PreRegionLen' in fields:
            # pre-stimulus statistics are available; derive spontaneous rate
            self.calculateSpontRate()
        if 'PostRegionLen' in fields:
            self.ctrl.deltaTSpin.setValue(self.data['PostRegionLen'][0])
        self.process()
    def calculateSpontRate(self):
        """Set the spontaneous-rate spin box from pre-stimulus event counts."""
        spontRate = float(self.data['numOfPreEvents'].sum())/self.data['PreRegionLen'].sum()
        self.ctrl.spontSpin.setValue(spontRate)
    def paramChanged(self, *args):
        # slot for all parameter widgets: recompute with the new settings
        self.process()
    def process(self):
        """Run the spatial correlation algorithm and emit the result.

        In probability mode the raw probabilities are emitted; in threshold
        mode 'prob' is binarized (1 = significant, 0 = not) using the
        threshold spin value.
        """
        #print "process called."
        if self.ctrl.disableChk.isChecked():
            return
        if self.data is None:
            return
        #print "calculating Probs"
        fn.bendelsSpatialCorrelationAlgorithm(self.data, self.ctrl.radiusSpin.value(), self.ctrl.spontSpin.value(), self.ctrl.deltaTSpin.value(), printProcess=False, eventsKey=str(self.ctrl.eventCombo.currentText()))
        #print "probs calculated"
        self.data['prob'] = 1-self.data['prob'] ## give probability that events are not spontaneous
        if self.ctrl.probabilityRadio.isChecked():
            self.emitOutputChanged(self.data)
        elif self.ctrl.thresholdRadio.isChecked():
            arr = self.data['prob']
            arr[1-arr < self.ctrl.thresholdSpin.value()] = 1
            arr[(1-arr > self.ctrl.thresholdSpin.value())*(arr!=1)] = 0
            self.data['prob'] = arr
            self.emitOutputChanged(self.data)
        #spacing = 5e-6
        #arr = fn.convertPtsToSparseImage(self.data, ['prob'], spacing)
        #arr = arr['prob']
        #arr[1-arr < self.ctrl.significanceSpin.value()] = 1
        #arr[(1-arr > self.ctrl.significanceSpin.value())*(arr!=1)] = 0
        #arr = scipy.ndimage.gaussian_filter(arr, 45e-6/spacing)
        #curve = pg.IsocurveItem(arr, 0.2)
        #spots = self.data[(1-self.data['prob'] < self.ctrl.significanceSpin.value())*(self.data['prob'] != 0)]
        #if 'spotSize' in self.data.dtype.names:
            #self.outline.setRadius(self.data[1]['spotSize']/2.)
        #self.outline.setData(spots)
    def emitOutputChanged(self, obj):
        # indirection kept so subclasses/tests can intercept emission
        self.sigOutputChanged.emit(obj)
    #@staticmethod
    def checkArrayInput(self, arr):
        """Raise HelpfulException unless arr has the required fields."""
        fields = arr.dtype.names
        if 'xPos' not in fields or 'yPos' not in fields or 'numOfPostEvents' not in fields:
            raise HelpfulException("Array input needs to have the following fields: 'xPos', 'yPos', 'numOfPostEvents'. Current fields are: %s" %str(fields))
        else:
            return True
#@staticmethod
#def bendelsSpatialCorrelationAlgorithm(data, radius, spontRate, timeWindow):
#SpatialCorrelator.checkArrayInput(data) ## check that data has 'xPos', 'yPos' and 'numOfPostEvents'
### add 'prob' field to data array
#if 'prob' not in data.dtype.names:
#arr = np.zeros(len(data), dtype=data.dtype.descr + [('prob', float)])
#arr[:] = data
#data = arr
#else:
#data['prob']=0
### spatial correlation algorithm from :
### Bendels, MHK; Beed, P; Schmitz, D; Johenning, FW; and Leibold C. Etection of input sites in
### scanning photostimulation data based on spatial correlations. 2010. Journal of Neuroscience Methods.
### calculate probability of seeing a spontaneous event in time window
#p = 1-np.exp(-spontRate*timeWindow)
### for each spot, calculate the probability of having the events in nearby spots occur randomly
#for x in data:
#spots = data[(np.sqrt((data['xPos']-x['xPos'])**2+(data['yPos']-x['yPos'])**2)) < radius]
#nSpots = len(spots)
#nEventSpots = len(spots[spots['numOfPostEvents'] > 0])
#prob = 0
#for j in range(nEventSpots, nSpots+1):
#prob += ((p**j)*((1-p)**(nSpots-j))*math.factorial(nEventSpots))/(math.factorial(j)*math.factorial(nSpots-j))
##j = arange(nEventSponts, nSpots+1)
##prob = (((p**j)*((1-p)**(nSpots-j))*np.factorial(nEventSpots))/(np.factorial(j)*np.factorial(nSpots-j))).sum() ## need a factorial function that works on arrays
#x['prob'] = prob
#return data
#class SpatialOutline(pg.GraphicsObject):
#def __init__(self, parent=None, pen=None, spots=None, radius=25e-6):
#pg.GraphicsObject.__init__(self, parent)
#if pen is None:
#pen = (255, 255, 255)
#self.setPen(pen)
#self.path = Qt.QPainterPath()
#self.spots = spots
#self.radius = radius
#if spots is not None:
#self.makePath()
#def setData(self, spots):
#self.spots = spots
#self.makePath()
#self.update(self.boundingRect())
#def setRadius(self, radius):
#self.radius = radius
#self.makePath()
#self.update(self.boundingRect())
#def setPen(self, pen):
#self.pen = pg.mkPen(pen)
#self.currentPen = self.pen
#self.update()
#def makePath(self):
#if self.spots is None:
#return
#path = Qt.QPainterPath()
#for s in self.spots:
#path.addEllipse(s['xPos'], s['yPos'], self.radius, self.radius)
##pps = Qt.QPainterPathStroker()
##self.path = pps.createStroke(path)
#self.path=path
#def boundingRect(self):
#if self.spots is None:
#return Qt.QRectF()
##x = self.spots['xPos'].min()
##y = self.spots['yPos'].min()
##return Qt.QRectF(x,y , self.spots['xPos'].max()-x, self.spots['yPos'].max()-y)
##print "outline.boundingRect: ", self.path.boundingRect()
#return self.path.boundingRect()
#def paint(self, p, *args):
#p.setRenderHint(Qt.QPainter.Antialiasing)
##path = self.shape()
#p.setPen(self.currentPen)
#p.drawPath(self.path)
##p.setPen(pg.mkPen(255,0,0))
##p.drawPath(self.shape())
#p.setPen(pg.mkPen(0,0,255))
#p.drawRect(self.boundingRect())
| |
"""A setuptools based setup module.
"""
from __future__ import print_function
import os
import fnmatch
import re
import sys
import subprocess
import yaml
# Always prefer setuptools over distutils
from setuptools import setup, Command
from setuptools_lint.setuptools_command import PylintCommand
from six import string_types
from six.moves import reload_module
from yamllint.config import YamlLintConfig
from yamllint.cli import Format
from yamllint import linter
def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
    '''Walk base_dir and return paths of files whose name matches file_regex.

    exclude_dirs / include_dirs are fnmatch-style directory name patterns
    used to prune the walk; pass None to disable either filter.
    '''
    def _combined(patterns):
        # fold the glob patterns into one regex alternation; the fallback
        # r'$.' can never match anything
        return r'|'.join([fnmatch.translate(p) for p in patterns]) or r'$.'

    exclude_regex = _combined(exclude_dirs) if exclude_dirs is not None else ''
    # Don't use include_dirs, it is broken
    include_regex = _combined(include_dirs) if include_dirs is not None else ''
    results = []
    for root, dirs, files in os.walk(base_dir):
        if exclude_dirs is not None:
            # prune excluded directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
        if include_dirs is not None:
            dirs[:] = [d for d in dirs if re.match(include_regex, d)]
        results.extend(
            os.path.join(root, name)
            for name in files
            if re.search(file_regex, name) is not None
        )
    return results
def recursive_search(search_list, field):
    """
    Collect every value stored under ``field`` in a list of (possibly
    nested) dicts. Items in the list that are not dicts are ignored;
    list-valued entries are searched recursively.
    """
    hits = []
    for entry in search_list:
        if not isinstance(entry, dict):
            continue
        for key, value in entry.items():
            if key == field:
                hits.append(value)
            elif isinstance(value, list):
                hits.extend(recursive_search(value, field))
    return hits
def find_playbooks():
    '''Return (all_playbooks, included_playbooks) found under ./playbooks.

    A yaml document counts as a playbook when it either targets hosts or
    includes/imports another playbook; in the latter case the included
    file path is also recorded (normalized relative to the including
    file).
    '''
    all_playbooks = set()
    included_playbooks = set()
    exclude_dirs = ('adhoc', 'tasks')
    playbook_dir = os.path.join(os.getcwd(), 'playbooks')
    for yaml_file in find_files(playbook_dir, exclude_dirs, None, r'\.ya?ml$'):
        with open(yaml_file, 'r') as contents:
            for task in yaml.safe_load_all(contents) or {}:
                if not isinstance(task, dict):
                    # Skip yaml files which are not a dictionary of tasks
                    continue
                if 'include' in task or 'import_playbook' in task:
                    # Add the playbook and capture included playbooks
                    all_playbooks.add(yaml_file)
                    directive = task['include'] if 'include' in task else task['import_playbook']
                    # the directive may carry extra arguments; the first
                    # token is the file name
                    included_file_name = directive.split()[0]
                    included_file = os.path.normpath(
                        os.path.join(os.path.dirname(yaml_file),
                                     included_file_name))
                    included_playbooks.add(included_file)
                elif 'hosts' in task:
                    all_playbooks.add(yaml_file)
    return all_playbooks, included_playbooks
class OpenShiftAnsibleYamlLint(Command):
    ''' Command to run yamllint '''
    description = "Run yamllint tests"
    # setuptools command-line options: (long name, short name, help text)
    user_options = [
        ('excludes=', 'e', 'directories to exclude'),
        ('config-file=', 'c', 'config file to use'),
        ('format=', 'f', 'format to use (standard, parsable)'),
    ]
    def initialize_options(self):
        ''' initialize_options '''
        # Reason: Defining these attributes as a part of initialize_options is
        # consistent with upstream usage
        # Status: permanently disabled
        # pylint: disable=attribute-defined-outside-init
        self.excludes = None
        self.config_file = None
        self.format = None
    def finalize_options(self):
        ''' finalize_options '''
        # Reason: These attributes are defined in initialize_options and this
        # usage is consistant with upstream usage
        # Status: permanently disabled
        # pylint: disable=attribute-defined-outside-init
        # --excludes is passed as a comma-separated string; split to a list
        if isinstance(self.excludes, string_types):
            self.excludes = self.excludes.split(',')
        if self.format is None:
            self.format = 'standard'
        assert (self.format in ['standard', 'parsable']), (
            'unknown format {0}.'.format(self.format))
        if self.config_file is None:
            self.config_file = '.yamllint'
        assert os.path.isfile(self.config_file), (
            'yamllint config file {0} does not exist.'.format(self.config_file))
    def run(self):
        ''' run command

        Lints every yaml file under the current directory (minus excluded
        dirs) and exits with status 1 when any warning or error is found.
        '''
        if self.excludes is not None:
            print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
        config = YamlLintConfig(file=self.config_file)
        has_errors = False
        has_warnings = False
        if self.format == 'parsable':
            format_method = Format.parsable
        else:
            format_method = Format.standard_color
        for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
            first = True
            with open(yaml_file, 'r') as contents:
                for problem in linter.run(contents, config):
                    # print the file name once, before its first problem
                    if first and self.format != 'parsable':
                        print('\n{0}:'.format(os.path.relpath(yaml_file)))
                        first = False
                    print(format_method(problem, yaml_file))
                    if problem.level == linter.PROBLEM_LEVELS[2]:
                        has_errors = True
                    elif problem.level == linter.PROBLEM_LEVELS[1]:
                        has_warnings = True
        if has_errors or has_warnings:
            print('yamllint issues found')
            raise SystemExit(1)
class OpenShiftAnsiblePylint(PylintCommand):
    ''' Class to override the default behavior of PylintCommand '''

    # Reason: This method needs to be an instance method to conform to the
    # overridden method's signature
    # Status: permanently disabled
    # pylint: disable=no-self-use
    def find_all_modules(self):
        ''' find all python files to test '''
        # Skip tooling/VCS directories; every other *.py under the CWD
        # is handed to pylint as a module of the openshift_ansible package.
        exclude_dirs = ('.tox', 'test', 'tests', 'git')
        modules = []
        for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
            # Module name is the file's base name without its .py suffix.
            package = os.path.basename(match).replace('.py', '')
            modules.append(('openshift_ansible', package, match))
        return modules

    def get_finalized_command(self, cmd):
        ''' override get_finalized_command to ensure we use our
        find_all_modules method '''
        # NOTE(review): implicitly returns None for any cmd other than
        # 'build_py' — presumably pylint's setuptools command only ever
        # requests 'build_py' here; confirm against PylintCommand.
        if cmd == 'build_py':
            return self

    # Reason: This method needs to be an instance method to conform to the
    # overridden method's signature
    # Status: permanently disabled
    # pylint: disable=no-self-use
    def with_project_on_sys_path(self, func, func_args, func_kwargs):
        ''' override behavior, since we don't need to build '''
        # No build step is required, so call through directly instead of
        # munging sys.path like the upstream implementation does.
        return func(*func_args, **func_kwargs)
class OpenShiftAnsibleGenerateValidation(Command):
    ''' Command to run generated module validation'''
    description = "Run generated module validation"
    user_options = []

    def initialize_options(self):
        ''' initialize_options '''
        pass

    def finalize_options(self):
        ''' finalize_options '''
        pass

    # self isn't used but I believe is required when it is called.
    # pylint: disable=no-self-use
    def run(self):
        ''' run command '''
        # find the files that call generate
        generate_files = find_files('roles',
                                    ['inventory',
                                     'test',
                                     'playbooks'],
                                    None,
                                    'generate.py$')
        if len(generate_files) < 1:
            print('Did not find any code generation. Please verify module code generation.')  # noqa: E501
            raise SystemExit(1)

        errors = False
        for gen in generate_files:
            print('Checking generated module code: {0}'.format(gen))
            try:
                # NOTE(review): sys.path grows by one entry per generate.py
                # and is never cleaned up; reload_module() re-imports the
                # module from the newly prepended directory each time.
                sys.path.insert(0, os.path.dirname(gen))
                # we are importing dynamically. This isn't in
                # the python path.
                # pylint: disable=import-error
                import generate
                reload_module(generate)
                generate.verify()
            # NOTE(review): this except clause evaluates `generate`, which
            # is only bound if the import above succeeded — an ImportError
            # here would escape uncaught (presumably acceptable; confirm).
            except generate.GenerateAnsibleException as gae:
                print(gae.args)
                errors = True
        if errors:
            print('Found errors while generating module code.')
            raise SystemExit(1)

        print('\nAll generate scripts passed.\n')
class OpenShiftAnsibleSyntaxCheck(Command):
    ''' Command to run Ansible syntax check'''
    description = "Run Ansible syntax check"
    user_options = []

    # Colors
    FAIL = '\033[31m'  # Red
    ENDC = '\033[0m'   # Reset

    def initialize_options(self):
        ''' initialize_options '''
        pass

    def finalize_options(self):
        ''' finalize_options '''
        pass

    def deprecate_jinja2_in_when(self, yaml_contents, yaml_file):
        ''' Check for Jinja2 templating delimiters in when conditions.

        Returns True when at least one offending condition was found.
        '''
        test_result = False
        failed_items = []
        # 'when' values may be a single string or a list of strings.
        search_results = recursive_search(yaml_contents, 'when')
        for item in search_results:
            if isinstance(item, str):
                if '{{' in item or '{%' in item:
                    failed_items.append(item)
            else:
                for sub_item in item:
                    if '{{' in sub_item or '{%' in sub_item:
                        failed_items.append(sub_item)

        if len(failed_items) > 0:
            print('{}Error: Usage of Jinja2 templating delimiters in when '
                  'conditions is deprecated in Ansible 2.3.\n'
                  '  File: {}'.format(self.FAIL, yaml_file))
            for item in failed_items:
                print('  Found: "{}"'.format(item))
            print(self.ENDC)
            test_result = True
        return test_result

    def deprecate_include(self, yaml_contents, yaml_file):
        ''' Check for usage of the deprecated `include` directive.

        Returns True when at least one `include:` was found.
        '''
        test_result = False
        search_results = recursive_search(yaml_contents, 'include')
        if len(search_results) > 0:
            print('{}Error: The `include` directive is deprecated in Ansible 2.4.\n'
                  'https://github.com/ansible/ansible/blob/devel/CHANGELOG.md\n'
                  '  File: {}'.format(self.FAIL, yaml_file))
            for item in search_results:
                print('  Found: "include: {}"'.format(item))
            print(self.ENDC)
            test_result = True
        return test_result

    def run(self):
        ''' Run deprecation, include-placement and syntax checks; exit
        non-zero if any of them failed. '''
        has_errors = False

        print('#' * 60)
        print('Ansible Deprecation Checks')
        exclude_dirs = ('adhoc', 'files', 'meta', 'vars', 'defaults', '.tox')
        for yaml_file in find_files(
                os.getcwd(), exclude_dirs, None, r'\.ya?ml$'):
            with open(yaml_file, 'r') as contents:
                # BUG FIX: this used yaml.safe_load_all(), whose generator
                # is never a list, so the isinstance() guard below skipped
                # every file and the deprecation checks never ran.  A
                # playbook is one YAML document holding a list of plays, so
                # safe_load() is the right call.
                try:
                    yaml_contents = yaml.safe_load(contents)
                except yaml.YAMLError:
                    # Unparseable YAML is yamllint's job; skip it here.
                    continue
            if not isinstance(yaml_contents, list):
                continue

            # Check for Jinja2 templating delimiters in when conditions
            result = self.deprecate_jinja2_in_when(yaml_contents, yaml_file)
            has_errors = result or has_errors
            # Check for usage of include: directive
            result = self.deprecate_include(yaml_contents, yaml_file)
            has_errors = result or has_errors

        if not has_errors:
            print('...PASSED')

        all_playbooks, included_playbooks = find_playbooks()

        print('#' * 60)
        print('Invalid Playbook Include Checks')
        invalid_include = []
        # Ignore imported playbooks in 'common', 'private' and 'init'. It is
        # expected that these locations would be imported by entry point
        # playbooks.
        # Ignore playbooks in 'aws', 'gcp' and 'openstack' because these
        # playbooks do not follow the same component entry point structure.
        # Ignore deploy_cluster.yml and prerequisites.yml because these are
        # entry point playbooks but are imported by playbooks in the cloud
        # provisioning playbooks.
        # (Hoisted out of the loop: the tuple is loop-invariant.)
        ignored = ('common', 'private', 'init',
                   'aws', 'gcp', 'openstack',
                   'deploy_cluster.yml', 'prerequisites.yml')
        for playbook in included_playbooks:
            if any(x in playbook for x in ignored):
                continue
            invalid_include.append(playbook)

        if invalid_include:
            print('{}Invalid included playbook(s) found. Please ensure'
                  ' component entry point playbooks are not included{}'.format(self.FAIL, self.ENDC))
            invalid_include.sort()
            for playbook in invalid_include:
                print('{}{}{}'.format(self.FAIL, playbook, self.ENDC))
            has_errors = True

        if not has_errors:
            print('...PASSED')

        print('#' * 60)
        print('Ansible Playbook Entry Point Syntax Checks')
        # Evaluate the difference between all playbooks and included playbooks
        entrypoint_playbooks = sorted(all_playbooks.difference(included_playbooks))
        print('Entry point playbook count: {}'.format(len(entrypoint_playbooks)))
        for playbook in entrypoint_playbooks:
            print('-' * 60)
            print('Syntax checking playbook: {}'.format(playbook))

            # Error on any entry points in 'common' or 'private'
            invalid_entry_point = ('common', 'private')
            if any(x in playbook for x in invalid_entry_point):
                print('{}Invalid entry point playbook or orphaned file. Entry'
                      ' point playbooks are not allowed in \'common\' or'
                      ' \'private\' directories{}'.format(self.FAIL, self.ENDC))
                has_errors = True

            # --syntax-check each entry point playbook
            try:
                # Create a host group list to avoid WARNING on unmatched host patterns
                tox_ansible_inv = os.environ['TOX_ANSIBLE_INV_PATH']
                subprocess.check_output(
                    ['ansible-playbook', '-i', tox_ansible_inv,
                     '--syntax-check', playbook, '-e', '@{}_extras'.format(tox_ansible_inv)]
                )
            except subprocess.CalledProcessError as cpe:
                print('{}Execution failed: {}{}'.format(
                    self.FAIL, cpe, self.ENDC))
                has_errors = True

        if has_errors:
            raise SystemExit(1)
class UnsupportedCommand(Command):
    ''' Basic Command to override unsupported commands '''
    user_options = []

    # Reason: This method needs to be an instance method to conform to the
    # overridden method's signature
    # Status: permanently disabled
    # pylint: disable=no-self-use
    def initialize_options(self):
        ''' initialize_options '''
        pass

    # Reason: This method needs to be an instance method to conform to the
    # overridden method's signature
    # Status: permanently disabled
    # pylint: disable=no-self-use
    def finalize_options(self):
        ''' finalize_options '''
        pass

    # Reason: This method needs to be an instance method to conform to the
    # overridden method's signature
    # Status: permanently disabled
    # pylint: disable=no-self-use
    def run(self):
        ''' run command '''
        # Deliberately a no-op beyond the notice: this setup.py only exists
        # to expose the lint/validation commands registered in setup().
        print("Unsupported command for openshift-ansible")
# Register the custom commands.  All standard packaging commands are stubbed
# out with UnsupportedCommand because this project is not installed through
# setup.py — only the lint/validation helpers below are supported.
setup(
    name='openshift-ansible',
    license="Apache 2.0",
    cmdclass={
        'install': UnsupportedCommand,
        'develop': UnsupportedCommand,
        'build': UnsupportedCommand,
        'build_py': UnsupportedCommand,
        'build_ext': UnsupportedCommand,
        'egg_info': UnsupportedCommand,
        'sdist': UnsupportedCommand,
        'lint': OpenShiftAnsiblePylint,
        'yamllint': OpenShiftAnsibleYamlLint,
        'generate_validation': OpenShiftAnsibleGenerateValidation,
        'ansible_syntax': OpenShiftAnsibleSyntaxCheck,
    },
    # No importable packages are shipped; this file only provides commands.
    packages=[],
)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import StringIO
from lxml import etree
from keystone.common import serializer
from keystone import test
class XmlSerializerTestCase(test.TestCase):
    """Round-trip tests for the Keystone dict <-> XML serializer."""

    def assertEqualXML(self, a, b):
        """Parses two XML documents from strings and compares the results.
        This provides easy-to-read failures from nose.
        """
        parser = etree.XMLParser(remove_blank_text=True)

        def canonical_xml(s):
            # Canonicalize (C14N), re-parse, then pretty-print so that two
            # semantically equal documents render identically line by line.
            s = s.strip()

            fp = StringIO.StringIO()
            dom = etree.fromstring(s, parser)
            dom.getroottree().write_c14n(fp)
            s = fp.getvalue()

            dom = etree.fromstring(s, parser)
            return etree.tostring(dom, pretty_print=True)

        a = canonical_xml(a)
        b = canonical_xml(b)
        # Compare line lists for readable diffs on failure.
        self.assertEqual(a.split('\n'), b.split('\n'))

    def assertSerializeDeserialize(self, d, xml, xmlns=None):
        # dict -> XML must match the expected document ...
        self.assertEqualXML(
            serializer.to_xml(copy.deepcopy(d), xmlns),
            xml)
        # ... and XML -> dict must reproduce the original dict.
        self.assertEqual(serializer.from_xml(xml), d)

        # operations should be invertible
        self.assertEqual(
            serializer.from_xml(serializer.to_xml(copy.deepcopy(d), xmlns)),
            d)
        self.assertEqualXML(
            serializer.to_xml(serializer.from_xml(xml), xmlns),
            xml)

    def test_auth_request(self):
        # Nested dicts become nested elements; scalars become attributes.
        d = {
            "auth": {
                "passwordCredentials": {
                    "username": "test_user",
                    "password": "mypass"
                },
                "tenantName": "customer-x"
            }
        }

        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <auth xmlns="http://docs.openstack.org/identity/api/v2.0"
                    tenantName="customer-x">
                <passwordCredentials
                        username="test_user"
                        password="mypass"/>
            </auth>
        """

        self.assertSerializeDeserialize(d, xml)

    def test_role_crud(self):
        d = {
            "role": {
                "id": "123",
                "name": "Guest",
                "description": "Guest Access"
            }
        }

        # TODO(dolph): examples show this description as an attribute?
        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <role xmlns="http://docs.openstack.org/identity/api/v2.0"
                    id="123"
                    name="Guest">
                <description>Guest Access</description>
            </role>
        """

        self.assertSerializeDeserialize(d, xml)

    def test_service_crud(self):
        # Namespaced key: the OS-KSADM prefix maps to a custom xmlns.
        xmlns = "http://docs.openstack.org/identity/api/ext/OS-KSADM/v1.0"

        d = {
            "OS-KSADM:service": {
                "id": "123",
                "name": "nova",
                "type": "compute",
                "description": "OpenStack Compute Service"
            }
        }

        # TODO(dolph): examples show this description as an attribute?
        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <service
                    xmlns="%(xmlns)s"
                    type="compute"
                    id="123"
                    name="nova">
                <description>OpenStack Compute Service</description>
            </service>
        """ % {'xmlns': xmlns}

        self.assertSerializeDeserialize(d, xml, xmlns=xmlns)

    def test_tenant_crud(self):
        # Booleans serialize as lowercase attribute values ("true").
        d = {
            "tenant": {
                "id": "1234",
                "name": "ACME corp",
                "description": "A description...",
                "enabled": True
            }
        }

        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <tenant
                    xmlns="http://docs.openstack.org/identity/api/v2.0"
                    enabled="true"
                    id="1234"
                    name="ACME corp">
                <description>A description...</description>
            </tenant>
        """

        self.assertSerializeDeserialize(d, xml)

    def test_tenant_crud_no_description(self):
        # Empty description round-trips as an empty element, not a
        # missing one.
        d = {
            "tenant": {
                "id": "1234",
                "name": "ACME corp",
                "description": "",
                "enabled": True
            }
        }

        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <tenant
                    xmlns="http://docs.openstack.org/identity/api/v2.0"
                    enabled="true"
                    id="1234"
                    name="ACME corp">
                <description></description>
            </tenant>
        """

        self.assertSerializeDeserialize(d, xml)

    def test_policy_list(self):
        # One-way check only: a plural key renders singular child elements.
        d = {"policies": [{"id": "ab12cd"}]}

        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <policies xmlns="http://docs.openstack.org/identity/api/v2.0">
                <policy id="ab12cd"/>
            </policies>
        """

        self.assertEqualXML(serializer.to_xml(d), xml)

    def test_values_list(self):
        # A nested "values" list flattens into repeated child elements.
        d = {
            "objects": {
                "values": [{
                    "attribute": "value1",
                }, {
                    "attribute": "value2",
                }]
            }
        }

        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <objects xmlns="http://docs.openstack.org/identity/api/v2.0">
                <object attribute="value1"/>
                <object attribute="value2"/>
            </objects>
        """

        self.assertEqualXML(serializer.to_xml(d), xml)

    def test_collection_list(self):
        # "links" dicts serialize as <link rel=... href=...> elements;
        # None-valued links (previous) are omitted.
        d = {
            "links": {
                "next": "http://localhost:5000/v3/objects?page=3",
                "previous": None,
                "self": "http://localhost:5000/v3/objects"
            },
            "objects": [{
                "attribute": "value1",
                "links": {
                    "self": "http://localhost:5000/v3/objects/abc123def",
                    "anotherobj": "http://localhost:5000/v3/anotherobjs/123"
                }
            }, {
                "attribute": "value2",
                "links": {
                    "self": "http://localhost:5000/v3/objects/abc456"
                }
            }]}
        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <objects xmlns="http://docs.openstack.org/identity/api/v2.0">
                <object attribute="value1">
                    <links>
                        <link rel="self"
                            href="http://localhost:5000/v3/objects/abc123def"/>
                        <link rel="anotherobj"
                            href="http://localhost:5000/v3/anotherobjs/123"/>
                    </links>
                </object>
                <object attribute="value2">
                    <links>
                        <link rel="self"
                            href="http://localhost:5000/v3/objects/abc456"/>
                    </links>
                </object>
                <links>
                    <link rel="self"
                        href="http://localhost:5000/v3/objects"/>
                    <link rel="next"
                        href="http://localhost:5000/v3/objects?page=3"/>
                </links>
            </objects>
        """
        self.assertSerializeDeserialize(d, xml)

    def test_collection_member(self):
        d = {
            "object": {
                "attribute": "value",
                "links": {
                    "self": "http://localhost:5000/v3/objects/abc123def",
                    "anotherobj": "http://localhost:5000/v3/anotherobjs/123"}}}
        xml = """
            <?xml version="1.0" encoding="UTF-8"?>
            <object xmlns="http://docs.openstack.org/identity/api/v2.0"
                    attribute="value">
                    <links>
                        <link rel="self"
                            href="http://localhost:5000/v3/objects/abc123def"/>
                        <link rel="anotherobj"
                            href="http://localhost:5000/v3/anotherobjs/123"/>
                    </links>
            </object>
        """
        self.assertSerializeDeserialize(d, xml)
| |
import codecs
import logging
import os
import sys
from collections import namedtuple
import six
import yaml
from .errors import CircularReference
from .errors import ComposeFileNotFound
from .errors import ConfigurationError
from .interpolation import interpolate_environment_variables
from .validation import validate_against_fields_schema
from .validation import validate_against_service_schema
from .validation import validate_extended_service_exists
from .validation import validate_extends_file_path
from .validation import validate_top_level_object
# Service options that map directly onto Docker container create/start
# parameters; these are passed through after validation/merging.
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cgroup_parent',
    'command',
    'cpu_shares',
    'cpuset',
    'detach',
    'devices',
    'dns',
    'dns_search',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'extra_hosts',
    'hostname',
    'image',
    'ipc',
    'labels',
    'links',
    'log_driver',
    'log_opt',
    'mac_address',
    'mem_limit',
    'memswap_limit',
    'net',
    'pid',
    'ports',
    'privileged',
    'read_only',
    'restart',
    'security_opt',
    'stdin_open',
    'tty',
    'user',
    'volume_driver',
    'volumes',
    'volumes_from',
    'working_dir',
]

# Options Compose itself understands, on top of the Docker pass-through set.
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'build',
    'container_name',
    'dockerfile',
    'expose',
    'external_links',
    'name',
]

# Config file names probed, in priority order (the 'fig' names are legacy).
SUPPORTED_FILENAMES = [
    'docker-compose.yml',
    'docker-compose.yaml',
    'fig.yml',
    'fig.yaml',
]

# Loaded automatically on top of the main file when present.
DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'

log = logging.getLogger(__name__)
class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files')):
    """Immutable pair describing where and what to load.

    :param working_dir: the directory to use for relative paths in the config
    :type  working_dir: string
    :param config_files: list of configuration files to load
    :type  config_files: list of :class:`ConfigFile`
    """
class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
    """One parsed configuration file (filename may be None for stdin).

    :param filename: filename of the config file
    :type  filename: string
    :param config: contents of the config file
    :type  config: :class:`dict`
    """
def find(base_dir, filenames):
    """Resolve the set of config files for a project rooted at *base_dir*.

    ``['-']`` reads a single config document from stdin; an empty/None list
    triggers the default filename search up the directory tree.
    """
    if filenames == ['-']:
        return ConfigDetails(
            os.getcwd(),
            [ConfigFile(None, yaml.safe_load(sys.stdin))])

    if filenames:
        paths = [os.path.join(base_dir, name) for name in filenames]
    else:
        paths = get_default_config_files(base_dir)

    log.debug("Using configuration files: {}".format(",".join(paths)))
    return ConfigDetails(
        os.path.dirname(paths[0]),
        [ConfigFile(path, load_yaml(path)) for path in paths])
def get_default_config_files(base_dir):
    """Walk up from *base_dir* looking for a supported config file name.

    Returns the winning path plus any override file found beside it.
    Raises ComposeFileNotFound when nothing matches.
    """
    found, path = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)

    if not found:
        raise ComposeFileNotFound(SUPPORTED_FILENAMES)

    winner = found[0]

    if len(found) > 1:
        log.warn("Found multiple config files with supported names: %s", ", ".join(found))
        log.warn("Using %s\n", winner)

    if winner == 'docker-compose.yaml':
        log.warn("Please be aware that .yml is the expected extension "
                 "in most cases, and using .yaml can cause compatibility "
                 "issues in future.\n")

    if winner.startswith("fig."):
        log.warn("%s is deprecated and will not be supported in future. "
                 "Please rename your config file to docker-compose.yml\n" % winner)

    return [os.path.join(path, winner)] + get_default_override_file(path)
def get_default_override_file(path):
    """Return the override file in *path* as a one-element list, or []."""
    candidate = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
    if os.path.exists(candidate):
        return [candidate]
    return []
def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    present = [name for name in filenames
               if os.path.exists(os.path.join(path, name))]
    if present:
        return (present, path)

    parent = os.path.join(path, '..')
    if os.path.abspath(parent) == os.path.abspath(path):
        # Reached the filesystem root without a match.
        return (present, path)
    return find_candidates_in_parent_dirs(filenames, parent)
def load(config_details):
    """Load the configuration from a working directory and a list of
    configuration files. Files are loaded in order, and merged on top
    of each other to create the final configuration.

    Return a fully interpolated, extended and validated configuration.
    """
    def build_service(filename, service_name, service_dict):
        # Resolve env/extends for one service and validate its paths.
        loader = ServiceLoader(
            config_details.working_dir,
            filename,
            service_name,
            service_dict)
        service_dict = loader.make_service_dict()
        validate_paths(service_dict)
        return service_dict

    def load_file(filename, config):
        # Interpolate ${VARS}, schema-check, then build every service.
        processed_config = interpolate_environment_variables(config)
        validate_against_fields_schema(processed_config)
        return [
            build_service(filename, name, service_config)
            for name, service_config in processed_config.items()
        ]

    def merge_services(base, override):
        # Union of service names; each service merged field by field.
        all_service_names = set(base) | set(override)
        return {
            name: merge_service_dicts_from_files(
                base.get(name, {}),
                override.get(name, {}))
            for name in all_service_names
        }

    # Later files are folded onto the first one; merge order matters, and
    # the first file's name is kept as the merged result's filename.
    config_file = config_details.config_files[0]
    validate_top_level_object(config_file.config)
    for next_file in config_details.config_files[1:]:
        validate_top_level_object(next_file.config)

        config_file = ConfigFile(
            config_file.filename,
            merge_services(config_file.config, next_file.config))

    return load_file(config_file.filename, config_file.config)
class ServiceLoader(object):
    """Resolves a single service definition: environment variables,
    'extends' chains (with circular-reference detection) and final
    schema validation."""

    def __init__(
        self,
        working_dir,
        filename,
        service_name,
        service_dict,
        already_seen=None
    ):
        # already_seen holds (filename, service) signatures already on the
        # current extends chain; used by detect_cycle().
        if working_dir is None:
            raise ValueError("No working_dir passed to ServiceLoader()")

        self.working_dir = os.path.abspath(working_dir)

        if filename:
            self.filename = os.path.abspath(filename)
        else:
            self.filename = filename
        self.already_seen = already_seen or []
        # Work on a copy so the caller's dict is never mutated.
        self.service_dict = service_dict.copy()
        self.service_name = service_name
        self.service_dict['name'] = service_name

    def detect_cycle(self, name):
        # Raise if this (file, service) pair is already on the chain.
        if self.signature(name) in self.already_seen:
            raise CircularReference(self.already_seen + [self.signature(name)])

    def make_service_dict(self):
        """Return the fully resolved service dict for this loader."""
        service_dict = dict(self.service_dict)
        env = resolve_environment(self.working_dir, self.service_dict)
        if env:
            # Resolved env replaces both keys; env_file is consumed here.
            service_dict['environment'] = env
            service_dict.pop('env_file', None)

        if 'extends' in service_dict:
            service_dict = self.resolve_extends(*self.validate_and_construct_extends())

        # Only the outermost result of an extends chain is schema-checked.
        if not self.already_seen:
            validate_against_service_schema(service_dict, self.service_name)

        return process_container_options(service_dict, working_dir=self.working_dir)

    def validate_and_construct_extends(self):
        """Normalize the 'extends' value and load the extended service.

        Returns (config_path, service_config, service_name).
        """
        extends = self.service_dict['extends']
        if not isinstance(extends, dict):
            # Shorthand form: extends: <service-name>
            extends = {'service': extends}

        validate_extends_file_path(self.service_name, extends, self.filename)
        config_path = self.get_extended_config_path(extends)
        service_name = extends['service']

        config = load_yaml(config_path)
        validate_top_level_object(config)
        full_extended_config = interpolate_environment_variables(config)

        validate_extended_service_exists(
            service_name,
            full_extended_config,
            config_path
        )
        validate_against_fields_schema(full_extended_config)

        service_config = full_extended_config[service_name]
        return config_path, service_config, service_name

    def resolve_extends(self, extended_config_path, service_config, service_name):
        # Recursively resolve the extended service with a child loader,
        # passing along the chain for cycle detection, then merge our own
        # options on top of the inherited ones.
        other_working_dir = os.path.dirname(extended_config_path)
        other_already_seen = self.already_seen + [self.signature(self.service_name)]

        other_loader = ServiceLoader(
            other_working_dir,
            extended_config_path,
            self.service_name,
            service_config,
            already_seen=other_already_seen,
        )

        other_loader.detect_cycle(service_name)
        other_service_dict = other_loader.make_service_dict()
        validate_extended_service_dict(
            other_service_dict,
            extended_config_path,
            service_name,
        )

        return merge_service_dicts(other_service_dict, self.service_dict)

    def get_extended_config_path(self, extends_options):
        """Service we are extending either has a value for 'file' set, which we
        need to obtain a full path too or we are extending from a service
        defined in our own file.
        """
        if 'file' in extends_options:
            return expand_path(self.working_dir, extends_options['file'])
        return self.filename

    def signature(self, name):
        # Identity of a service on the extends chain.
        return self.filename, name
def resolve_environment(working_dir, service_dict):
    """Unpack any environment variables from an env_file, if set.
    Interpolate environment values if set.
    """
    has_env = 'environment' in service_dict
    has_file = 'env_file' in service_dict
    if not has_env and not has_file:
        return {}

    merged = {}
    if has_file:
        # Files are applied first so explicit 'environment' entries win.
        for env_file in get_env_files(service_dict, working_dir=working_dir):
            merged.update(env_vars_from_file(env_file))

    merged.update(parse_environment(service_dict.get('environment')))
    return dict(resolve_env_var(k, v) for k, v in six.iteritems(merged))
def validate_extended_service_dict(service_dict, filename, service):
    """Reject extending a service whose options cannot safely be inherited."""
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError(
            "%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError(
            "%s services with 'volumes_from' cannot be extended" % error_prefix)

    # net is only a problem when it targets another container.
    if 'net' in service_dict and \
            get_service_name_from_net(service_dict['net']) is not None:
        raise ConfigurationError(
            "%s services with 'net: container' cannot be extended" % error_prefix)
def validate_ulimits(ulimit_config):
    """Ensure no ulimit entry sets a soft limit above its hard limit.

    Scalar entries (a single shared limit) need no validation.
    """
    # .items() is behaviorally identical to six.iteritems() here.
    for limit_name, limit in ulimit_config.items():
        if not isinstance(limit, dict):
            continue
        if limit['soft'] > limit['hard']:
            raise ConfigurationError(
                "ulimit_config \"%s\" cannot contain a 'soft' value higher than 'hard' value" %
                ulimit_config
            )
def process_container_options(service_dict, working_dir=None):
    """Normalize volume, build, label and ulimit options on a copy of
    *service_dict* and return it."""
    processed = dict(service_dict)

    # Named volume drivers manage their own paths; only resolve host paths
    # for the default driver.
    if 'volumes' in processed and processed.get('volume_driver') is None:
        processed['volumes'] = resolve_volume_paths(processed, working_dir=working_dir)

    if 'build' in processed:
        processed['build'] = resolve_build_path(processed['build'], working_dir=working_dir)

    if 'labels' in processed:
        processed['labels'] = parse_labels(processed['labels'])

    if 'ulimits' in processed:
        validate_ulimits(processed['ulimits'])

    return processed
def merge_service_dicts_from_files(base, override):
    """When merging services from multiple files we need to merge the `extends`
    field. This is not handled by `merge_service_dicts()` which is used to
    perform the `extends`.
    """
    merged = merge_service_dicts(base, override)
    if 'extends' in override:
        merged['extends'] = override['extends']
    return merged
def merge_service_dicts(base, override):
    """Merge one service definition on top of another.

    Merge rules, applied in order:
      * 'environment' and 'labels' merge key-wise (override wins)
      * volume/device path mappings merge by container path
      * 'image' in the override evicts an inherited 'build' (and vice
        versa) — the two options are mutually exclusive
      * plain list keys are concatenated
      * 'dns'/'dns_search' accept string or list and are concatenated
      * every remaining allowed key is taken wholesale from the override
    """
    d = base.copy()

    if 'environment' in base or 'environment' in override:
        d['environment'] = merge_environment(
            base.get('environment'),
            override.get('environment'),
        )

    path_mapping_keys = ['volumes', 'devices']
    for key in path_mapping_keys:
        if key in base or key in override:
            d[key] = merge_path_mappings(
                base.get(key),
                override.get(key),
            )

    if 'labels' in base or 'labels' in override:
        d['labels'] = merge_labels(
            base.get('labels'),
            override.get('labels'),
        )

    # Eviction must happen before the final copy loop below, which would
    # otherwise re-add the overridden key from the override dict.
    if 'image' in override and 'build' in d:
        del d['build']

    if 'build' in override and 'image' in d:
        del d['image']

    list_keys = ['ports', 'expose', 'external_links']
    for key in list_keys:
        if key in base or key in override:
            d[key] = base.get(key, []) + override.get(key, [])

    list_or_string_keys = ['dns', 'dns_search']
    for key in list_or_string_keys:
        if key in base or key in override:
            d[key] = to_list(base.get(key)) + to_list(override.get(key))

    already_merged_keys = ['environment', 'labels'] + path_mapping_keys + list_keys + list_or_string_keys

    # Everything not merged above is a straight override.
    for k in set(ALLOWED_KEYS) - set(already_merged_keys):
        if k in override:
            d[k] = override[k]

    return d
def merge_environment(base, override):
    """Merge two environment specs; entries from *override* win."""
    merged = parse_environment(base)
    merged.update(parse_environment(override))
    return merged
def get_env_files(options, working_dir=None):
    """Return the service's env_file entries as absolute paths.

    Always returns a list.  (Previously the no-env_file branch returned
    an empty *dict* while the other branch returned a list — the
    inconsistency only went unnoticed because callers never iterated
    that branch's items.  Both empty containers iterate identically, so
    returning [] is backward-compatible.)
    """
    if 'env_file' not in options:
        return []
    env_files = options.get('env_file', [])
    # A single filename is allowed as shorthand for a one-element list.
    if not isinstance(env_files, list):
        env_files = [env_files]
    return [expand_path(working_dir, path) for path in env_files]
def parse_environment(environment):
    """Normalize an environment spec (list of KEY=VAL strings or a
    mapping) into a fresh dict; falsy input yields an empty dict."""
    if not environment:
        return {}

    if isinstance(environment, list):
        return dict(split_env(entry) for entry in environment)

    if isinstance(environment, dict):
        # Copy so callers can safely mutate the result.
        return dict(environment)

    raise ConfigurationError(
        "environment \"%s\" must be a list or mapping," %
        environment
    )
def split_env(env):
    """Split a 'KEY=VALUE' entry; a bare KEY maps to a value of None."""
    # bytes is exactly six.binary_type on both Python 2 (str) and 3.
    if isinstance(env, bytes):
        env = env.decode('utf-8')
    key, sep, value = env.partition('=')
    if sep:
        return [key, value]
    return env, None
def resolve_env_var(key, val):
    """Resolve a possibly-unset variable against the process environment.

    A None value inherits the caller's environment, defaulting to ''.
    """
    if val is not None:
        return key, val
    return key, os.environ.get(key, '')
def env_vars_from_file(filename):
    """
    Read in a line delimited file of environment variables.

    Blank lines and '#' comment lines are skipped.

    :raise ConfigurationError: if *filename* does not exist.
    """
    if not os.path.exists(filename):
        raise ConfigurationError("Couldn't find env file: %s" % filename)
    env = {}
    # Use a context manager so the handle is closed promptly — the
    # previous code leaked the open file until garbage collection.
    with codecs.open(filename, 'r', 'utf-8') as env_file:
        for line in env_file:
            line = line.strip()
            if line and not line.startswith('#'):
                k, v = split_env(line)
                env[k] = v
    return env
def resolve_volume_paths(service_dict, working_dir=None):
    """Expand every volume spec of the service to an absolute host path."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_volume_paths()")

    name = service_dict['name']
    return [resolve_volume_path(volume, working_dir, name)
            for volume in service_dict['volumes']]
def resolve_volume_path(volume, working_dir, service_name):
    """Expand the host side of a volume mapping, when it has one."""
    container_path, host_path = split_path_mapping(volume)
    if host_path is None:
        # Anonymous volume: only a container path.
        return container_path

    if host_path.startswith('.'):
        host_path = expand_path(working_dir, host_path)
    return u"{}:{}".format(os.path.expanduser(host_path), container_path)
def resolve_build_path(build_path, working_dir=None):
    """Make a service's build path absolute relative to *working_dir*."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_build_path")
    return expand_path(working_dir, build_path)
def validate_paths(service_dict):
    """Ensure a configured build path exists and is readable."""
    if 'build' not in service_dict:
        return

    build_path = service_dict['build']
    readable = os.path.exists(build_path) and os.access(build_path, os.R_OK)
    if not readable:
        raise ConfigurationError("build path %s either does not exist or is not accessible." % build_path)
def merge_path_mappings(base, override):
    """Merge two volume/device mapping lists, keyed by container path;
    *override* entries win."""
    by_container = dict_from_path_mappings(base)
    by_container.update(dict_from_path_mappings(override))
    return path_mappings_from_dict(by_container)
def dict_from_path_mappings(path_mappings):
    """Index a mapping list by container path; falsy input yields {}."""
    if not path_mappings:
        return {}
    return dict(split_path_mapping(mapping) for mapping in path_mappings)
def path_mappings_from_dict(d):
    """Convert a {container: host} index back into 'host:container' specs."""
    return [join_path_mapping(item) for item in d.items()]
def split_path_mapping(volume_path):
    """
    Ascertain if the volume_path contains a host path as well as a container
    path. Using splitdrive so windows absolute paths won't cause issues with
    splitting on ':'.
    """
    # splitdrive has limitations when it comes to relative paths, so when it's
    # relative, handle special case to set the drive to ''
    if volume_path[:1] in ('.', '~'):
        drive, remainder = '', volume_path
    else:
        drive, remainder = os.path.splitdrive(volume_path)

    if ':' not in remainder:
        return (volume_path, None)

    host, container = remainder.split(':', 1)
    return (container, drive + host)
def join_path_mapping(pair):
    """Render a (container, host) pair back into a volume spec string."""
    container, host = pair
    return container if host is None else ":".join((host, container))
def merge_labels(base, override):
    """Merge two label specs; entries from *override* win."""
    merged = parse_labels(base)
    merged.update(parse_labels(override))
    return merged
def parse_labels(labels):
    """Normalize a label spec (list of 'k=v' strings or a mapping) into a
    fresh dict; falsy input yields an empty dict.

    BUG FIX: the mapping case previously returned the caller's own dict.
    `merge_labels` mutates the returned dict, so merging silently
    modified the base service's label mapping in place.  It also fell
    through to an implicit None for unsupported types, where the
    analogous parse_environment raises — mirrored here for consistency.
    """
    if not labels:
        return {}
    if isinstance(labels, list):
        return dict(split_label(e) for e in labels)
    if isinstance(labels, dict):
        # Copy to protect the caller's mapping from later mutation.
        return dict(labels)
    raise ConfigurationError(
        "labels \"%s\" must be a list or mapping" %
        labels
    )
def split_label(label):
    """Split a 'key=value' label; a bare key gets an empty string value."""
    key, sep, value = label.partition('=')
    if sep:
        return [key, value]
    return label, ''
def expand_path(working_dir, path):
    """Absolutize *path* (with ~ expansion) relative to *working_dir*."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(os.path.join(working_dir, expanded))
def to_list(value):
    """Wrap a scalar string into a list; None becomes []; anything else
    (assumed already list-like) passes through unchanged."""
    if value is None:
        return []
    if isinstance(value, six.string_types):
        return [value]
    return value
def get_service_name_from_net(net_config):
    """Return the container name from a 'container:<name>' net value, or
    None for any other (or empty) net mode."""
    if not net_config or not net_config.startswith('container:'):
        return None
    return net_config.split(':', 1)[1]
def load_yaml(filename):
    """Parse a YAML file, converting I/O and parse errors into
    ConfigurationError with a qualified error-class name."""
    try:
        with open(filename, 'r') as handle:
            return yaml.safe_load(handle)
    except (IOError, yaml.YAMLError) as e:
        qualified_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
        raise ConfigurationError(u"{}: {}".format(qualified_name, e))
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for LocalFileSystem."""
# pytype: skip-file
import filecmp
import logging
import os
import shutil
import tempfile
import unittest
import mock
from apache_beam.io import localfilesystem
from apache_beam.io.filesystem import BeamIOError
from apache_beam.io.filesystems import FileSystems
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path.rstrip(separator), ) + paths)
return _join
class FileSystemsTest(unittest.TestCase):
    """Tests for the FileSystems facade applied to the local filesystem.

    Each test runs inside a dedicated temporary directory created in
    setUp() and removed in tearDown(), so tests are isolated.
    """

    def setUp(self):
        # Fresh scratch directory per test.
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_get_scheme(self):
        # Plain POSIX and Windows paths carry no scheme; URI-style paths do.
        self.assertIsNone(FileSystems.get_scheme('/abc/cdf'))
        self.assertIsNone(FileSystems.get_scheme('c:\\abc\cdf'))  # pylint: disable=anomalous-backslash-in-string
        self.assertEqual(FileSystems.get_scheme('gs://abc/cdf'), 'gs')

    def test_get_filesystem(self):
        # Scheme-less paths resolve to LocalFileSystem; unknown schemes raise.
        self.assertTrue(
            isinstance(
                FileSystems.get_filesystem('/tmp'),
                localfilesystem.LocalFileSystem))
        self.assertTrue(isinstance(FileSystems.get_filesystem('c:\\abc\def'),  # pylint: disable=anomalous-backslash-in-string
                                   localfilesystem.LocalFileSystem))
        with self.assertRaises(ValueError):
            FileSystems.get_filesystem('error://abc/def')

    @mock.patch('apache_beam.io.localfilesystem.os')
    def test_unix_path_join(self, *unused_mocks):
        # Test joining of Unix paths.
        localfilesystem.os.path.join.side_effect = _gen_fake_join('/')
        self.assertEqual(
            '/tmp/path/to/file', FileSystems.join('/tmp/path', 'to', 'file'))
        self.assertEqual(
            '/tmp/path/to/file', FileSystems.join('/tmp/path', 'to/file'))
        self.assertEqual(
            '/tmp/path/to/file', FileSystems.join('/', 'tmp/path', 'to/file'))
        self.assertEqual(
            '/tmp/path/to/file', FileSystems.join('/tmp/', 'path', 'to/file'))

    @mock.patch('apache_beam.io.localfilesystem.os')
    def test_windows_path_join(self, *unused_mocks):
        # Test joining of Windows paths.
        localfilesystem.os.path.join.side_effect = _gen_fake_join('\\')
        self.assertEqual(
            r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', 'to', 'file'))
        self.assertEqual(
            r'C:\tmp\path\to\file', FileSystems.join(r'C:\tmp\path', r'to\file'))
        self.assertEqual(
            r'C:\tmp\path\to\file',
            FileSystems.join(r'C:\tmp\path\\', 'to', 'file'))

    def test_mkdirs(self):
        # mkdirs creates intermediate directories in one call.
        path = os.path.join(self.tmpdir, 't1/t2')
        FileSystems.mkdirs(path)
        self.assertTrue(os.path.isdir(path))

    def test_mkdirs_failed(self):
        path = os.path.join(self.tmpdir, 't1/t2')
        FileSystems.mkdirs(path)

        # Check IOError if existing directory is created
        with self.assertRaises(IOError):
            FileSystems.mkdirs(path)
        with self.assertRaises(IOError):
            FileSystems.mkdirs(os.path.join(self.tmpdir, 't1'))

    def test_match_file(self):
        path = os.path.join(self.tmpdir, 'f1')
        open(path, 'a').close()

        # Match files in the temp directory
        result = FileSystems.match([path])[0]
        files = [f.path for f in result.metadata_list]
        self.assertEqual(files, [path])

    def test_match_file_empty(self):
        path = os.path.join(self.tmpdir, 'f2')  # Does not exist

        # Match files in the temp directory
        result = FileSystems.match([path])[0]
        files = [f.path for f in result.metadata_list]
        self.assertEqual(files, [])

    def test_match_file_exception(self):
        # Match files with None so that it throws an exception
        with self.assertRaisesRegex(BeamIOError,
                                    r'^Unable to get the Filesystem') as error:
            FileSystems.match([None])
        self.assertEqual(list(error.exception.exception_details), [None])

    def test_match_directory_with_files(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        open(path1, 'a').close()
        open(path2, 'a').close()

        # Match both the files in the directory
        path = os.path.join(self.tmpdir, '*')
        result = FileSystems.match([path])[0]
        files = [f.path for f in result.metadata_list]
        self.assertCountEqual(files, [path1, path2])

    def test_match_directory(self):
        # Matching a directory path itself yields the directory entry.
        result = FileSystems.match([self.tmpdir])[0]
        files = [f.path for f in result.metadata_list]
        self.assertEqual(files, [self.tmpdir])

    def test_copy(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        with open(path1, 'a') as f:
            f.write('Hello')

        FileSystems.copy([path1], [path2])
        self.assertTrue(filecmp.cmp(path1, path2))

    def test_copy_error(self):
        # Copying a missing source reports per-(src, dst) failure details.
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        with self.assertRaisesRegex(BeamIOError,
                                    r'^Copy operation failed') as error:
            FileSystems.copy([path1], [path2])
        self.assertEqual(
            list(error.exception.exception_details.keys()), [(path1, path2)])

    def test_copy_directory(self):
        path_t1 = os.path.join(self.tmpdir, 't1')
        path_t2 = os.path.join(self.tmpdir, 't2')
        FileSystems.mkdirs(path_t1)
        FileSystems.mkdirs(path_t2)

        path1 = os.path.join(path_t1, 'f1')
        path2 = os.path.join(path_t2, 'f1')
        with open(path1, 'a') as f:
            f.write('Hello')

        # Copying a directory copies the files it contains.
        FileSystems.copy([path_t1], [path_t2])
        self.assertTrue(filecmp.cmp(path1, path2))

    def test_rename(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        with open(path1, 'a') as f:
            f.write('Hello')

        FileSystems.rename([path1], [path2])
        self.assertTrue(FileSystems.exists(path2))
        self.assertFalse(FileSystems.exists(path1))

    def test_rename_error(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        with self.assertRaisesRegex(BeamIOError,
                                    r'^Rename operation failed') as error:
            FileSystems.rename([path1], [path2])
        self.assertEqual(
            list(error.exception.exception_details.keys()), [(path1, path2)])

    def test_rename_directory(self):
        # Renaming a directory moves its contents along with it.
        path_t1 = os.path.join(self.tmpdir, 't1')
        path_t2 = os.path.join(self.tmpdir, 't2')
        FileSystems.mkdirs(path_t1)

        path1 = os.path.join(path_t1, 'f1')
        path2 = os.path.join(path_t2, 'f1')
        with open(path1, 'a') as f:
            f.write('Hello')

        FileSystems.rename([path_t1], [path_t2])
        self.assertTrue(FileSystems.exists(path_t2))
        self.assertFalse(FileSystems.exists(path_t1))
        self.assertTrue(FileSystems.exists(path2))
        self.assertFalse(FileSystems.exists(path1))

    def test_exists(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        path2 = os.path.join(self.tmpdir, 'f2')
        with open(path1, 'a') as f:
            f.write('Hello')
        self.assertTrue(FileSystems.exists(path1))
        self.assertFalse(FileSystems.exists(path2))

    def test_delete(self):
        path1 = os.path.join(self.tmpdir, 'f1')
        with open(path1, 'a') as f:
            f.write('Hello')
        self.assertTrue(FileSystems.exists(path1))

        FileSystems.delete([path1])
        self.assertFalse(FileSystems.exists(path1))

    def test_delete_error(self):
        # Deleting a missing file reports the failing path in the details.
        path1 = os.path.join(self.tmpdir, 'f1')
        with self.assertRaisesRegex(BeamIOError,
                                    r'^Delete operation failed') as error:
            FileSystems.delete([path1])
        self.assertEqual(list(error.exception.exception_details.keys()), [path1])
if __name__ == '__main__':
    # Surface INFO-level log output while the suite runs, then hand
    # control to the unittest runner.
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    unittest.main()
| |
#!/usr/bin/env python
#! This file is a literate Python program. You can compile the documentation
#! using mylit (http://pypi.python.org/pypi/mylit/).
## title = "glitter Example: OpenCL"
## stylesheet = "pygments_style.css"
# <h1><i>glitter</i> Example: OpenCL</h1>
# <h2>Summary</h2>
# This program will open a GLUT window and render an animated particle
# simulation. The particle simulation is computed in OpenCL, and the results
# are written directly to an OpenGL array buffer.
# <img src="opencl.png">
# <h2>Front matter</h2>
# <h3>Module docstring</h3>
# The module docstring is used as a description of this example in the
# generated documentation:
"""Basic OpenGL/OpenCL interoperability example.
@author: Stephan Wenger
@date: 2012-02-29
"""
# <h3>Imports</h3>
# We use numpy for creating the particle data:
import numpy
# <a href="http://mathema.tician.de/software/pyopencl">PyOpenCL</a> (<a
# href="http://documen.tician.de/pyopencl/">documentation</a>, <a
# href="http://pypi.python.org/pypi/pyopencl">PyPi</a>, <a
# href="http://sourceforge.net/projects/pyopencl/">download</a>) is a
# convenient Python wrapper for OpenCL:
import pyopencl as cl
# For creating an OpenCL context that shares data with OpenGL, we need
# <code>get_gl_sharing_context_properties</code>:
from pyopencl.tools import get_gl_sharing_context_properties
# We can usually import classes and functions contained in <i>glitter</i>
# submodules directly from glitter:
from glitter import ArrayBuffer, VertexArray, get_default_program
# Modules with external dependencies other than numpy, such as platform
# dependent parts like methods for the generation of an OpenGL context,
# however, have to be imported from their respective submodules:
from glitter.contexts.glut import GlutWindow, main_loop
# <h2>OpenCL</h2>
# <h3>OpenCL kernel</h3>
# The OpenCL kernel (adapted from a <a
# href="http://www.cmsoft.com.br/index.php?option=com_content&view=category&layout=blog&id=99&Itemid=150">CMSoft
# tutorial</a>) computes a single iteration of a simple Eulerian particle
# simulation. Kernel code can be loaded from a file or defined inline as a
# Python string:
kernel_code = """
__kernel void animate(__global float4* positions,
__global float4* colors,
__global float4* velocities,
__global float4* initial_positions,
__global float4* initial_velocities,
float dt) {
unsigned int i = get_global_id(0);
float4 p = positions[i];
float4 v = velocities[i];
float life = velocities[i].w;
life -= dt;
if (life <= 0.0) {
p = initial_positions[i];
v = initial_velocities[i];
life = 1.0;
}
v.z -= 9.8 * dt;
p.xyz += v.xyz * dt;
v.w = life;
positions[i] = p;
velocities[i] = v;
colors[i].w = life;
}
"""
# <h3>OpenCL interaction</h3>
# The <code>CLCode</code> class will comprise all OpenCL interaction:
class CLCode(object):
    """Encapsulates all OpenCL state and the per-frame kernel invocations.

    Positions and colors live in OpenGL buffers shared with OpenCL;
    velocities and the initial particle state live in plain CL buffers.
    """
    # <h4>Initialization</h4>
    # The constructor receives OpenGL buffers for positions and colors as well
    # as a numpy array of velocities and a timestep for the simulation:
    def __init__(self, gl_positions, gl_colors, velocities, dt=0.001):
        """Create a GL-sharing CL context, compile the kernel, and wrap or
        upload all particle buffers. *dt* is the per-substep timestep."""
        # First, we have to initialize the OpenCL context. That means we have
        # to get a list of available platforms and select one:
        platform = cl.get_platforms()[0]
        # Then, we can create a context. Passing
        # <code>get_gl_sharing_context_properties()</code> as a property
        # ensures that we share state with the active OpenGL context:
        self.ctx = cl.Context(properties=[(cl.context_properties.PLATFORM, platform)] +
                              get_gl_sharing_context_properties(), devices=[platform.get_devices()[0]])
        # A command queue is necessary for serializing OpenCL commands:
        self.queue = cl.CommandQueue(self.ctx)
        # Finally, we can compile the kernel:
        self.program = cl.Program(self.ctx, kernel_code).build()
        # The constructor parameters are stored for later use:
        self.gl_positions = gl_positions
        self.gl_colors = gl_colors
        self.velocities = velocities
        # The <code>dt</code> value will later be passed to an OpenCL kernel as
        # a 32-bit float. We therefore wrap it in a numpy <code>float32</code>
        # object:
        self.dt = numpy.float32(dt)
        # Next, we generate OpenCL buffers. The positions and colors are
        # contained in OpenGL buffers, which we wrap in PyOpenCL's
        # <code>GLBuffer</code> class:
        self.cl_positions = cl.GLBuffer(self.ctx, cl.mem_flags.READ_WRITE, self.gl_positions._id)
        self.cl_colors = cl.GLBuffer(self.ctx, cl.mem_flags.READ_WRITE, self.gl_colors._id)
        # Note how we had to extract the <code>_id</code>s from the
        # <code>ArrayBuffer</code> objects. In pure <i>glitter</i> code, you
        # should never (have to) access this value; however for interaction
        # with other OpenGL-related libraries, this cannot always be avoided.
        # The velocities are given as a numpy array, which is simply uploaded
        # into a new OpenCL <code>Buffer</code> object along with the initial
        # values of the positions and colors:
        self.cl_velocities = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY |
                                       cl.mem_flags.COPY_HOST_PTR, hostbuf=velocities)
        self.cl_initial_positions = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY |
                                              cl.mem_flags.COPY_HOST_PTR, hostbuf=self.gl_positions.data)
        self.cl_initial_velocities = cl.Buffer(self.ctx, cl.mem_flags.READ_ONLY |
                                               cl.mem_flags.COPY_HOST_PTR, hostbuf=self.velocities)

    # <h4>Execution</h4>
    # The <code>execute</code> function executes the OpenCL kernel several
    # times in a row:
    def execute(self, sub_intervals):
        """Advance the simulation by *sub_intervals* kernel launches.

        GL buffers are acquired before and released after the launches so
        OpenGL and OpenCL never touch them concurrently.
        """
        # First, we have to make sure that OpenGL is done using the buffer objects:
        cl.enqueue_acquire_gl_objects(self.queue, [self.cl_positions, self.cl_colors])
        # Now, we can safely call the kernel. Its arguments are buffer objects:
        args = (self.cl_positions, self.cl_colors, self.cl_velocities,
                self.cl_initial_positions, self.cl_initial_velocities, self.dt)
        # The kernel will be executed several times with a small step size.
        # This increases the accuracy with respect to a single step with a
        # large step size. However, it is not necessary to display all the
        # intermediate results.
        for i in range(0, sub_intervals):
            # In each step, the <code>animate</code> kernel function is called.
            # Its arguments are the queue object that schedules its execution,
            # the global and local block sizes, and any arguments that will be
            # passed to the actual kernel.
            self.program.animate(self.queue, [len(self.gl_positions)], None, *args)
        # Finally, we allow OpenGL to access the buffers again:
        cl.enqueue_release_gl_objects(self.queue, [self.cl_positions, self.cl_colors])
# <h2>Main class</h2>
# We wrap all the OpenGL interaction in a class. The class will contain an
# <code>__init__()</code> method to set up all OpenGL objects, any required
# callback methods, as well as a <code>run()</code> method to trigger execution
# of the GLUT main loop.
class OpenCLExample(object):
    """GLUT application driving the OpenCL-animated particle system.

    Owns the window, shader, vertex array and the CLCode simulation
    object; wires up the GLUT display/keyboard/timer callbacks.
    """
    # <h3>Initialization</h3>
    # When a <code>OpenCLExample</code> instance is created, we need to
    # initialize a few OpenGL objects.
    def __init__(self):
        """Create the window and GL buffers, seed the particle data, and
        build the CLCode simulation object."""
        # First, we create a window; this also creates an OpenGL context.
        self.window = GlutWindow(double=True, alpha=True, depth=True)
        # Then, we set the GLUT display and keyboard callback functions which
        # will be defined later.
        self.window.display_callback = self.display
        self.window.keyboard_callback = self.keyboard
        # Here, we generate numpy arrays to hold the positions, colors, and
        # velocities of the particles:
        num = 200000
        positions = numpy.empty((num, 4), dtype=numpy.float32)
        colors = numpy.empty((num, 4), dtype=numpy.float32)
        velocities = numpy.empty((num, 4), dtype=numpy.float32)
        # So far, the array contents are undefined. We have to initialize them
        # with meaningful values (a jittered ring of particles in the xy
        # plane, all green, flying outward and upward):
        positions[:, 0] = numpy.sin(numpy.arange(0, num) * 2 * numpy.pi / num) * (numpy.random.random_sample((num,)) / 3 + 0.2)
        positions[:, 1] = numpy.cos(numpy.arange(0, num) * 2 * numpy.pi / num) * (numpy.random.random_sample((num,)) / 3 + 0.2)
        positions[:, 2:] = 0, 1
        colors[:] = 0, 1, 0, 1
        velocities[:, :2] = 2 * positions[:, :2]
        velocities[:, 2] = 3
        # The w component carries the particle's remaining life (see kernel).
        velocities[:, 3] = numpy.random.random_sample((num,))
        # Instead of simply generating a vertex array from the position and color
        # data, we first generate array buffers for them:
        gl_positions = ArrayBuffer(data=positions, usage="DYNAMIC_DRAW")
        gl_colors = ArrayBuffer(data=colors, usage="DYNAMIC_DRAW")
        # These array buffers will later also be used by OpenCL. We do not need to
        # wrap <code>velocities</code> in this way, as it will only be used by
        # OpenCL and can be wrapped in an OpenCL buffer directly.
        # We now create a vertex array that will pass the position and color data
        # to the shader. The vertex array constructor accepts
        # <code>ArrayBuffer</code> instances:
        self.vao = VertexArray(gl_positions, gl_colors)
        # In the OpenGL core profile, there is no such thing as a "standard pipeline"
        # any more. We use the minimalistic <code>defaultpipeline</code> from the
        # <code>glitter.convenience</code> module to create a shader program instead:
        self.shader = get_default_program()
        # Here, we create the <code>CLCode</code> object that manages OpenCL
        # interaction. It is passed the OpenGL buffer objects as well as a numpy
        # array of velocities.
        self.clcode = CLCode(gl_positions, gl_colors, velocities)

    # <h3>Callback functions</h3>
    # <h4>Display function</h4>
    # Here we define the display function. It will be called by GLUT whenever the
    # screen has to be redrawn.
    def display(self):
        """GLUT display callback: clear, draw the particles, swap buffers."""
        # First we clear the default framebuffer:
        self.window.clear()
        # To draw the vertex array, we use:
        self.vao.draw()
        # After all rendering commands have been issued, we swap the back buffer to
        # the front, making the rendered image visible all at once:
        self.window.swap_buffers()

    # <h4>Timer function</h4>
    # The animation is controlled by a GLUT timer. The timer callback animates the
    # particle system, schedules the next timer event, and causes a screen redraw:
    def timer(self):
        """GLUT timer callback: step the simulation, reschedule, redraw."""
        # We first tell an instance of the <code>CLCode</code> class to execute the
        # OpenCL kernel:
        self.clcode.execute(10)
        # The following line schedules the next timer event to execute after one millisecond.
        self.window.add_timer(1, self.timer)
        # Finally, we tell GLUT to redraw the screen.
        self.window.post_redisplay()

    # <h4>Keyboard function</h4>
    # To further illustrate the concept of GLUT callbacks, here's a keyboard
    # handler that will simply make the program exit when any key is pressed:
    def keyboard(self, key, x, y):
        """GLUT keyboard callback: any key press exits the program."""
        raise SystemExit

    # <h3>Running</h3>
    # We will call the <code>run()</code> method later to run the OpenGL code.
    def run(self):
        """Start the timer, bind the shader, and enter the GLUT main loop."""
        # To start the animation, we call the timer once; all subsequent timer
        # calls will be scheduled by the timer function itself.
        self.timer()
        # The default program is bound by using a <code>with</code> statement. At
        # the same time, we can pass in additional uniform variables, such as the
        # modelview matrix:
        with self.shader(modelview_matrix=((1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 2))):
            # With the shader bound, we enter the GLUT main loop.
            main_loop()
        # When the main loop exits, control is handed back to the script,
        # unless <code>SystemExit</code> has been raised by the keyboard
        # handler.
# <h2>Main section</h2>
# Finally, if this program is being run from the command line, we instantiate
# the main class and run it.
if __name__ == "__main__":
    # Build the example application and hand control to the GLUT loop.
    app = OpenCLExample()
    app.run()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-01"
self.config = config
    def create(
            self, resource_group_name, network_watcher_name, packet_capture_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Create and start a packet capture on the specified VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture
         operation.
        :type parameters: :class:`PacketCapture
         <azure.mgmt.network.v2017_03_01.models.PacketCapture>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`PacketCaptureResult
         <azure.mgmt.network.v2017_03_01.models.PacketCaptureResult>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'PacketCapture')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running create operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 201 is the only success code here; anything else becomes a
            # CloudError carrying the service's request id.
            if response.status_code not in [201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 201:
                deserialized = self._deserialize('PacketCaptureResult', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True bypasses the poller: one send, wrapped response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
        """Gets a packet capture session by name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`PacketCaptureResult
         <azure.mgmt.network.v2017_03_01.models.PacketCaptureResult>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request — this operation is synchronous (no
        # long-running poller): a single GET with immediate deserialization.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def delete(
            self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 204 (no content) and 202 (accepted) are both success codes;
            # the operation produces no body, so only raw wrapping applies.
            if response.status_code not in [204, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw=True bypasses the poller: one send, wrapped response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def stop(
            self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
        """Stops a specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL — note the trailing '/stop' action segment.
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced service-side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial POST to the '/stop' action starts the operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 and 202 are success codes; the operation returns no body,
            # so only raw wrapping applies.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw=True bypasses the poller: one send, wrapped response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get_status(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture
session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`PacketCaptureQueryStatusResult
<azure.mgmt.network.v2017_03_01.models.PacketCaptureQueryStatusResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def list(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PacketCaptureResultPaged
<azure.mgmt.network.v2017_03_01.models.PacketCaptureResultPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PacketCaptureResultPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PacketCaptureResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
| |
import sys
from ..pakbase import Package
class Mt3dAdv(Package):
    """
    MT3DMS Advection Package Class.

    Parameters
    ----------
    model : model object
        The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which
        this package will be added.
    mixelm : int
        MIXELM is an integer flag for the advection solution option.
        MIXELM = 0, the standard finite-difference method with upstream or
        central-in-space weighting, depending on the value of NADVFD;
        = 1, the forward-tracking method of characteristics (MOC);
        = 2, the backward-tracking modified method of characteristics (MMOC);
        = 3, the hybrid method of characteristics (HMOC) with MOC or MMOC
        automatically and dynamically selected;
        = -1, the third-order TVD scheme (ULTIMATE).
    percel : float
        PERCEL is the Courant number (i.e., the number of cells, or a
        fraction of a cell) advection will be allowed in any direction in one
        transport step.
        For implicit finite-difference or particle-tracking-based schemes,
        there is no limit on PERCEL, but for accuracy reasons, it is generally
        not set much greater than one. Note, however, that the PERCEL limit is
        checked over the entire model grid. Thus, even if PERCEL > 1,
        advection may not be more than one cell's length at most model
        locations.
        For the explicit finite-difference or the third-order TVD scheme,
        PERCEL is also a stability constraint which must not exceed one and
        will be automatically reset to one if a value greater than one is
        specified.
    mxpart : int
        MXPART is the maximum total number of moving particles allowed and is
        used only when MIXELM = 1 or 3.
    nadvfd : int
        NADVFD is an integer flag indicating which weighting scheme should be
        used; it is needed only when the advection term is solved using the
        implicit finite-difference method.
        NADVFD = 0 or 1, upstream weighting (default); = 2, central-in-space
        weighting.
    itrack : int
        ITRACK is a flag indicating which particle-tracking algorithm is
        selected for the Eulerian-Lagrangian methods.
        ITRACK = 1, the first-order Euler algorithm is used.
        = 2, the fourth-order Runge-Kutta algorithm is used; this option is
        computationally demanding and may be needed only when PERCEL is set
        greater than one.
        = 3, the hybrid first- and fourth-order algorithm is used; the
        Runge-Kutta algorithm is used in sink/source cells and the cells next
        to sinks/sources while the Euler algorithm is used elsewhere.
    wd : float
        WD is a concentration weighting factor between 0.5 and 1. It is used
        for operator splitting in the particle-tracking-based methods. The
        value of 0.5 is generally adequate. The value of WD may be adjusted
        to achieve better mass balance. Generally, it can be increased toward
        1.0 as advection becomes more dominant.
    dceps : float
        DCEPS is a small Relative Cell Concentration Gradient below which
        advective transport is considered insignificant.
    nplane : int
        NPLANE is a flag indicating whether the random or
        fixed pattern is selected for initial placement of moving particles.
        If NPLANE = 0, the random pattern is selected for initial placement.
        Particles are distributed randomly in both the horizontal and
        vertical directions by calling a random number generator (Figure
        18b). This option is usually preferred and leads to smaller mass
        balance discrepancy in nonuniform or diverging/converging flow
        fields.
        If NPLANE > 0, the fixed pattern is selected for initial placement.
        The value of NPLANE serves as the number of vertical 'planes' on
        which initial particles are placed within each cell block (Figure
        18a). The fixed pattern may work better than the random pattern only
        in relatively uniform flow fields. For two-dimensional simulations in
        plan view, set NPLANE = 1. For cross sectional or three-dimensional
        simulations, NPLANE = 2 is normally adequate. Increase NPLANE if more
        resolution in the vertical direction is desired.
    npl : int
        NPL is the number of initial particles per cell to be placed at cells
        where the Relative Cell Concentration Gradient is less than or equal
        to DCEPS. Generally, NPL can be set to zero since advection is
        considered insignificant when the Relative Cell Concentration
        Gradient is less than or equal to DCEPS. Setting NPL equal to NPH
        causes a uniform number of particles to be placed in every cell over
        the entire grid (i.e., the uniform approach).
    nph : int
        NPH is the number of initial particles per cell to be placed at cells
        where the Relative Cell Concentration Gradient is greater than DCEPS.
        The selection of NPH depends on the nature of the flow field and also
        the computer memory limitation. Generally, a smaller number should be
        used in relatively uniform flow fields and a larger number should be
        used in relatively nonuniform flow fields. However, values exceeding
        16 in two-dimensional simulation or 32 in three-dimensional
        simulation are rarely necessary. If the random pattern is chosen, NPH
        particles are randomly distributed within the cell block. If the
        fixed pattern is chosen, NPH is divided by NPLANE to yield the number
        of particles to be placed per vertical plane, which is rounded to one
        of the values shown in Figure 30.
    npmin : int
        NPMIN is the minimum number of particles allowed per cell. If the
        number of particles in a cell at the end of a transport step is fewer
        than NPMIN, new particles are inserted into that cell to maintain a
        sufficient number of particles. NPMIN can be set to zero in
        relatively uniform flow fields and to a number greater than zero in
        diverging/converging flow fields. Generally, a value between zero and
        four is adequate.
    npmax : int
        NPMAX is the maximum number of particles allowed per cell. If the
        number of particles in a cell exceeds NPMAX, all particles are
        removed from that cell and replaced by a new set of particles equal
        to NPH to maintain mass balance. Generally, NPMAX can be set to
        approximately two times of NPH.
    interp : int
        INTERP is a flag indicating the concentration interpolation method
        for use in the MMOC scheme. Currently, only linear interpolation is
        implemented.
    nlsink : int
        NLSINK is a flag indicating whether the random or fixed pattern is
        selected for initial placement of particles to approximate sink
        cells in the MMOC scheme. The convention is the same as that for
        NPLANE. It is generally adequate to set NLSINK equivalent to NPLANE.
    npsink : int
        NPSINK is the number of particles used to approximate sink cells in
        the MMOC scheme. The convention is the same as that for NPH. It is
        generally adequate to set NPSINK equivalent to NPH.
    dchmoc : float
        DCHMOC is the critical Relative Concentration Gradient for
        controlling the selective use of either MOC or MMOC in the HMOC
        solution scheme.
        The MOC solution is selected at cells where the Relative
        Concentration Gradient is greater than DCHMOC.
        The MMOC solution is selected at cells where the Relative
        Concentration Gradient is less than or equal to DCHMOC.
    extension : string
        Filename extension (default is 'adv')
    unitnumber : int
        File unit number (default is None).
    filenames : str or list of str
        Filenames to use for the package. If filenames=None the package name
        will be created using the model name and package extension. If a
        single string is passed the package will be set to the string.
        Default is None.

    Attributes
    ----------

    Methods
    -------

    See Also
    --------

    Notes
    -----

    Examples
    --------

    >>> import flopy
    >>> m = flopy.mt3d.Mt3dms()
    >>> adv = flopy.mt3d.Mt3dAdv(m)

    """

    def __init__(self, model, mixelm=3, percel=0.75, mxpart=800000, nadvfd=1,
                 itrack=3, wd=0.5,
                 dceps=1e-5, nplane=2, npl=10, nph=40, npmin=5, npmax=80,
                 nlsink=0, npsink=15,
                 dchmoc=0.0001, extension='adv', unitnumber=None,
                 filenames=None):
        # Resolve the file unit: None uses the package default; 0 uses the
        # reserved unit number.
        if unitnumber is None:
            unitnumber = Mt3dAdv.defaultunit()
        elif unitnumber == 0:
            unitnumber = Mt3dAdv.reservedunit()

        # set filenames
        if filenames is None:
            filenames = [None]
        elif isinstance(filenames, str):
            filenames = [filenames]

        # Fill namefile items
        name = [Mt3dAdv.ftype()]
        units = [unitnumber]
        extra = ['']

        # set package name
        fname = [filenames[0]]

        # Call ancestor's init to set self.parent, extension, name and
        # unit number
        Package.__init__(self, model, extension=extension, name=name,
                         unit_number=units, extra=extra, filenames=fname)

        self.mixelm = mixelm
        self.percel = percel
        self.mxpart = mxpart
        self.nadvfd = nadvfd
        self.itrack = itrack
        self.wd = wd
        self.dceps = dceps
        self.nplane = nplane
        self.npl = npl
        self.nph = nph
        self.npmin = npmin
        self.npmax = npmax
        # INTERP is hard-wired to 1: linear interpolation is the only method
        # MT3DMS currently implements.  Expose it as a constructor argument
        # if MT3DMS ever adds alternative interpolation schemes.
        self.interp = 1
        self.nlsink = nlsink
        self.npsink = npsink
        self.dchmoc = dchmoc
        self.parent.add_package(self)
        return

    def write_file(self):
        """
        Write the package file.

        Returns
        -------
        None

        """
        f_adv = open(self.fn_path, 'w')
        # Item B1: MIXELM, PERCEL, MXPART, NADVFD -- always written
        f_adv.write('%10i%10f%10i%10i\n' % (self.mixelm, self.percel,
                                            self.mxpart, self.nadvfd))
        # Item B2: ITRACK, WD -- particle-tracking schemes only
        if (self.mixelm > 0):
            f_adv.write('%10i%10f\n' % (self.itrack, self.wd))
        # Item B3: particle placement controls -- MOC (1) and HMOC (3) only
        if ((self.mixelm == 1) or (self.mixelm == 3)):
            f_adv.write('%10.4e%10i%10i%10i%10i%10i\n' % (self.dceps,
                        self.nplane, self.npl, self.nph, self.npmin,
                        self.npmax))
        # Item B4: sink-cell approximation -- MMOC (2) and HMOC (3) only
        if ((self.mixelm == 2) or (self.mixelm == 3)):
            f_adv.write('%10i%10i%10i\n' % (self.interp, self.nlsink,
                                            self.npsink))
        # Item B5: MOC/MMOC selection criterion -- HMOC (3) only
        if (self.mixelm == 3):
            f_adv.write('%10f\n' % (self.dchmoc))
        f_adv.close()
        return

    @staticmethod
    def load(f, model, ext_unit_dict=None):
        """
        Load an existing package.

        Parameters
        ----------
        f : filename or file handle
            File to load.
        model : model object
            The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
            which this package will be added.
        ext_unit_dict : dictionary, optional
            If the arrays in the file are specified using EXTERNAL,
            or older style array control records, then `f` should be a file
            handle.  In this case ext_unit_dict is required, which can be
            constructed using the function
            :class:`flopy.utils.mfreadnam.parsenamefile`.

        Returns
        -------
        adv : Mt3dAdv object
            Mt3dAdv object.

        Examples
        --------

        >>> import flopy
        >>> mt = flopy.mt3d.Mt3dms()
        >>> adv = flopy.mt3d.Mt3dAdv.load('test.adv', mt)

        """
        if model.verbose:
            sys.stdout.write('loading adv package file...\n')

        # Open file, if necessary, remembering whether we did so that the
        # handle can be closed again when done (it was previously leaked).
        openfile = not hasattr(f, 'read')
        if openfile:
            filename = f
            f = open(filename, 'r')

        # Dataset 0 -- comment line
        while True:
            line = f.readline()
            if line[0] != '#':
                break

        # Item B1: MIXELM, PERCEL, MXPART, NADVFD - line already read above
        if model.verbose:
            print('   loading MIXELM, PERCEL, MXPART, NADVFD...')
        mixelm = int(line[0:10])
        percel = float(line[10:20])
        # MXPART only applies to the particle-based schemes (MOC/HMOC).
        mxpart = 0
        if mixelm == 1 or mixelm == 3:
            if len(line[20:30].strip()) > 0:
                mxpart = int(line[20:30])
        # NADVFD only applies to the finite-difference scheme.
        nadvfd = 0
        if mixelm == 0:
            if len(line[30:40].strip()) > 0:
                nadvfd = int(line[30:40])
        if model.verbose:
            print('   MIXELM {}'.format(mixelm))
            # bug fix: this line previously echoed NADVFD instead of PERCEL
            print('   PERCEL {}'.format(percel))
            print('   MXPART {}'.format(mxpart))
            print('   NADVFD {}'.format(nadvfd))

        # Item B2: ITRACK WD
        itrack = None
        wd = None
        if mixelm == 1 or mixelm == 2 or mixelm == 3:
            if model.verbose:
                print('   loading ITRACK, WD...')
            line = f.readline()
            itrack = int(line[0:10])
            wd = float(line[10:20])
            if model.verbose:
                print('   ITRACK {}'.format(itrack))
                print('   WD {}'.format(wd))

        # Item B3: DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX
        dceps = None
        nplane = None
        npl = None
        nph = None
        npmin = None
        npmax = None
        if mixelm == 1 or mixelm == 3:
            if model.verbose:
                print('   loading DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX...')
            line = f.readline()
            dceps = float(line[0:10])
            nplane = int(line[10:20])
            npl = int(line[20:30])
            nph = int(line[30:40])
            npmin = int(line[40:50])
            npmax = int(line[50:60])
            if model.verbose:
                print('   DCEPS {}'.format(dceps))
                print('   NPLANE {}'.format(nplane))
                print('   NPL {}'.format(npl))
                print('   NPH {}'.format(nph))
                print('   NPMIN {}'.format(npmin))
                print('   NPMAX {}'.format(npmax))

        # Item B4: INTERP, NLSINK, NPSINK
        interp = None
        nlsink = None
        npsink = None
        if mixelm == 2 or mixelm == 3:
            if model.verbose:
                print('   loading INTERP, NLSINK, NPSINK...')
            line = f.readline()
            interp = int(line[0:10])
            nlsink = int(line[10:20])
            npsink = int(line[20:30])
            if model.verbose:
                print('   INTERP {}'.format(interp))
                print('   NLSINK {}'.format(nlsink))
                print('   NPSINK {}'.format(npsink))

        # Item B5: DCHMOC
        dchmoc = None
        if mixelm == 3:
            if model.verbose:
                print('   loading DCHMOC...')
            line = f.readline()
            dchmoc = float(line[0:10])
            if model.verbose:
                print('   DCHMOC {}'.format(dchmoc))

        # Close the file only if this method opened it.
        if openfile:
            f.close()

        # set package unit number
        unitnumber = None
        filenames = [None]
        if ext_unit_dict is not None:
            unitnumber, filenames[0] = \
                model.get_ext_dict_attr(ext_unit_dict,
                                        filetype=Mt3dAdv.ftype())

        # Construct and return adv package
        adv = Mt3dAdv(model, mixelm=mixelm, percel=percel,
                      mxpart=mxpart, nadvfd=nadvfd,
                      itrack=itrack, wd=wd,
                      dceps=dceps, nplane=nplane, npl=npl, nph=nph,
                      npmin=npmin, npmax=npmax,
                      nlsink=nlsink, npsink=npsink,
                      dchmoc=dchmoc, unitnumber=unitnumber,
                      filenames=filenames)
        return adv

    @staticmethod
    def ftype():
        # Namefile/package type string.
        return 'ADV'

    @staticmethod
    def defaultunit():
        # Default file unit number for the ADV package.
        return 32

    @staticmethod
    def reservedunit():
        # Unit number used when the caller passes unitnumber=0.
        return 2
| |
"""Base classes for cursors.
These classes centralize common code.
"""
from vtdb import dbexceptions
class BasePEP0249Cursor(object):
"""Cursor with common PEP0249 implementations."""
def __init__(self):
self._clear_common_state()
self._conn = None
def callproc(self):
"""For PEP 0249."""
raise dbexceptions.NotSupportedError
def executemany(self, *pargs):
"""For PEP 0249."""
_ = pargs
raise dbexceptions.NotSupportedError
def nextset(self):
"""For PEP 0249."""
raise dbexceptions.NotSupportedError
def setinputsizes(self, sizes):
"""For PEP 0249."""
_ = sizes
def setoutputsize(self, size, column=None):
"""For PEP 0249."""
_ = size, column
@property
def rownumber(self):
return self.index
def __iter__(self):
"""For PEP 0249: To make cursors compatible to the iteration protocol."""
return self
def next(self):
"""For PEP 0249."""
val = self.fetchone()
if val is None:
raise StopIteration
return val
def close(self):
"""For PEP 0249."""
raise NotImplementedError
def fetchone(self):
"""For PEP 0249."""
raise NotImplementedError
def fetchmany(self, size=None):
"""For PEP 0249."""
raise NotImplementedError
def fetchall(self):
"""For PEP 0249."""
raise NotImplementedError
def _clear_common_state(self):
self.index = 0
def _get_conn(self):
if not self._conn:
raise dbexceptions.ProgrammingError(
'Cannot use closed cursor %s.' % self.__class__)
return self._conn
class BaseListCursor(BasePEP0249Cursor):
  """Base cursor where results are stored as a list.

  Execute call should return a (results, rowcount, lastrowid,
  description) tuple. The fetch commands traverse self.results.
  """

  arraysize = 1

  def __init__(self):
    super(BaseListCursor, self).__init__()
    self._clear_list_state()
    self.effective_caller_id = None

  def _clear_list_state(self):
    """Reset per-execute state (position, results and result metadata)."""
    self._clear_common_state()
    self.description = None
    self.lastrowid = None
    self.rowcount = None
    self.results = None

  def set_effective_caller_id(self, effective_caller_id):
    """Set the effective caller id that will be used in upcoming calls."""
    self.effective_caller_id = effective_caller_id

  def begin(self):
    """Start a transaction on the underlying connection."""
    return self._get_conn().begin(self.effective_caller_id)

  def commit(self):
    """Commit the current transaction."""
    return self._get_conn().commit()

  def rollback(self):
    """Roll back the current transaction."""
    return self._get_conn().rollback()

  def _check_fetch(self):
    """Raise if a fetch is attempted before any execute."""
    if self.results is None:
      raise dbexceptions.ProgrammingError('Fetch called before execute.')

  def _handle_transaction_sql(self, sql):
    """Intercept bare BEGIN/COMMIT/ROLLBACK statements.

    Returns True if sql was a transaction-control statement (handled
    here); False if the caller should execute it as a regular query.
    """
    sql_check = sql.strip().lower()
    if sql_check == 'begin':
      # bug fix: a redundant self.set_effective_caller_id(
      # self.effective_caller_id) no-op call was removed here.
      self.begin()
      return True
    elif sql_check == 'commit':
      self.commit()
      return True
    elif sql_check == 'rollback':
      self.rollback()
      return True
    else:
      return False

  def close(self):
    """For PEP 0249: drop results and detach from the connection."""
    self._clear_list_state()
    self._conn = None

  def fetchone(self):
    """Return the next row, or None when the results are exhausted."""
    self._check_fetch()
    if self.index >= len(self.results):
      return None
    self.index += 1
    return self.results[self.index - 1]

  def fetchmany(self, size=None):
    """Return up to size rows (default self.arraysize)."""
    self._check_fetch()
    if self.index >= len(self.results):
      return []
    if size is None:
      size = self.arraysize
    res = self.results[self.index:self.index + size]
    self.index += size
    return res

  def fetchall(self):
    """Return all remaining rows."""
    self._check_fetch()
    return self.fetchmany(len(self.results) - self.index)
class BaseStreamCursor(BasePEP0249Cursor):
  """Base cursor where results are returned as a generator.

  This supports large queries. An execute call returns a (generator,
  description) pair. The fetch functions read items from the generator
  until it is exhausted.
  """

  arraysize = 1

  def __init__(self):
    super(BaseStreamCursor, self).__init__()
    self._clear_stream_state()
    self.effective_caller_id = None

  def set_effective_caller_id(self, effective_caller_id):
    """Set the effective caller id that will be used in upcoming calls."""
    self.effective_caller_id = effective_caller_id

  def _clear_stream_state(self):
    """Reset per-execute state (position, description, generator)."""
    self._clear_common_state()
    self.description = None
    self.generator = None

  def fetchone(self):
    """Pull the next row off the generator; None once exhausted."""
    if self.description is None:
      raise dbexceptions.ProgrammingError('Fetch called before execute.')
    self.index += 1
    try:
      return self.generator.next()
    except StopIteration:
      return None

  # fetchmany can be called until it returns no rows. Returning less rows
  # than what we asked for is also an indication we ran out, but the cursor
  # API in PEP249 is silent about that.
  def fetchmany(self, size=None):
    limit = self.arraysize if size is None else size
    rows = []
    for _ in xrange(limit):
      row = self.fetchone()
      if row is None:
        break
      rows.append(row)
    return rows

  def fetchall(self):
    """Drain the generator into a list of all remaining rows."""
    # iter(callable, sentinel) calls fetchone until it returns None.
    return list(iter(self.fetchone, None))

  def close(self):
    """For PEP 0249: close the generator and detach from the connection."""
    if self.generator:
      self.generator.close()
    self._clear_stream_state()
    self._conn = None
| |
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from electrum_arg_gui.qt.util import *
from electrum_arg_gui.qt.qrcodewidget import QRCodeWidget
from electrum_arg_gui.qt.amountedit import AmountEdit
from electrum_arg_gui.qt.main_window import StatusBarButton
from electrum_arg.i18n import _
from electrum_arg.plugins import hook
from trustedcoin import TrustedCoinPlugin, server
class Plugin(TrustedCoinPlugin):
    """Qt front-end for the TrustedCoin two-factor cosigning plugin."""

    @hook
    def on_new_window(self, window):
        """Install the TrustedCoin status-bar button for a 2fa wallet and
        kick off a background fetch of the billing information."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if wallet.can_sign_without_server():
            # typo fix: message previously read "This wallet is was restored"
            msg = ' '.join([
                _('This wallet was restored from seed, and it contains two master private keys.'),
                _('Therefore, two-factor authentication is disabled.')
            ])
            action = lambda: window.show_message(msg)
        else:
            action = partial(self.settings_dialog, window)
        button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
                                 _("TrustedCoin"), action)
        window.statusBar().addPermanentWidget(button)
        # Fetch billing info off the UI thread.
        t = Thread(target=self.request_billing_info, args=(wallet,))
        t.setDaemon(True)
        t.start()

    def auth_dialog(self, window):
        """Prompt for a Google Authenticator code; return it, or None if the
        dialog is cancelled."""
        d = WindowModalDialog(window, _("Authorization"))
        vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int = True)
        msg = _('Please enter your Google Authenticator code')
        vbox.addWidget(QLabel(msg))
        grid = QGridLayout()
        grid.setSpacing(8)
        grid.addWidget(QLabel(_('Code')), 1, 0)
        grid.addWidget(pw, 1, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        return pw.get_amount()

    @hook
    def sign_tx(self, window, tx):
        """Collect an OTP code when the server key (x3/) is needed to sign
        tx, and stash it on the wallet for the signing code to use."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if not wallet.can_sign_without_server():
            self.print_error("twofactor:sign_tx")
            auth_code = None
            if wallet.keystores['x3/'].get_tx_derivations(tx):
                auth_code = self.auth_dialog(window)
            else:
                self.print_error("twofactor: xpub3 not needed")
            window.wallet.auth_code = auth_code

    def waiting_dialog(self, window, on_finished=None):
        """Show a modal waiting dialog while billing info is requested."""
        task = partial(self.request_billing_info, window.wallet)
        return WaitingDialog(window, 'Getting billing information...', task,
                             on_finished)

    @hook
    def abort_send(self, window):
        """Return True to abort the send when billing info cannot be
        obtained from the server."""
        wallet = window.wallet
        if not isinstance(wallet, self.wallet_class):
            return
        if not wallet.can_sign_without_server():
            if wallet.billing_info is None:
                # request billing info before forming the transaction
                # bug fix: this was previously called as a bare function,
                # `waiting_dialog(self, window)`, raising NameError at runtime
                self.waiting_dialog(window).wait()
                if wallet.billing_info is None:
                    window.show_message('Could not contact server')
                    return True
        return False

    def settings_dialog(self, window):
        """Fetch billing info, then show the settings dialog."""
        self.waiting_dialog(window, partial(self.show_settings_dialog, window))

    def show_settings_dialog(self, window, success):
        """Display TrustedCoin billing information and prepay options."""
        if not success:
            window.show_message(_('Server not reachable.'))
            return

        wallet = window.wallet
        d = WindowModalDialog(window, _("TrustedCoin Information"))
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()

        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)

        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)

        msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction everytime you run out of prepaid transactions.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)

        # One radio button per prepay plan; checking one stores the choice.
        grid = QGridLayout()
        vbox.addLayout(grid)
        price_per_tx = wallet.price_per_tx
        n_prepay = wallet.num_prepay(self.config)
        i = 0
        for k, v in sorted(price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
            grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
            b = QRadioButton()
            b.setChecked(k == n_prepay)
            # k=k binds the current value; a plain closure would see the last k.
            b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
            grid.addWidget(b, i, 2)
            i += 1

        n = wallet.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def on_buy(self, window, k, v, d):
        """Prefill and freeze the send tab with a prepaid-transactions
        purchase to the TrustedCoin billing address."""
        d.close()
        if window.pluginsdialog:
            window.pluginsdialog.close()
        wallet = window.wallet
        uri = "argentum:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
        wallet.is_billing = True
        window.pay_to_URI(uri)
        # Freeze the fields so the billing payment cannot be edited.
        window.payto_e.setFrozen(True)
        window.message_e.setFrozen(True)
        window.amount_e.setFrozen(True)

    def accept_terms_of_use(self, window):
        """Display the TOS (fetched in the background) and collect an e-mail
        address; return the address once accepted."""
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Terms of Service")))

        tos_e = QTextEdit()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)

        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)

        next_button = window.next_button
        prior_button_text = next_button.text()
        next_button.setText(_('Accept'))

        def request_TOS():
            # Runs in a worker thread; signals the UI when the TOS arrive.
            tos = server.get_terms_of_service()
            self.TOS = tos
            window.emit(SIGNAL('twofactor:TOS'))

        def on_result():
            tos_e.setText(self.TOS)

        def set_enabled():
            # Only allow acceptance once a plausible e-mail is entered.
            next_button.setEnabled(re.match(regexp,email_e.text()) is not None)

        window.connect(window, SIGNAL('twofactor:TOS'), on_result)
        t = Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()

        regexp = r"[^@]+@[^@]+\.[^@]+"
        email_e.textChanged.connect(set_enabled)
        email_e.setFocus(True)
        window.exec_layout(vbox, next_enabled=False)
        next_button.setText(prior_button_text)

        return str(email_e.text())

    def request_otp_dialog(self, window, _id, otp_secret):
        """Ask for the Google Authenticator code during wallet setup.

        Returns a (code, lost_account) pair; lost_account is True when the
        user requests a new secret."""
        vbox = QVBoxLayout()
        if otp_secret is not None:
            uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
            l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
            l.setWordWrap(True)
            vbox.addWidget(l)
            qrw = QRCodeWidget(uri)
            vbox.addWidget(qrw, 1)
            msg = _('Then, enter your Google Authenticator code:')
        else:
            label = QLabel(
                "This wallet is already registered with Trustedcoin. "
                "To finalize wallet creation, please enter your Google Authenticator Code. "
            )
            label.setWordWrap(1)
            vbox.addWidget(label)
            msg = _('Google Authenticator code:')
        hbox = QHBoxLayout()
        hbox.addWidget(WWLabel(msg))
        pw = AmountEdit(None, is_int = True)
        pw.setFocus(True)
        pw.setMaximumWidth(50)
        hbox.addWidget(pw)
        vbox.addLayout(hbox)

        cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
        cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
        vbox.addWidget(cb_lost)
        cb_lost.setVisible(otp_secret is None)

        def set_enabled():
            # A valid OTP is 6 digits; the "lost account" path needs none.
            b = True if cb_lost.isChecked() else len(pw.text()) == 6
            window.next_button.setEnabled(b)

        pw.textChanged.connect(set_enabled)
        cb_lost.toggled.connect(set_enabled)

        window.exec_layout(vbox, next_enabled=False,
                           raise_on_cancel=False)
        return pw.get_amount(), cb_lost.isChecked()
| |
from collections.abc import Callable
from copy import deepcopy
from io import StringIO
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from .data import EV_PER_MEV
from .endf import get_cont_record, get_list_record, get_tab1_record, Evaluation
from .function import Function1D, Tabulated1D, Polynomial, sum_functions
# Labels for the nine energy-release components of an ENDF MF=1, MT=458
# record, in the order they appear in the LIST record.  'recoverable' and
# 'total' are derived sums and are recomputed rather than read.
_NAMES = (
    'fragments', 'prompt_neutrons', 'delayed_neutrons',
    'prompt_photons', 'delayed_photons', 'betas',
    'neutrinos', 'recoverable', 'total'
)
class FissionEnergyRelease(EqualityMixin):
    """Energy released by fission reactions.

    Energy is carried away from fission reactions by many different particles.
    The attributes of this class specify how much energy is released in the form
    of fission fragments, neutrons, photons, etc. Each component is also (in
    general) a function of the incident neutron energy.

    Following a fission reaction, most of the energy release is carried by the
    daughter nuclei fragments. These fragments accelerate apart from the
    Coulomb force on the time scale of ~10^-20 s [1]. Those fragments emit
    prompt neutrons between ~10^-18 and ~10^-13 s after scission (although some
    prompt neutrons may come directly from the scission point) [1]. Prompt
    photons follow with a time scale of ~10^-14 to ~10^-7 s [1]. The fission
    products then emit delayed neutrons with half lives between 0.1 and 100 s.
    The remaining fission energy comes from beta decays of the fission products
    which release beta particles, photons, and neutrinos (that escape the
    reactor and do not produce usable heat).

    Use the class methods to instantiate this class from an HDF5 or ENDF
    dataset. The :meth:`FissionEnergyRelease.from_hdf5` method builds this
    class from the usual OpenMC HDF5 data files.
    :meth:`FissionEnergyRelease.from_endf` uses ENDF-formatted data.

    References
    ----------
    [1] D. G. Madland, "Total prompt energy release in the neutron-induced
    fission of ^235U, ^238U, and ^239Pu", Nuclear Physics A 772:113--137 (2006).
    <http://dx.doi.org/10.1016/j.nuclphysa.2006.03.013>

    Attributes
    ----------
    fragments : Callable
        Function that accepts incident neutron energy value(s) and returns the
        kinetic energy of the fission daughter nuclides (after prompt neutron
        emission).
    prompt_neutrons : Callable
        Function of energy that returns the kinetic energy of prompt fission
        neutrons.
    delayed_neutrons : Callable
        Function of energy that returns the kinetic energy of delayed neutrons
        emitted from fission products.
    prompt_photons : Callable
        Function of energy that returns the kinetic energy of prompt fission
        photons.
    delayed_photons : Callable
        Function of energy that returns the kinetic energy of delayed photons.
    betas : Callable
        Function of energy that returns the kinetic energy of delayed beta
        particles.
    neutrinos : Callable
        Function of energy that returns the kinetic energy of neutrinos.
    recoverable : Callable
        Function of energy that returns the kinetic energy of all products that
        can be absorbed in the reactor (all of the energy except for the
        neutrinos).
    total : Callable
        Function of energy that returns the kinetic energy of all products.
    q_prompt : Callable
        Function of energy that returns the prompt fission Q-value (fragments +
        prompt neutrons + prompt photons - incident neutron energy).
    q_recoverable : Callable
        Function of energy that returns the recoverable fission Q-value
        (total release - neutrinos - incident neutron energy). This value is
        sometimes referred to as the pseudo-Q-value.
    q_total : Callable
        Function of energy that returns the total fission Q-value (total release
        - incident neutron energy).
    """

    def __init__(self, fragments, prompt_neutrons, delayed_neutrons,
                 prompt_photons, delayed_photons, betas, neutrinos):
        """Store the seven base energy-release components.

        Each argument is a callable of incident neutron energy; the setters
        below enforce that.  Derived quantities (recoverable, total, Q-values)
        are computed on demand from these seven.
        """
        self.fragments = fragments
        self.prompt_neutrons = prompt_neutrons
        self.delayed_neutrons = delayed_neutrons
        self.prompt_photons = prompt_photons
        self.delayed_photons = delayed_photons
        self.betas = betas
        self.neutrinos = neutrinos

    @property
    def fragments(self):
        return self._fragments

    @property
    def prompt_neutrons(self):
        return self._prompt_neutrons

    @property
    def delayed_neutrons(self):
        return self._delayed_neutrons

    @property
    def prompt_photons(self):
        return self._prompt_photons

    @property
    def delayed_photons(self):
        return self._delayed_photons

    @property
    def betas(self):
        return self._betas

    @property
    def neutrinos(self):
        return self._neutrinos

    @property
    def recoverable(self):
        # Everything except the neutrinos, which escape the reactor.
        components = ['fragments', 'prompt_neutrons', 'delayed_neutrons',
                      'prompt_photons', 'delayed_photons', 'betas']
        return sum_functions(getattr(self, c) for c in components)

    @property
    def total(self):
        # Sum of all seven base components, neutrinos included.
        components = ['fragments', 'prompt_neutrons', 'delayed_neutrons',
                      'prompt_photons', 'delayed_photons', 'betas',
                      'neutrinos']
        return sum_functions(getattr(self, c) for c in components)

    @property
    def q_prompt(self):
        # Use a polynomial to subtract incident energy.
        funcs = [self.fragments, self.prompt_neutrons, self.prompt_photons,
                 Polynomial((0.0, -1.0))]
        return sum_functions(funcs)

    @property
    def q_recoverable(self):
        # Use a polynomial to subtract incident energy.
        return sum_functions([self.recoverable, Polynomial((0.0, -1.0))])

    @property
    def q_total(self):
        # Use a polynomial to subtract incident energy.
        return sum_functions([self.total, Polynomial((0.0, -1.0))])

    @fragments.setter
    def fragments(self, energy_release):
        cv.check_type('fragments', energy_release, Callable)
        self._fragments = energy_release

    @prompt_neutrons.setter
    def prompt_neutrons(self, energy_release):
        cv.check_type('prompt_neutrons', energy_release, Callable)
        self._prompt_neutrons = energy_release

    @delayed_neutrons.setter
    def delayed_neutrons(self, energy_release):
        cv.check_type('delayed_neutrons', energy_release, Callable)
        self._delayed_neutrons = energy_release

    @prompt_photons.setter
    def prompt_photons(self, energy_release):
        cv.check_type('prompt_photons', energy_release, Callable)
        self._prompt_photons = energy_release

    @delayed_photons.setter
    def delayed_photons(self, energy_release):
        cv.check_type('delayed_photons', energy_release, Callable)
        self._delayed_photons = energy_release

    @betas.setter
    def betas(self, energy_release):
        cv.check_type('betas', energy_release, Callable)
        self._betas = energy_release

    @neutrinos.setter
    def neutrinos(self, energy_release):
        cv.check_type('neutrinos', energy_release, Callable)
        self._neutrinos = energy_release

    @classmethod
    def from_endf(cls, ev, incident_neutron):
        """Generate fission energy release data from an ENDF file.

        Parameters
        ----------
        ev : openmc.data.endf.Evaluation
            ENDF evaluation
        incident_neutron : openmc.data.IncidentNeutron
            Corresponding incident neutron dataset

        Returns
        -------
        openmc.data.FissionEnergyRelease
            Fission energy release data
        """
        cv.check_type('evaluation', ev, Evaluation)
        # Check to make sure this ENDF file matches the expected isomer.
        if ev.target['atomic_number'] != incident_neutron.atomic_number:
            raise ValueError('The atomic number of the ENDF evaluation does '
                             'not match the given IncidentNeutron.')
        if ev.target['mass_number'] != incident_neutron.mass_number:
            raise ValueError('The atomic mass of the ENDF evaluation does '
                             'not match the given IncidentNeutron.')
        if ev.target['isomeric_state'] != incident_neutron.metastable:
            raise ValueError('The metastable state of the ENDF evaluation '
                             'does not match the given IncidentNeutron.')
        if not ev.target['fissionable']:
            raise ValueError('The ENDF evaluation is not fissionable.')
        if (1, 458) not in ev.section:
            raise ValueError('ENDF evaluation does not have MF=1, MT=458.')
        file_obj = StringIO(ev.section[1, 458])
        # Read first record and check whether any components appear as
        # tabulated functions
        items = get_cont_record(file_obj)
        lfc = items[3]  # LFC flag: nonzero when tabulated components follow
        nfc = items[5]  # NFC: number of tabulated components
        # Parse the ENDF LIST into an array.
        items, data = get_list_record(file_obj)
        npoly = items[3]  # NPLY: polynomial order (0 means Sher-Beck format)
        # Associate each set of values and uncertainties with its label.
        # Each of the 9 components contributes a (value, uncertainty) pair per
        # polynomial order, hence the stride of 18; [2*i::18] selects the
        # values (skipping uncertainties) for component i.
        functions = {}
        for i, name in enumerate(_NAMES):
            coeffs = data[2*i::18]
            # Ignore recoverable and total since we recalculate those directly
            if name in ('recoverable', 'total'):
                continue
            # In ENDF/B-VII.1, data for 2nd-order coefficients were mistakenly
            # not converted from MeV to eV. Check for this error and fix it if
            # present.
            if npoly == 2:  # Only check 2nd-order data.
                # If a 5 MeV neutron causes a change of more than 100 MeV, we
                # know something is wrong.
                second_order = coeffs[2]
                if abs(second_order) * (5e6)**2 > 1e8:
                    # If we found the error, reduce 2nd-order coeff by 10**6.
                    coeffs[2] /= EV_PER_MEV
            # If multiple coefficients were given, we can create the polynomial
            # and move on to the next component
            if npoly > 0:
                functions[name] = Polynomial(coeffs)
                continue
            # If a single coefficient was given, we need to use the Sher-Beck
            # formula for energy dependence
            zeroth_order = coeffs[0]
            if name in ('delayed_photons', 'betas'):
                func = Polynomial((zeroth_order, -0.075))
            elif name == 'neutrinos':
                func = Polynomial((zeroth_order, -0.105))
            elif name == 'prompt_neutrons':
                # Prompt neutrons require nu-data. It is not clear from
                # ENDF-102 whether prompt or total nu value should be used, but
                # the delayed neutron fraction is so small that the difference
                # is negligible. MT=18 (n, fission) might not be available so
                # try MT=19 (n, f) as well.
                if 18 in incident_neutron and not incident_neutron[18].redundant:
                    nu = [p.yield_ for p in incident_neutron[18].products
                          if p.particle == 'neutron'
                          and p.emission_mode in ('prompt', 'total')]
                elif 19 in incident_neutron:
                    nu = [p.yield_ for p in incident_neutron[19].products
                          if p.particle == 'neutron'
                          and p.emission_mode in ('prompt', 'total')]
                else:
                    raise ValueError('IncidentNeutron data has no fission '
                                     'reaction.')
                if len(nu) == 0:
                    raise ValueError(
                        'Nu data is needed to compute fission energy '
                        'release with the Sher-Beck format.'
                    )
                if len(nu) > 1:
                    raise ValueError('Ambiguous prompt/total nu value.')
                nu = nu[0]
                if isinstance(nu, Tabulated1D):
                    # Evaluate Sher-Beck polynomial form at each tabulated value
                    func = deepcopy(nu)
                    func.y = (zeroth_order + 1.307*nu.x - 8.07e6*(nu.y - nu.y[0]))
                elif isinstance(nu, Polynomial):
                    # Combine polynomials
                    if len(nu) == 1:
                        func = Polynomial([zeroth_order, 1.307])
                    else:
                        func = Polynomial(
                            [zeroth_order, 1.307 - 8.07e6*nu.coef[1]]
                            + [-8.07e6*c for c in nu.coef[2:]])
            else:
                # Remaining components (fragments, delayed_neutrons,
                # prompt_photons) are energy-independent in Sher-Beck format.
                func = Polynomial(coeffs)
            functions[name] = func
        # Check for tabulated data
        if lfc == 1:
            for _ in range(nfc):
                # Get tabulated function
                items, eifc = get_tab1_record(file_obj)
                # Determine which component it is
                ifc = items[3]
                name = _NAMES[ifc - 1]
                # Replace value in dictionary
                functions[name] = eifc
        # Build the object
        return cls(**functions)

    @classmethod
    def from_hdf5(cls, group):
        """Generate fission energy release data from an HDF5 group.

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to read from

        Returns
        -------
        openmc.data.FissionEnergyRelease
            Fission energy release data
        """
        fragments = Function1D.from_hdf5(group['fragments'])
        prompt_neutrons = Function1D.from_hdf5(group['prompt_neutrons'])
        delayed_neutrons = Function1D.from_hdf5(group['delayed_neutrons'])
        prompt_photons = Function1D.from_hdf5(group['prompt_photons'])
        delayed_photons = Function1D.from_hdf5(group['delayed_photons'])
        betas = Function1D.from_hdf5(group['betas'])
        neutrinos = Function1D.from_hdf5(group['neutrinos'])
        return cls(fragments, prompt_neutrons, delayed_neutrons, prompt_photons,
                   delayed_photons, betas, neutrinos)

    def to_hdf5(self, group):
        """Write energy release data to an HDF5 group

        Parameters
        ----------
        group : h5py.Group
            HDF5 group to write to
        """
        self.fragments.to_hdf5(group, 'fragments')
        self.prompt_neutrons.to_hdf5(group, 'prompt_neutrons')
        self.delayed_neutrons.to_hdf5(group, 'delayed_neutrons')
        self.prompt_photons.to_hdf5(group, 'prompt_photons')
        self.delayed_photons.to_hdf5(group, 'delayed_photons')
        self.betas.to_hdf5(group, 'betas')
        self.neutrinos.to_hdf5(group, 'neutrinos')
        # Derived Q-values are written for downstream consumers; note that
        # from_hdf5 reads back only the seven base components above.
        self.q_prompt.to_hdf5(group, 'q_prompt')
        self.q_recoverable.to_hdf5(group, 'q_recoverable')
| |
#!/usr/bin/env python
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter("ignore")
import json
import logging
import os
import signal
from subprocess import PIPE
import threading
import time
import unittest
import urllib
import urllib2
import environment
import utils
import tablet
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
from vtdb import dbexceptions
# The two tablets shared by every test in this module, named by their UIDs.
tablet_62344 = tablet.Tablet(62344)
tablet_62044 = tablet.Tablet(62044)
def setUpModule():
    """Bring up the topo server, both mysqld instances and vtctld."""
    try:
        if environment.topo_server().flavor() == 'zookeeper':
            # this is a one-off test to make sure our zookeeper implementation
            # behaves with a server that is not DNS-resolveable
            environment.topo_server().setup(add_bad_host=True)
        else:
            environment.topo_server().setup()
        # start mysql instance external to the test
        mysql_procs = [t.init_mysql() for t in (tablet_62344, tablet_62044)]
        utils.Vtctld().start()
        utils.wait_procs(mysql_procs)
    except:
        # Clean up whatever did come up before re-raising.
        tearDownModule()
        raise
def tearDownModule():
    """Shut down mysqld, wipe the topo server and remove temporary state."""
    if utils.options.skip_teardown:
        return
    utils.wait_procs(
        [t.teardown_mysql() for t in (tablet_62344, tablet_62044)],
        raise_on_error=False)
    environment.topo_server().teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
    for t in (tablet_62344, tablet_62044):
        t.remove_tree()
class TestTabletManager(unittest.TestCase):
def tearDown(self):
    """Reset both tablets to a clean state after each test."""
    tablet.Tablet.check_vttablet_count()
    environment.topo_server().wipe()
    for t in (tablet_62344, tablet_62044):
        t.reset_replication()
        t.clean_dbs()
def _check_srv_shard(self):
    """Verify the SrvShard record still names test_nj as the master cell."""
    srv_shard = utils.run_vtctl_json(
        ['GetSrvShard', 'test_nj', 'test_keyspace/0'])
    self.assertEqual(srv_shard['master_cell'], 'test_nj')
# run twice to check behavior with existing znode data
def test_sanity(self):
    # Run the scenario twice to check behavior with existing znode data.
    for _ in xrange(2):
        self._test_sanity()
def _test_sanity(self):
    """Bring up a master vttablet and exercise queries plus basic vtctl actions."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', '-force', 'test_keyspace'])
    utils.run_vtctl(['createshard', '-force', 'test_keyspace/0'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', '-rebuild_srv_shards', 'test_keyspace'])
    utils.validate_topology()
    self._check_srv_shard()
    # if these statements don't run before the tablet it will wedge
    # waiting for the db to become accessible. this is more a bug than
    # a feature.
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)
    tablet_62344.start_vttablet()
    # make sure the query service is started right away
    qr = tablet_62344.execute('select * from vt_select_test')
    self.assertEqual(len(qr['Rows']), 4,
                     "expected 4 rows in vt_select_test: %s" % str(qr))
    # make sure direct dba queries work
    query_result = utils.run_vtctl_json(['ExecuteFetchAsDba', '-want_fields', tablet_62344.tablet_alias, 'select * from vt_test_keyspace.vt_select_test'])
    self.assertEqual(len(query_result['Rows']), 4, "expected 4 rows in vt_select_test: %s" % str(query_result))
    self.assertEqual(len(query_result['Fields']), 2, "expected 2 fields in vt_select_test: %s" % str(query_result))
    # check Ping / RefreshState
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    utils.run_vtctl(['RefreshState', tablet_62344.tablet_alias])
    # Quickly check basic actions.
    utils.run_vtctl(['SetReadOnly', tablet_62344.tablet_alias])
    utils.wait_db_read_only(62344)
    utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
    utils.check_db_read_write(62344)
    utils.run_vtctl(['DemoteMaster', tablet_62344.tablet_alias])
    utils.wait_db_read_only(62344)
    utils.validate_topology()
    utils.run_vtctl(['ValidateKeyspace', 'test_keyspace'])
    # not pinging tablets, as it enables replication checks, and they
    # break because we only have a single master, no slaves
    utils.run_vtctl(['ValidateShard', '-ping-tablets=false',
                     'test_keyspace/0'])
    self._check_srv_shard()
    # Tear the tablet back down so the scenario can be re-run.
    tablet_62344.kill_vttablet()
    tablet_62344.init_tablet('idle')
    tablet_62344.scrap(force=True)
def test_scrap(self):
    """Scrapping a replica should leave the topology and SrvShard valid."""
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/*'])
    utils.validate_topology()
    self._check_srv_shard()
    # Scrap the replica and check the topology is still consistent.
    tablet_62044.scrap(force=True)
    utils.validate_topology()
    self._check_srv_shard()
# Schema and seed data shared by the sanity/authentication tests above.
_create_vt_select_test = '''create table vt_select_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
# Four rows, so the tests can assert on an exact row count.
_populate_vt_select_test = [
    "insert into vt_select_test (msg) values ('test %s')" % x
    for x in xrange(4)]
def test_actions_and_timeouts(self):
    """Check that a long-running tablet action makes concurrent actions time out.

    Starts a background 'Sleep 10s' action on the tablet, then verifies a
    RefreshState with a 3s wait-time fails with the RPC timeout message.
    """
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
    utils.validate_topology()
    self._check_srv_shard()
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet()
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    # schedule long action in the background, sleep a little bit to make sure
    # it started to run
    args = (environment.binary_args('vtctl') +
            environment.topo_server().flags() +
            ['-tablet_manager_protocol',
             protocols_flavor().tablet_manager_protocol(),
             '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
             '-log_dir', environment.vtlogroot,
             'Sleep', tablet_62344.tablet_alias, '10s'])
    bg = utils.run_bg(args)
    time.sleep(3)
    # try a frontend RefreshState that should timeout as the tablet is busy
    # running the other one
    stdout, stderr = utils.run_vtctl(['-wait-time', '3s',
                                      'RefreshState', tablet_62344.tablet_alias],
                                     expect_fail=True)
    self.assertIn(protocols_flavor().rpc_timeout_message(), stderr)
    # wait for the background vtctl
    bg.wait()
    if environment.topo_server().flavor() == 'zookeeper':
        # extra small test: we ran for a while, get the states we were in,
        # make sure they're accounted for properly
        # first the query engine States
        v = utils.get_vars(tablet_62344.port)
        logging.debug("vars: %s" % str(v))
        # then the Zookeeper connections
        if v['ZkMetaConn']['test_nj']['Current'] != 'Connected':
            self.fail('invalid zk test_nj state: %s' %
                      v['ZkMetaConn']['test_nj']['Current'])
        if v['ZkMetaConn']['global']['Current'] != 'Connected':
            self.fail('invalid zk global state: %s' %
                      v['ZkMetaConn']['global']['Current'])
        if v['ZkMetaConn']['test_nj']['DurationConnected'] < 10e9:
            # BUG FIX: fail() takes a single message argument; the original
            # passed the value as a second positional arg instead of using
            # % formatting, which would raise TypeError instead of failing
            # with a useful message.
            self.fail('not enough time in Connected state: %d' %
                      v['ZkMetaConn']['test_nj']['DurationConnected'])
        if v['TabletType'] != 'master':
            self.fail('TabletType not exported correctly')
    tablet_62344.kill_vttablet()
def test_vttablet_authenticated(self):
    """Check queries work over an authenticated vttablet connection."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    utils.run_vtctl(['RebuildShardGraph', 'test_keyspace/0'])
    utils.validate_topology()
    self._check_srv_shard()
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)
    tablet_62344.start_vttablet(auth=True)
    utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
    # make sure we can connect using secure connection
    conn = tablet_62344.conn(user='ala', password=r'ma kota')
    results, rowcount, lastrowid, fields = conn._execute('select * from vt_select_test', {})
    logging.debug("Got results: %s", str(results))
    self.assertEqual(len(results), 4, 'got wrong result length: %s' % str(results))
    conn.close()
    tablet_62344.kill_vttablet()
    # TODO(szopa): Test that non-authenticated queries do not pass
    # through (when we get to that point).
def _check_string_in_hook_result(self, text, expected):
    """Assert that at least one of the expected strings occurs in text.

    expected may be a single string or a list of acceptable alternatives.
    """
    if isinstance(expected, basestring):
        expected = [expected]
    if any(exp in text for exp in expected):
        return
    logging.warning("ExecuteHook output:\n%s", text)
    self.fail("ExecuteHook returned unexpected result, no string: '" + "', '".join(expected) + "'")
def _run_hook(self, params, expectedStrings):
    """Run ExecuteHook with params and check stderr for each expected string."""
    _, stderr = utils.run_vtctl(
        ['--alsologtostderr', 'ExecuteHook',
         tablet_62344.tablet_alias] + params,
        mode=utils.VTCTL_VTCTL, trap_output=True, raise_on_error=False)
    for expected in expectedStrings:
        self._check_string_in_hook_result(stderr, expected)
def test_hook(self):
    """Exercise ExecuteHook: success, stderr output, failure, missing hook, bad name."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)
    # test a regular program works
    self._run_hook(['test.sh', '--flag1', '--param1=hello'], [
        '"ExitStatus": 0',
        # Parameter order in the output is not deterministic, so accept either.
        ['"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --flag1\\nPARAM: --param1=hello\\n"',
         '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --param1=hello\\nPARAM: --flag1\\n"',
         ],
        '"Stderr": ""',
        ])
    # test stderr output
    self._run_hook(['test.sh', '--to-stderr'], [
        '"ExitStatus": 0',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --to-stderr\\n"',
        '"Stderr": "ERR: --to-stderr\\n"',
        ])
    # test commands that fail
    self._run_hook(['test.sh', '--exit-error'], [
        '"ExitStatus": 1',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --exit-error\\n"',
        '"Stderr": "ERROR: exit status 1\\n"',
        ])
    # test hook that is not present
    self._run_hook(['not_here.sh'], [
        '"ExitStatus": -1',
        '"Stdout": "Skipping missing hook: /',  # cannot go further, local path
        '"Stderr": ""',
        ])
    # test hook with invalid name
    self._run_hook(['/bin/ls'], [
        "action failed: ExecuteHook hook name cannot have a '/' in it",
        ])
    tablet_62344.kill_vttablet()
def test_restart(self):
    """test_restart tests that when starting a second vttablet with the same
    configuration as another one, it will kill the previous process
    and take over listening on the socket.

    If vttablet listens to other ports (like gRPC), this feature will
    break. We believe it is not widely used, so we're OK with this for now.
    (container based installations usually handle tablet restarts
    by using a different set of servers, and do not rely on this feature
    at all).
    """
    if environment.topo_server().flavor() != 'zookeeper':
        logging.info("Skipping this test in non-github tree")
        return
    if tablet_62344.grpc_enabled():
        logging.info("Skipping this test as second gRPC port interferes")
        return
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    # Second start is expected to kill the first process and take over.
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    # Poll for up to ~20 seconds for the first process to exit.
    for timeout in xrange(20):
        logging.debug("Sleeping waiting for first process to die")
        time.sleep(1.0)
        proc1.poll()
        if proc1.returncode is not None:
            break
    if proc1.returncode is None:
        self.fail("proc1 still running")
    tablet_62344.kill_vttablet()
def test_scrap_and_reinit(self):
    """Scrap/re-init a replica and exercise ShardReplicationFix.

    Verifies the replication graph node count before and after scrapping,
    and that a bogus entry added via ShardReplicationAdd is removed again
    by ShardReplicationFix.
    """
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    # one master one replica
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    # make sure the replica is in the replication graph
    before_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                         'test_keyspace/0'])
    self.assertEqual(2, len(before_scrap['nodes']),
                     'wrong shard replication nodes before: %s' %
                     str(before_scrap))
    # scrap and re-init
    utils.run_vtctl(['ScrapTablet', '-force', tablet_62044.tablet_alias])
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')
    after_scrap = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                        'test_keyspace/0'])
    self.assertEqual(2, len(after_scrap['nodes']),
                     'wrong shard replication nodes after: %s' %
                     str(after_scrap))
    # manually add a bogus entry to the replication graph, and check
    # it is removed by ShardReplicationFix
    utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
                     'test_nj-0000066666'], auto_log=True)
    with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                       'test_keyspace/0'])
    self.assertEqual(3, len(with_bogus['nodes']),
                     'wrong shard replication nodes with bogus: %s' %
                     str(with_bogus))
    utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
                    auto_log=True)
    after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
                                      'test_keyspace/0'])
    # BUG FIX: the assertion previously checked after_scrap (already known
    # to have 2 nodes), so the ShardReplicationFix result was never
    # actually verified; check after_fix instead, matching the message.
    self.assertEqual(2, len(after_fix['nodes']),
                     'wrong shard replication nodes after fix: %s' %
                     str(after_fix))
def check_healthz(self, tablet, expected):
    """Check the tablet's /healthz endpoint: "ok" when expected, HTTP error otherwise."""
    if not expected:
        with self.assertRaises(urllib2.HTTPError):
            tablet.get_healthz()
    else:
        self.assertEqual("ok\n", tablet.get_healthz())
def wait_for_tablet_type_change(self, tablet_alias, expected_type):
    """Poll GetTablet until the tablet reports the expected type (or time out)."""
    want_type = tablet.Tablet.tablet_type_value[expected_type.upper()]
    remaining = 10
    while True:
        tablet_info = utils.run_vtctl_json(['GetTablet', tablet_alias])
        if tablet_info['type'] == want_type:
            logging.debug('Slave tablet went to %s, good' % expected_type)
            return
        # wait_step sleeps and decrements the budget, failing when exhausted.
        remaining = utils.wait_step('slave becomes ' + expected_type, remaining)
def test_health_check(self):
    """Drive a replica through spare -> replica -> spare -> replica via health checks."""
    # one master, one replica that starts in spare
    # (for the replica, we let vttablet do the InitTablet)
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
        t.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet(wait_for_state=None,
                                target_tablet_type='replica')
    tablet_62044.start_vttablet(wait_for_state=None,
                                target_tablet_type='replica',
                                lameduck_period='5s',
                                init_keyspace='test_keyspace',
                                init_shard='0')
    tablet_62344.wait_for_vttablet_state('SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(tablet_62044, False)
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # make sure the 'spare' slave goes to 'replica'
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "replica")
    self.check_healthz(tablet_62044, True)
    # make sure the master is still master
    ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
    self.assertEqual(ti['type'], tablet.Tablet.tablet_type_value['MASTER'],
                     "unexpected master type: %s" % ti['type'])
    # stop replication, make sure we go unhealthy.
    utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "spare")
    self.check_healthz(tablet_62044, False)
    # make sure the serving graph was updated
    timeout = 10
    while True:
        try:
            # GetEndPoints raising means the tablet was removed, which is
            # the condition we are waiting for.
            utils.run_vtctl_json(['GetEndPoints', 'test_nj', 'test_keyspace/0',
                                  'replica'])
        except:
            logging.debug("Tablet is gone from serving graph, good")
            break
        timeout = utils.wait_step('Stopped replication didn\'t trigger removal from serving graph', timeout)
    # make sure status web page is unhappy
    self.assertIn('>unhealthy: replication_reporter: Replication is not running</span></div>', tablet_62044.get_status())
    # make sure the health stream is updated
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   tablet_62044.tablet_alias])
    self.assertIn('replication_reporter: Replication is not running', health['realtime_stats']['health_error'])
    # then restart replication, and write data, make sure we go back to healthy
    utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "replica")
    # make sure status web page is healthy
    self.assertIn('>healthy</span></div>', tablet_62044.get_status())
    # make sure the vars is updated
    v = utils.get_vars(tablet_62044.port)
    self.assertEqual(v['LastHealthMapCount'], 0)
    # now test VtTabletStreamHealth returns the right thing
    stdout, stderr = utils.run_vtctl(['VtTabletStreamHealth',
                                      '-count', '2',
                                      tablet_62044.tablet_alias],
                                     trap_output=True, auto_log=True)
    lines = stdout.splitlines()
    self.assertEqual(len(lines), 2)
    for line in lines:
        logging.debug("Got health: %s", line)
        data = json.loads(line)
        self.assertIn('realtime_stats', data)
        self.assertNotIn('health_error', data['realtime_stats'])
        self.assertNotIn('tablet_externally_reparented_timestamp', data)
        self.assertEqual('test_keyspace', data['target']['keyspace'])
        self.assertEqual('0', data['target']['shard'])
        self.assertEqual(3, data['target']['tablet_type'])
    # kill the tablets
    tablet.kill_tablets([tablet_62344, tablet_62044])
    # the replica was in lameduck for 5 seconds, should have been enough
    # to reset its state to spare
    ti = utils.run_vtctl_json(['GetTablet', tablet_62044.tablet_alias])
    self.assertEqual(ti['type'], tablet.Tablet.tablet_type_value['SPARE'],
                     "tablet didn't go to spare while in lameduck mode: %s" % str(ti))
def test_health_check_worker_state_does_not_shutdown_query_service(self):
    """An unhealthy 'worker' rdonly tablet must keep serving queries."""
    # This test is similar to test_health_check, but has the following differences:
    # - the second tablet is an "rdonly" and not a "replica"
    # - the second tablet will be set to "worker" and we expect that the query service won't be shutdown
    # Setup master and rdonly tablets.
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
        t.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet(wait_for_state=None,
                                target_tablet_type='replica')
    tablet_62044.start_vttablet(wait_for_state=None,
                                target_tablet_type='rdonly',
                                init_keyspace='test_keyspace',
                                init_shard='0')
    tablet_62344.wait_for_vttablet_state('SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(tablet_62044, False)
    # Enable replication.
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # Trigger healthcheck to save time waiting for the next interval.
    utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, 'rdonly'])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, 'rdonly')
    self.check_healthz(tablet_62044, True)
    tablet_62044.wait_for_vttablet_state('SERVING')
    # Change from rdonly to worker and stop replication. (These actions are similar to the SplitClone vtworker command implementation.)
    # The tablet will become unhealthy, but the query service is still running.
    utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "worker"])
    utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
    # Trigger healthcheck explicitly to avoid waiting for the next interval.
    utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "worker")
    self.check_healthz(tablet_62044, False)
    # Make sure that replication got disabled.
    self.assertIn('>unhealthy: replication_reporter: Replication is not running</span></div>', tablet_62044.get_status())
    # Query service is still running.
    tablet_62044.wait_for_vttablet_state('SERVING')
    # Restart replication. Tablet will become healthy again.
    utils.run_vtctl(["ChangeSlaveType", tablet_62044.tablet_alias, "spare"])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "spare")
    utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
    utils.run_vtctl(["RunHealthCheck", tablet_62044.tablet_alias, "rdonly"])
    self.wait_for_tablet_type_change(tablet_62044.tablet_alias, "rdonly")
    self.check_healthz(tablet_62044, True)
    tablet_62044.wait_for_vttablet_state('SERVING')
    # kill the tablets
    tablet.kill_tablets([tablet_62344, tablet_62044])
def test_no_mysql_healthcheck(self):
  """This test starts a vttablet with no mysql port, while mysql is down.
  It makes sure vttablet will start properly and be unhealthy.
  Then we start mysql, and make sure vttablet becomes healthy.
  """
  # we need replication to be enabled, so the slave tablet can be healthy.
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
  pos = mysql_flavor().master_position(tablet_62344)
  # Use "localhost" as hostname because Travis CI worker hostnames are too long for MySQL replication.
  changeMasterCmds = mysql_flavor().change_master_commands(
      "localhost",
      tablet_62344.mysql_port,
      pos)
  # Point the slave at the master and start replicating.
  tablet_62044.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
                      changeMasterCmds +
                      ['START SLAVE'])
  # now shutdown all mysqld
  shutdown_procs = [
      tablet_62344.shutdown_mysql(),
      tablet_62044.shutdown_mysql(),
      ]
  utils.wait_procs(shutdown_procs)
  # start the tablets, wait for them to be NOT_SERVING (mysqld not there)
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  tablet_62044.init_tablet('spare', 'test_keyspace', '0',
                           include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.start_vttablet(wait_for_state=None,
                     target_tablet_type='replica',
                     full_mycnf_args=True, include_mysql_port=False)
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(t, False)
  # restart mysqld
  start_procs = [
      tablet_62344.start_mysql(),
      tablet_62044.start_mysql(),
      ]
  utils.wait_procs(start_procs)
  # the master should still be healthy
  utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias, 'replica'],
                  auto_log=True)
  self.check_healthz(tablet_62344, True)
  # the slave won't be healthy at first, as replication is not running
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'replica'],
                  auto_log=True)
  self.check_healthz(tablet_62044, False)
  tablet_62044.wait_for_vttablet_state('NOT_SERVING')
  # restart replication
  tablet_62044.mquery('', ['START SLAVE'])
  # wait for the tablet to become healthy and fix its mysql port
  utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias, 'replica'],
                  auto_log=True)
  tablet_62044.wait_for_vttablet_state('SERVING')
  self.check_healthz(tablet_62044, True)
  for t in tablet_62344, tablet_62044:
    # wait for mysql port to show up (it was not known at tablet start)
    timeout = 10
    while True:
      ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
      if 'mysql' in ti['port_map']:
        break
      timeout = utils.wait_step('mysql port in tablet record', timeout)
    self.assertEqual(ti['port_map']['mysql'], t.mysql_port)
  # all done
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_repeated_init_shard_master(self):
  """InitShardMaster can be run repeatedly, flipping mastership between
  the two tablets; both tablets must stay healthy after every flip.
  """
  for t in tablet_62344, tablet_62044:
    t.create_db('vt_test_keyspace')
    t.start_vttablet(wait_for_state=None,
                     target_tablet_type='replica',
                     lameduck_period='5s',
                     init_keyspace='test_keyspace',
                     init_shard='0')
  # tablets are not replicating, so they won't be healthy
  for t in tablet_62344, tablet_62044:
    t.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(t, False)
  # pick one master out of the two
  utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                   tablet_62344.tablet_alias])
  # run health check on both, make sure they are both healthy
  for t in tablet_62344, tablet_62044:
    utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
                    auto_log=True)
    self.check_healthz(t, True)
  # pick the other one as master, make sure they are still healthy
  utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                   tablet_62044.tablet_alias])
  # run health check on both, make sure they are both healthy
  for t in tablet_62344, tablet_62044:
    utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
                    auto_log=True)
    self.check_healthz(t, True)
  # and come back to the original guy
  utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                   tablet_62344.tablet_alias])
  # run health check on both, make sure they are both healthy
  for t in tablet_62344, tablet_62044:
    utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'],
                    auto_log=True)
    self.check_healthz(t, True)
  # and done
  tablet.kill_tablets([tablet_62344, tablet_62044])
def test_fallback_policy(self):
  """A vttablet started with an unknown security policy must fall back
  to denying access: /queryz has to answer with a 'not allowed' page.
  """
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  # The returned process handle was unused; don't keep it around.
  tablet_62344.start_vttablet(security_policy="bogus")

  # Fetch a protected status page; close the handle even if read() raises,
  # so a failing assertion doesn't leak the connection.
  f = urllib.urlopen('http://localhost:%d/queryz' % int(tablet_62344.port))
  try:
    response = f.read()
  finally:
    f.close()
  self.assertIn('not allowed', response)
  tablet_62344.kill_vttablet()
# Script entry point: option parsing and test running are delegated to the
# shared test-environment helper.
if __name__ == '__main__':
  utils.main()
| |
# -*- coding: utf-8 -*-
"""
Document Library - Controllers
"""
# Controller bootstrap: resolve the current module from the request and
# refuse to serve anything if it is disabled in the deployment settings.
module = request.controller
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# =============================================================================
def index():
    """Module landing page: expose the module's nice name to the view."""
    nice_name = settings.modules[module].name_nice
    response.title = nice_name
    return {"module_name": nice_name}
# =============================================================================
def document():
    """RESTful CRUD controller for documents."""

    def prep(r):
        """Per-request pre-processing: location filter + popup defaults."""
        s3db.gis_location_filter(r)
        if r.method in ("create", "create.popup"):
            # Opened as a popup from a Profile page: preset the doc_id
            default_doc_id = request.get_vars.get("~.doc_id", None)
            if default_doc_id:
                s3db.doc_document.doc_id.default = default_doc_id
        return True

    s3.prep = prep
    return s3_rest_controller(rheader=document_rheader)
# -----------------------------------------------------------------------------
def document_rheader(r):
    """
    Resource header for the document controller (HTML representation only).

    Shows the document name plus File/URL and Organisation/Person columns.
    Returns None for non-HTML representations or when there is no record.
    """
    if r.representation == "html":
        doc_document = r.record
        if doc_document:
            #rheader_tabs = s3_rheader_tabs(r, document_tabs(r))
            table = db.doc_document
            rheader = DIV(B("%s: " % T("Name")), doc_document.name,
                          TABLE(TR(
                                   TH("%s: " % T("File")), table.file.represent( doc_document.file ),
                                   TH("%s: " % T("URL")), table.url.represent( doc_document.url ),
                                   ),
                                TR(
                                   TH("%s: " % ORGANISATION), table.organisation_id.represent( doc_document.organisation_id ),
                                   # Bug fix: the Person column previously represented
                                   # doc_document.organisation_id (copy-paste error).
                                   TH("%s: " % T("Person")), table.person_id.represent( doc_document.person_id ),
                                   ),
                                ),
                          #rheader_tabs
                          )
            return rheader
    return None
# -----------------------------------------------------------------------------
def document_tabs(r):
    """
    Display the number of Components in the tabs
    - currently unused as we don't have these tabs off documents

    @param r: the S3Request; r.id is the document record id
    @return: list of (label, resource) tab tuples for s3_rheader_tabs
    """
    tab_opts = [{"tablename": "assess_rat",
                 "resource": "rat",
                 "one_title": "1 Assessment",
                 "num_title": " Assessments",
                 },
                {"tablename": "irs_ireport",
                 "resource": "ireport",
                 "one_title": "1 Incident Report",
                 "num_title": " Incident Reports",
                 },
                {"tablename": "cr_shelter",
                 "resource": "shelter",
                 "one_title": "1 Shelter",
                 "num_title": " Shelters",
                 },
                #{"tablename": "flood_freport",
                # "resource": "freport",
                # "one_title": "1 Flood Report",
                # "num_title": " Flood Reports",
                #},
                {"tablename": "req_req",
                 "resource": "req",
                 "one_title": "1 Request",
                 "num_title": " Requests",
                 },
                ]
    tabs = [(T("Details"), None)]
    crud_string = s3base.S3CRUD.crud_string
    for tab_opt in tab_opts:
        tablename = tab_opt["tablename"]
        # Bug fix: 'document_id' must be checked as a field-name string;
        # the bare name document_id was undefined (NameError when run).
        if tablename in db and "document_id" in db[tablename]:
            table = db[tablename]
            # Count only live component records linked to this document
            query = (table.deleted == False) & \
                    (table.document_id == r.id)
            tab_count = db(query).count()
            if tab_count == 0:
                label = crud_string(tablename, "title_create")
            elif tab_count == 1:
                label = tab_opt["one_title"]
            else:
                label = T(str(tab_count) + tab_opt["num_title"] )
            tabs.append( (label, tab_opt["resource"] ) )
    return tabs
# =============================================================================
def source():
    """RESTful CRUD controller for document sources."""

    def prep(r):
        """Apply the location filter before the request is processed."""
        s3db.gis_location_filter(r)
        return True

    s3.prep = prep
    return s3_rest_controller()
# =============================================================================
def image():
    """ RESTful CRUD controller for images """

    # Pre-processor
    def prep(r):
        # Location Filter
        s3db.gis_location_filter(r)
        if r.method in ("create", "create.popup"):
            # Coming from Profile page: preset the doc_id default
            doc_id = request.get_vars.get("~.doc_id", None)
            if doc_id:
                s3db.doc_image.doc_id.default = doc_id
        return True
    s3.prep = prep

    # Post-processor: after an update submit, crop the stored image using
    # the coordinates posted by the imagecrop widget.
    def postp(r, output):
        if r.method == "update" and r.http == "POST":
            points = r.vars.get("imagecrop-points")
            if not points:
                # Nothing to crop
                return output
            filename = r.resource.records()[0]["file"]
            # "x1,y1,x2,y2" -> list of floats (Python 2: map returns a list)
            points = map(float, points.split(","))
            path = os.path.join(request.folder, "uploads", "images", filename)
            # Queue the crop as a background task so the response isn't blocked
            current.s3task.async("crop_image",
                                 args=[path] + points + [S3ImageCropWidget.DEFAULT_WIDTH])
        return output
    s3.postp = postp

    output = s3_rest_controller()
    return output
# =============================================================================
def bulk_upload():
    """
    Custom view for uploading many Photos at once.

    @ToDo: Allow creation of a GIS Feature Layer to view on the map
    @ToDo: Allow uploading of associated GPX track for timestamp correlation.
           See r1595 for the previous draft of this work
    """
    # The fileuploader widget needs its own stylesheet
    s3.stylesheets.append("plugins/fileuploader.css")
    return {}
def upload_bulk():
    """
    Receive the Uploaded data from bulk_upload()
    https://github.com/valums/file-uploader/blob/master/server/readme.txt

    @ToDo: Read EXIF headers to geolocate the Photos
    @return: JSON string with either {"success": true} or {"error": "..."},
             as expected by the file-uploader widget
    """
    tablename = "doc_image"
    table = s3db[tablename]

    import cgi
    source = request.post_vars.get("qqfile", None)
    if isinstance(source, cgi.FieldStorage) and source.filename:
        # Regular multipart form upload (IE6-8, Opera, older browsers)
        name = source.filename
        image = source.file
    else:
        # Ajax-style raw POST body (browsers which show a progress bar)
        if "name" in request.vars:
            name = request.vars.name
        else:
            # Bug fix: the HTTP error must be raised, not just constructed;
            # otherwise execution continued with 'name' undefined.
            raise HTTP(400, "Invalid Request: Need a Name!")
        image = request.body.read()
        # Convert to StringIO for onvalidation/import
        import cStringIO
        image = cStringIO.StringIO(image)
        source = Storage()
        source.filename = name
        source.file = image

    form = SQLFORM(table)
    vars = Storage()
    vars.name = name
    vars.image = source
    vars._formname = "%s_create" % tablename

    # onvalidation callback
    onvalidation = s3db.get_config(tablename, "create_onvalidation",
                                   s3db.get_config(tablename, "onvalidation"))

    if form.accepts(vars, onvalidation=onvalidation):
        msg = Storage(success = True)
        # onaccept callback
        onaccept = s3db.get_config(tablename, "create_onaccept",
                                   s3db.get_config(tablename, "onaccept"))
        from gluon.tools import callback
        callback(onaccept, form, tablename=tablename)
    else:
        # Flatten the form errors into a single message for the widget
        error_msg = ""
        for error in form.errors:
            error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
        msg = Storage(error = error_msg)

    response.headers["Content-Type"] = "text/html" # This is what the file-uploader widget expects
    return json.dumps(msg)
# END =========================================================================
| |
#!/usr/bin/env python3
import functools
from math import exp
from os import path
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
from tensor_functions import bias_variable, weight_variable, \
embedding_initializer, build_mnist_embeddings
def run():
    """
    Convolutional NN for MNIST digit classification.

    Three ReLU convolutional layers, one ReLU fully-connected layer with
    dropout, and a softmax output layer.
    Activation function: relu (the code below uses tf.nn.relu, not sigmoid)
    Optimizer: AdamOptimizer with an exponentially decaying learning rate
    :return:
    """
    # ------ Data ------
    mnist = mnist_data.read_data_sets(
        "data",
        one_hot=True,
        reshape=False,
        validation_size=0
    )

    # ------ Constants -------
    # Image Format
    width = 28
    height = 28
    output = 10

    # Data
    epoch_total = 3
    batch_total = 1001
    batch_size = 100
    test_freq = 10 * epoch_total

    # Learning Rate Values
    lrmax = 0.003
    lrmin = 0.00001
    decay_speed = 2000.0

    # Drop-off (probability of keeping a unit during training)
    keep_ratio = 0.9

    # Layers: spatial filter sizes per conv layer
    filters = [
        [5, 5],
        [4, 4],
        [4, 4],
    ]
    # channels = [1, 4, 8, 12]
    channels = [1, 6, 12, 24]
    strides = [
        1,
        2,
        2
    ]
    connect_nodes = 200
    embedding_size = 1024

    # Tensor Board Log
    logs_path = "tensor_log/%s/" % path.splitext(path.basename(__file__))[0]
    embed_path = path.join(logs_path, "model.ckpt")
    sprite_path, label_path = build_mnist_embeddings('data', mnist)

    # Place holders
    X = tf.placeholder(tf.float32, [None, width, height, 1], name="Input_PH")
    Y_ = tf.placeholder(tf.float32, [None, output], name="Output_PH")
    L = tf.placeholder(tf.float32, name="Learning_Rate_PH")
    keep_prob = tf.placeholder(tf.float32, name="Per_Keep_PH")

    # Initialize Activation
    Y = X

    # Each stride shrinks the spatial resolution, so the flattened size of the
    # last conv layer is (width/prod(strides)) * (height/prod(strides)) * channels[-1].
    img_reduce = functools.reduce((lambda x, y: x * y), strides)
    conv_nodes = int((width / img_reduce) * (height / img_reduce) * (channels[-1]))

    # ----- Weights and Bias -----
    weights = []
    biases = []
    for i in range(len(filters)):
        with tf.name_scope('Layer'):
            # Filter shape: [fh, fw, in_channels, out_channels]
            weights.append(weight_variable(filters[i] + channels[i:i+2]))
            biases.append(bias_variable([channels[i+1]]))
    with tf.name_scope('Layer'):
        WConnect = weight_variable([conv_nodes, connect_nodes])
        BConnect = bias_variable([connect_nodes])
    with tf.name_scope('Layer'):
        WOutput = weight_variable([connect_nodes, output])
        BOutput = bias_variable([output])

    # ---------------- Operations ----------------
    # ------- Activation Function -------
    # Chain the conv layers; Y carries the activation from layer to layer.
    for i in range(len(strides)):
        with tf.name_scope('Wx_plus_b'):
            conv_layer = tf.nn.conv2d(
                Y, weights[i],
                strides=[1, strides[i], strides[i], 1],
                padding='SAME'
            )
            preactivate = conv_layer + biases[i]
            tf.summary.histogram('Pre_Activations', preactivate)
        activations = tf.nn.relu(preactivate)
        tf.summary.histogram('Activations', activations)
        Y = activations

    # Flatten the final conv activations for the fully-connected layer
    YY = tf.reshape(Y, [-1, conv_nodes])
    activations = tf.nn.relu(tf.matmul(YY, WConnect) + BConnect)
    fully_connected_dropout = tf.nn.dropout(activations, keep_prob)

    # ------- Regression Functions -------
    with tf.name_scope('Wx_plus_b'):
        logits = tf.matmul(fully_connected_dropout, WOutput, name="Product") + BOutput
        tf.summary.histogram('Pre_Activations', logits)
    Y = tf.nn.softmax(logits, name="Output_Result")

    # ------- Loss Function -------
    with tf.name_scope('Loss'):
        # softmax is applied internally; feed the raw logits
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=Y_, name="Cross_Entropy")
        with tf.name_scope('Total'):
            # * 100 only rescales the reported loss value
            loss = tf.reduce_mean(cross_entropy, name="loss") * 100
    tf.summary.scalar('Losses', loss)

    # ------- Optimizer -------
    with tf.name_scope('Optimizer'):
        optimizer = tf.train.AdamOptimizer(L)
        train_step = optimizer.minimize(loss, name="minimize")

    # ------- Accuracy -------
    with tf.name_scope('Accuracy'):
        with tf.name_scope('correct_prediction'):
            is_correct = tf.equal(
                tf.argmax(Y, 1, name="Max_Result"),
                tf.argmax(Y_, 1, name="Target")
            )
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
    tf.summary.scalar('Accuracies', accuracy)

    # ------- Tensor Graph -------
    # Start Tensor Graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Tensor Board
    merged_summary_op = tf.summary.merge_all()

    # Writer
    tensor_graph = tf.get_default_graph()
    train_writer = tf.summary.FileWriter(logs_path + "train", graph=tensor_graph)
    test_writer = tf.summary.FileWriter(logs_path + "test")

    # Embeddings (TensorBoard projector visualisation of the FC layer)
    assignment = embedding_initializer(
        fully_connected_dropout,
        embedding_size,
        test_writer,
        [height, width],
        sprite_path,
        label_path,
    )
    saver = tf.train.Saver()

    # ------- Training -------
    train_operations = [train_step, loss, merged_summary_op]
    test_operations = [accuracy, loss, merged_summary_op]
    # keep_prob 1.0 disables dropout at evaluation time; L is unused there
    test_data = {X: mnist.test.images, Y_: mnist.test.labels, keep_prob: 1.0, L: 0}
    embed_data = {X: mnist.test.images[:embedding_size], Y_: mnist.test.labels[:embedding_size], keep_prob: 1.0, L: 0}

    for epoch in range(epoch_total):
        avg_cost = 0.
        for i in range(batch_total):
            # step is global across epochs (drives decay/logging schedules)
            step = (batch_total * epoch) + i

            # ----- Train step -----
            batch_X, batch_Y = mnist.train.next_batch(batch_size)
            # Exponential decay from lrmax toward lrmin
            learning_rate = lrmin + (lrmax - lrmin) * exp(-step / decay_speed)
            train_data = {
                X: batch_X,
                Y_: batch_Y,
                L: learning_rate,
                keep_prob: keep_ratio
            }

            # Record execution stats (full trace) every 100th step
            if step % 100 == 99:
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                run_metadata = tf.RunMetadata()
                _, cross_loss, summary = sess.run(
                    train_operations,
                    feed_dict=train_data,
                    options=run_options,
                    run_metadata=run_metadata
                )
            else:
                _, cross_loss, summary = sess.run(
                    train_operations,
                    feed_dict=train_data
                )

            # ----- Test Step -----
            if step % test_freq == 0:
                acc, cross_loss, summary = sess.run(
                    test_operations,
                    feed_dict=test_data
                )
                test_writer.add_summary(summary, step)
                print('Accuracy at step %s: %s' % (step, acc))

            # ----- Embedding -----
            if step % 500 == 0:
                sess.run(assignment, feed_dict=embed_data)
                saver.save(sess, embed_path, step)

            avg_cost += cross_loss / batch_total
            train_writer.add_summary(summary, step)

        # Display logs per epoch step
        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
# Script entry point: build and train the network.
if __name__ == "__main__":
    run()
| |
#
# Copyright (c) 2011, Regents of the University of California
# BSD license, See the COPYING file for more information
# Written by: Derek Kulinski <takeda@takeda.tk>
# Jeff Burke <jburke@ucla.edu>
#
from . import _pyndn
import utils
class ContentType(utils.Enum):
    # Enum for the ContentObject type field; members are registered below
    # via new_flag() so they double as module-level constants.
    _prefix = "pyndn"

# NDN wire-format constants for the ContentObject type field.
CONTENT_DATA = ContentType.new_flag('CONTENT_DATA', 0x0C04C0)
CONTENT_ENCR = ContentType.new_flag('CONTENT_ENCR', 0x10D091)
CONTENT_GONE = ContentType.new_flag('CONTENT_GONE', 0x18E344)
CONTENT_KEY = ContentType.new_flag('CONTENT_KEY', 0x28463F)
CONTENT_LINK = ContentType.new_flag('CONTENT_LINK', 0x2C834A)
CONTENT_NACK = ContentType.new_flag('CONTENT_NACK', 0x34008A)
class ContentObject(object):
    """
    An NDN ContentObject (Data packet): name + content + SignedInfo.

    The wire encoding (ndn_data) is produced by sign(); until then the
    object is 'dirty' and reading ndn_data raises an error.
    """

    def __init__(self, name = None, content = None, signed_info = None):
        self.name = name
        self.content = content
        self.signedInfo = signed_info or SignedInfo()
        self.digestAlgorithm = None # Default

        # generated
        self.signature = None
        self.verified = False

        # pyndn
        self.ndn = None # Reference to NDN object
        self.ndn_data_dirty = True
        self.ndn_data = None # backing charbuf

    # this is the finalization step
    # must pass a key here, there is no "default key" because
    # a NDN handle is not required to create the content object
    # thus there is no access to the ndn library keystore.
    #
    def sign(self, key):
        # Encode + sign the wire representation and clear the dirty flag
        # so ndn_data becomes readable.
        self.ndn_data = _pyndn.encode_ContentObject(self, self.name.ndn_data, \
            self.content, self.signedInfo.ndn_data, key)
        self.ndn_data_dirty = False

    def digest(self):
        # Digest of the signed wire encoding (requires sign() first).
        return _pyndn.digest_contentobject(self.ndn_data)

    def verify_content(self, handle):
        return _pyndn.verify_content(handle.ndn_data, self.ndn_data)

    def verify_signature(self, key):
        return _pyndn.verify_signature(self.ndn_data, key.ndn_data_public)

    def matchesInterest(self, interest):
        # True if this ContentObject would satisfy the given Interest.
        return _pyndn.content_matches_interest(self.ndn_data, interest.ndn_data)

    def __setattr__(self, name, value):
        # Any change to a signed field invalidates the cached encoding.
        if name == 'name' or name == 'content' or name == 'signedInfo' or name == 'digestAlgorithm':
            self.ndn_data_dirty = True

        # Content is normalised to bytes by the C extension.
        if name == 'content':
            object.__setattr__(self, name, _pyndn.content_to_bytes(value))
        else:
            object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        # Guard: reading ndn_data on a dirty object is an error; sign()
        # must be called to (re)generate the encoding first.
        if name == "ndn_data":
            if object.__getattribute__(self, 'ndn_data_dirty'):
                raise _pyndn.NDNContentObjectError("Call sign() to finalize \
before accessing ndn_data for a ContentObject")
        return object.__getattribute__(self, name)

    # Where do we support versioning and segmentation?

    def __str__(self):
        ret = []
        ret.append("Name: %s" % self.name)
        ret.append("Content: %r" % self.content)
        ret.append("DigestAlg: %r" % self.digestAlgorithm)
        ret.append("SignedInfo: %s" % self.signedInfo)
        ret.append("Signature: %s" % self.signature)
        return "\n".join(ret)

    def __repr__(self):
        args = []

        if self.name is not None:
            args += ["name=%r" % self.name]

        if self.content is not None:
            args += ["content=%r" % self.content]

        if self.signedInfo is not None:
            args += ["signed_info=%r" % self.signedInfo]

        if self.signature is not None:
            args += ["<signed>"]

        return "pyndn.ContentObject(%s)" % ", ".join(args)
class Signature(object):
    """
    Signature block of a ContentObject.

    The wire encoding (ndn_data) is regenerated lazily on access whenever
    one of the signature fields has changed.
    """

    def __init__(self):
        self.digestAlgorithm = None
        self.witness = None
        self.signatureBits = None

        # pyndn
        self.ndn_data_dirty = False
        self.ndn_data = None

    def __setattr__(self, name, value):
        # Changing any signature field invalidates the cached encoding.
        if name == 'witness' or name == 'signatureBits' or name == 'digestAlgorithm':
            self.ndn_data_dirty = True
        object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        # Lazily re-encode on first access after a change.
        if name == "ndn_data":
            if object.__getattribute__(self, 'ndn_data_dirty'):
                self.ndn_data = _pyndn.Signature_obj_to_ndn(self)
                self.ndn_data_dirty = False
        return object.__getattribute__(self, name)

    def __str__(self):
        res = []
        res.append("digestAlgorithm = %s" % self.digestAlgorithm)
        res.append("witness = %s" % self.witness)
        res.append("signatureBits = %r" % self.signatureBits)
        return "\n".join(res)
class SignedInfo(object):
    """
    Metadata signed along with a ContentObject: publisher key digest,
    timestamp, content type, freshness, final block ID and key locator.

    The NDN wire encoding (ndn_data) is regenerated lazily whenever any
    field changes; py_timestamp is a derived, read-only python view of
    the NDN timeStamp.
    """

    def __init__(self, key_digest = None, key_locator = None, type = CONTENT_DATA,
                 freshness = None, final_block = None, py_timestamp = None,
                 timestamp = None):
        self.publisherPublicKeyDigest = key_digest

        # Either a python timestamp or an already-encoded NDN timestamp
        # may be given, but not both.
        if py_timestamp is not None:
            if timestamp:
                raise ValueError("You can define only timestamp or py_timestamp")
            self.timeStamp = utils.py2ndn_time(py_timestamp)
        else:
            self.timeStamp = timestamp

        self.type = type
        self.freshnessSeconds = freshness
        self.finalBlockID = final_block
        self.keyLocator = key_locator

        # pyndn
        self.ndn_data_dirty = True
        self.ndn_data = None # backing charbuf

    def __setattr__(self, name, value):
        # Any field change invalidates the cached encoding.
        if name != "ndn_data" and name != "ndn_data_dirty":
            self.ndn_data_dirty = True

        # Coerce raw values into the ContentType enum.
        if name == "type" and type(value) is not ContentType:
            value = ContentType(value)

        object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        # Lazily re-encode on first access after a change.
        if name == "ndn_data":
            if object.__getattribute__(self, 'ndn_data_dirty'):
                key_locator = self.keyLocator.ndn_data if self.keyLocator else None
                self.ndn_data = _pyndn.SignedInfo_to_ndn(\
                    self.publisherPublicKeyDigest, self.type, self.timeStamp, \
                    self.freshnessSeconds or (-1), self.finalBlockID, key_locator)
                self.ndn_data_dirty = False

        if name == "py_timestamp":
            # Fix: the original checked 'ts is None' twice (an early
            # 'return None' followed by the same conditional expression);
            # one check is sufficient and behavior is unchanged.
            ts = self.timeStamp
            return None if ts is None else utils.ndn2py_time(ts)

        return object.__getattribute__(self, name)

    def __repr__(self):
        args = []

        if self.publisherPublicKeyDigest is not None:
            args += ["key_digest=%r" % self.publisherPublicKeyDigest]
        if self.keyLocator is not None:
            args += ["key_locator=%r" % self.keyLocator]
        if self.type is not None:
            args += ["type=%r" % self.type]
        if self.freshnessSeconds is not None:
            args += ["freshness=%r" % self.freshnessSeconds]
        if self.finalBlockID is not None:
            args += ["final_block=%r" % self.finalBlockID]
        if self.timeStamp is not None:
            args += ["py_timestamp=%r" % self.py_timestamp]

        return "pyndn.SignedInfo(%s)" % ", ".join(args)
#
#
# These are not used in signing in Python (all the info needed is in SignedInfo)
# But it is here in case the parsing of the c library version of signing params
# is needed.
class SigningParams(object):
    """
    Mirror of the C library's ndn_signing_params structure.

    These are not used when signing from Python (all the needed info is in
    SignedInfo), but kept in case parsing of the C library's version of
    signing params is needed.
    """
    NDN_SP_TEMPL_TIMESTAMP = 0x0001
    NDN_SP_TEMPL_FINAL_BLOCK_ID = 0x0002
    NDN_SP_TEMPL_FRESHNESS = 0x0004
    NDN_SP_TEMPL_KEY_LOCATOR = 0x0008
    NDN_SP_FINAL_BLOCK = 0x0010
    NDN_SP_OMIT_KEY_LOCATOR = 0x0020

    def __init__(self):
        # Bug fix: the original used C-style statements such as
        # 'self.flags;', which merely *read* never-assigned attributes and
        # raised AttributeError on construction. Initialize them instead.
        self.flags = None       # Combination of the NDN_SP_* flags above
        self.type = None        # Content type; not really signing-related
        self.freshness = None

        # These three are only relevant, for now, if they are coming *from*
        # a c object; otherwise apiVersion is filled in from
        # NDN_SIGNING_PARAMS_INIT and both template and key come from the
        # ContentObject's SignedInfo object.
        self.apiVersion = None
        self.template = None    # SignedInfo referred to by this content object
        self.key = None         # Key to use; filled by a lookup against the SignedInfo

        # pyndn
        self.ndn_data_dirty = False
        self.ndn_data = None  # backing ndn_signing_params

    def __setattr__(self, name, value):
        # Any field change invalidates the cached C-side encoding.
        if name != "ndn_data" and name != "ndn_data_dirty":
            self.ndn_data_dirty = True
        object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        # Lazily re-encode on first access after a change.
        if name == "ndn_data":
            if object.__getattribute__(self, 'ndn_data_dirty'):
                self.ndn_data = _pyndn._pyndn_SigningParams_to_ndn(self)
                self.ndn_data_dirty = False
        return object.__getattribute__(self, name)

    def __get_ndn(self):
        pass
        # Call ndn_signed_info_create
# Call ndn_signed_info_create
| |
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_uvetest.py
#
# UVE and Alarm tests
#
import sys
builddir = sys.path[0] + '/../..'
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import os
import unittest
import testtools
import fixtures
import socket
from utils.util import obj_to_dict
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockredis import mockredis
from mockzoo import mockzoo
import logging
import time
from opserver.sandesh.viz.constants import *
from opserver.sandesh.viz.constants import _OBJECT_TABLES
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class AnalyticsUveTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
    # Skip entirely when the environment requests it.
    if AnalyticsUveTest._check_skip_test() is True:
        return

    # The test binaries must be picked up from the build tree.
    if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
        if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
            assert(False)

    # Shared redis and zookeeper instances for all tests in this class.
    cls.redis_port = AnalyticsUveTest.get_free_port()
    mockredis.start_redis(
        cls.redis_port, builddir+'/testroot/bin/redis-server')
    cls.zk_port = AnalyticsUveTest.get_free_port()
    mockzoo.start_zoo(cls.zk_port)
@classmethod
def tearDownClass(cls):
    # Mirror setUpClass: nothing was started if the tests were skipped.
    if AnalyticsUveTest._check_skip_test() is True:
        return

    # Stop the class-wide redis and zookeeper instances.
    mockredis.stop_redis(cls.redis_port)
    mockzoo.stop_zoo(cls.zk_port)
#@unittest.skip('Skipping non-cassandra test with vizd')
def test_00_nocassandra(self):
    '''
    This test starts redis,vizd,opserver and qed
    Then it checks that the collector UVE (via redis)
    can be accessed from opserver.
    '''
    logging.info("*** test_00_nocassandra ***")
    if AnalyticsUveTest._check_skip_test() is True:
        return True

    # Bring up the analytics stack without cassandra (port 0) and verify
    # that all components report as functional.
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
    assert vizd_obj.verify_on_setup()

    return True
# end test_00_nocassandra
#@unittest.skip('Skipping VM UVE test')
def test_01_vm_uve(self):
    '''
    This test starts redis, vizd, opserver, qed, and a python generator
    that simulates vrouter and sends UveVirtualMachineAgentTrace messages.
    Then it checks that the VM UVE (via redis) can be accessed from
    opserver, including delete/re-add cache behavior and XML escaping.
    '''
    logging.info("*** test_01_vm_uve ***")
    if AnalyticsUveTest._check_skip_test() is True:
        return True

    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.get_collector()]
    # Simulated vrouter agent that feeds UVEs into the collector.
    generator_obj = self.useFixture(
        GeneratorFixture("contrail-vrouter-agent", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert generator_obj.verify_on_setup()
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    # Delete the VM UVE and verify that the deleted flag is set
    # in the UVE cache
    generator_obj.delete_vm_uve('abcd')
    assert generator_obj.verify_vm_uve_cache(vm_id='abcd', delete=True)
    # Add the VM UVE with the same vm_id and verify that the deleted flag
    # is cleared in the UVE cache
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve_cache(vm_id='abcd')
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    # Generate VM with vm_id containing XML control character
    generator_obj.send_vm_uve(vm_id='<abcd&>', num_vm_ifs=2, msg_count=2)
    assert generator_obj.verify_vm_uve(vm_id='<abcd&>', num_vm_ifs=2,
                                       msg_count=2)
    return True
# end test_01_vm_uve
#@unittest.skip('Skipping VM UVE test')
def test_02_vm_uve_with_password(self):
    '''
    Same as test_01's basic flow, but with redis authentication enabled
    (redis_password='contrail'): starts redis, vizd, opserver, qed, and a
    python generator that simulates vrouter and sends
    UveVirtualMachineAgentTrace messages, then checks that the VM UVE
    (via redis) can be accessed from opserver.
    '''
    logging.info("*** test_02_vm_uve_with_password ***")
    if AnalyticsUveTest._check_skip_test() is True:
        return True

    # -1 redis port: the fixture allocates its own (password-protected) redis.
    vizd_obj = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0,
                         redis_password='contrail'))
    assert vizd_obj.verify_on_setup()
    collectors = [vizd_obj.get_collector()]
    generator_obj = self.useFixture(
        GeneratorFixture("contrail-vrouter-agent", collectors,
                         logging, vizd_obj.get_opserver_port()))
    assert generator_obj.verify_on_setup()
    generator_obj.send_vm_uve(vm_id='abcd',
                              num_vm_ifs=5,
                              msg_count=5)
    assert generator_obj.verify_vm_uve(vm_id='abcd',
                                       num_vm_ifs=5,
                                       msg_count=5)
    return True
# end test_02_vm_uve_with_password
#@unittest.skip('verify redis-uve restart')
def test_03_redis_uve_restart(self):
    """Verify that UVEs are resynced after the redis-uve instance restarts."""
    logging.info('*** test_03_redis_uve_restart ***')

    analytics = self.useFixture(
        AnalyticsFixture(logging, builddir, -1, 0))
    self.verify_uve_resync(analytics)
# end test_03_redis_uve_restart
#@unittest.skip('verify redis-uve restart')
def test_04_redis_uve_restart_with_password(self):
    """Same as test_03, but with redis authentication enabled."""
    # Bug fix: the log line previously announced test_03's name,
    # which made the test logs misleading.
    logging.info('*** test_04_redis_uve_restart_with_password ***')

    vizd_obj = self.useFixture(
        AnalyticsFixture(logging,
                         builddir, -1, 0,
                         redis_password='contrail'))
    self.verify_uve_resync(vizd_obj)
    return True
# end test_04_redis_uve_restart
def verify_uve_resync(self, vizd_obj):
    """Stop and restart redis-uve and check that the collector and opserver
    reconnect and that the generator UVE list is resynced afterwards.
    """
    assert vizd_obj.verify_on_setup()
    assert vizd_obj.verify_collector_redis_uve_connection(
                        vizd_obj.collectors[0])
    assert vizd_obj.verify_opserver_redis_uve_connection(
                        vizd_obj.opserver)
    # verify redis-uve list
    host = socket.gethostname()
    gen_list = [host+':Analytics:contrail-collector:0',
                host+':Analytics:contrail-query-engine:0',
                host+':Analytics:contrail-analytics-api:0']
    assert vizd_obj.verify_generator_uve_list(gen_list)
    # stop redis-uve; both consumers must notice the disconnect
    vizd_obj.redis_uves[0].stop()
    assert vizd_obj.verify_collector_redis_uve_connection(
                        vizd_obj.collectors[0], False)
    assert vizd_obj.verify_opserver_redis_uve_connection(
                        vizd_obj.opserver, False)
    # start redis-uve and verify that contrail-collector and Opserver are
    # connected to the redis-uve
    vizd_obj.redis_uves[0].start()
    assert vizd_obj.verify_collector_redis_uve_connection(
                        vizd_obj.collectors[0])
    assert vizd_obj.verify_opserver_redis_uve_connection(
                        vizd_obj.opserver)
    # verify that UVEs are resynced with redis-uve
    assert vizd_obj.verify_generator_uve_list(gen_list)
#@unittest.skip('Skipping contrail-collector HA test')
    def test_05_collector_ha(self):
        """Validate contrail-collector HA failover and failback.

        Starts two collectors and walks the generators (OpServer, QE,
        contrail-vrouter-agent) through a sequence of collector stops and
        starts, asserting after each transition that every generator is
        connected to the expected collector and that the generator list
        kept in that collector's redis instance is correct.
        """
        logging.info('*** test_05_collector_ha ***')
        if AnalyticsUveTest._check_skip_test() is True:
            return True
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, -1, 0,
                             collector_ha_test=True))
        assert vizd_obj.verify_on_setup()
        # OpServer, AlarmGen and QE are started with collectors[0] as
        # primary and collectors[1] as secondary
        exp_genlist = ['contrail-collector', 'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # start the contrail-vrouter-agent with collectors[1] as primary and
        # collectors[0] as secondary
        collectors = [vizd_obj.collectors[1].get_addr(),
                      vizd_obj.collectors[0].get_addr()]
        vr_agent = self.useFixture(
            GeneratorFixture("contrail-vrouter-agent", collectors,
                             logging, vizd_obj.get_opserver_port()))
        assert vr_agent.verify_on_setup()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # stop collectors[0] and verify that OpServer, AlarmGen and QE switch
        # from primary to secondary collector
        vizd_obj.collectors[0].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # start collectors[0]
        vizd_obj.collectors[0].start()
        # only its own collector generator should be connected right after
        # restart -- no client has failed back to collectors[0] yet
        exp_genlist = ['contrail-collector']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify that the old UVEs are flushed from redis when collector restarts
        exp_genlist = [vizd_obj.collectors[0].get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # stop collectors[1] and verify that OpServer, AlarmGen and QE switch
        # from secondary to primary and contrail-vrouter-agent from primary to
        # secondary
        vizd_obj.collectors[1].stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify the generator list in redis
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vr_agent.get_generator_id(),
                       vizd_obj.opserver.get_generator_id(),
                       vizd_obj.query_engine.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # stop Opserver , AlarmGen and QE
        vizd_obj.opserver.stop()
        vizd_obj.query_engine.stop()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # verify the generator list in redis
        exp_genlist = [vizd_obj.collectors[0].get_generator_id(),
                       vr_agent.get_generator_id()]
        assert vizd_obj.verify_generator_list_in_redis(\
            vizd_obj.collectors[0].get_redis_uve(),
            exp_genlist)
        # start Opserver and QE with collectors[1] as the primary and
        # collectors[0] as the secondary. On generator startup, verify
        # that it connects to the secondary collector, if the
        # connection to the primary fails
        vizd_obj.opserver.set_primary_collector(
            vizd_obj.collectors[1].get_addr())
        vizd_obj.opserver.set_secondary_collector(
            vizd_obj.collectors[0].get_addr())
        vizd_obj.opserver.start()
        vizd_obj.query_engine.set_primary_collector(
            vizd_obj.collectors[1].get_addr())
        vizd_obj.query_engine.set_secondary_collector(
            vizd_obj.collectors[0].get_addr())
        vizd_obj.query_engine.start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
                                              exp_genlist)
        # stop the collectors[0] - both collectors[0] and collectors[1] are down
        # send the VM UVE and verify that the VM UVE is synced after connection
        # to the collector
        vizd_obj.collectors[0].stop()
        # Make sure the connection to the collector is torn down before
        # sending the VM UVE
        # NOTE(review): this polls verify_on_setup() in a tight loop with no
        # sleep; relies on verify_on_setup() itself blocking/retrying.
        while True:
            if vr_agent.verify_on_setup() is False:
                break
        vr_agent.send_vm_uve(vm_id='abcd-1234-efgh-5678',
                             num_vm_ifs=5, msg_count=5)
        vizd_obj.collectors[1].start()
        exp_genlist = ['contrail-collector', 'contrail-vrouter-agent',
                       'contrail-analytics-api',
                       'contrail-query-engine']
        assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
                                              exp_genlist)
        # the VM UVE sent while disconnected must be synced on reconnect
        assert vr_agent.verify_vm_uve(vm_id='abcd-1234-efgh-5678',
                                      num_vm_ifs=5, msg_count=5)
    # end test_05_collector_ha
#@unittest.skip('Skipping AlarmGen basic test')
    def test_06_alarmgen_basic(self):
        '''
        This test starts the analytics processes.
        It enables partition 0 on alarmgen, and confirms
        that it got enabled. It then raises and clears
        PartialSysinfoCompute alarms on vrouter UVEs via three paths:
        sending build_info, deleting the UVE, and deleting the
        generator. Finally it gives up all partitions and confirms
        the alarms are gone.
        '''
        logging.info("*** test_06_alarmgen_basic ***")
        if AnalyticsUveTest._check_skip_test() is True:
            return True
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, self.__class__.redis_port, 0,
                             kafka_zk = self.__class__.zk_port))
        assert vizd_obj.verify_on_setup()
        # acquire partitions 0-3 on alarmgen and verify ownership of 0
        assert(vizd_obj.set_alarmgen_partition(0,1) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'true'))
        assert(vizd_obj.set_alarmgen_partition(1,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,1) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,1) == 'true')
        assert(vizd_obj.verify_uvetable_alarm("ObjectCollectorInfo",
            "ObjectCollectorInfo:" + socket.gethostname(), "ProcessStatus"))
        # setup generator for sending Vrouter build_info
        collector = vizd_obj.collectors[0].get_addr()
        alarm_gen1 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname()))
        alarm_gen1.verify_on_setup()
        # no vrouter alarms expected before any UVE is sent
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            None, None))
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen1.send_vrouterinfo("myvrouter")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter", "PartialSysinfoCompute"))
        # Now try to clear the alarm by sending build_info
        alarm_gen1.send_vrouterinfo("myvrouter", b_info = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter", "PartialSysinfoCompute", is_set = False))
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen1.send_vrouterinfo("myvrouter1")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "PartialSysinfoCompute"))
        # Now try to clear the alarm by deleting the UVE
        alarm_gen1.send_vrouterinfo("myvrouter1", deleted = True)
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter1", "PartialSysinfoCompute", is_set = False))
        # second generator instance (inst="1") on the same host
        alarm_gen2 = self.useFixture(
            GeneratorFixture('vrouter-agent', [collector], logging,
                             None, hostname=socket.gethostname(), inst = "1"))
        alarm_gen2.verify_on_setup()
        # send vrouter UVE without build_info !!!
        # check for PartialSysinfo alarm
        alarm_gen2.send_vrouterinfo("myvrouter2")
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "PartialSysinfoCompute"))
        # Now try to clear the alarm by deleting the Generator
        #del alarm_gen1
        alarm_gen2.cleanUp()
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            "ObjectVRouter:myvrouter2", "PartialSysinfoCompute", is_set = False))
        # Verify that we can give up partition ownership
        assert(vizd_obj.set_alarmgen_partition(0,0) == 'true')
        assert(vizd_obj.verify_alarmgen_partition(0,'false'))
        # Give up the other partitions
        assert(vizd_obj.set_alarmgen_partition(1,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(2,0) == 'true')
        assert(vizd_obj.set_alarmgen_partition(3,0) == 'true')
        # Confirm that alarms are all gone
        assert(vizd_obj.verify_uvetable_alarm("ObjectVRouter",
            None, None))
        return True
    # end test_06_alarmgen_basic
#@unittest.skip('Skipping Alarm test')
def test_07_alarm(self):
'''
This test starts redis, collectors, analytics-api and
python generators that simulates alarm generator. This
test sends alarms from alarm generators and verifies the
retrieval of alarms from analytics-api.
'''
logging.info('*** test_07_alarm ***')
# collector_ha_test flag is set to True, because we wanna test
# retrieval of alarms across multiple redis servers.
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
# create alarm-generator and attach it to the first collector.
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, hostname=socket.gethostname()+'_1'))
alarm_gen1.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen1.create_process_state_alarm(
'contrail-query-engine')
alarms += alarm_gen1.create_process_state_alarm(
'contrail-snmp-collector')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
analytics_tbl = _OBJECT_TABLES[COLLECTOR_INFO_TABLE].log_query_name
# send proces state alarm for control-node
alarms = alarm_gen1.create_process_state_alarm('contrail-dns')
alarm_gen1.send_alarm('<&'+socket.gethostname()+'_1>', alarms,
BGP_ROUTER_TABLE)
control_tbl = _OBJECT_TABLES[BGP_ROUTER_TABLE].log_query_name
# create another alarm-generator and attach it to the second collector.
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, hostname=socket.gethostname()+'_2'))
alarm_gen2.verify_on_setup()
# send process state alarm for analytics-node
alarms = alarm_gen2.create_process_state_alarm(
'contrail-topology')
alarm_gen2.send_alarm(socket.gethostname()+'_2', alarms,
COLLECTOR_INFO_TABLE)
keys = [socket.gethostname()+'_1', socket.gethostname()+'_2']
assert(vizd_obj.verify_alarm_list(analytics_tbl,
expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[1], obj_to_dict(
alarm_gen2.alarms[COLLECTOR_INFO_TABLE][keys[1]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list(control_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# delete analytics-node alarm generated by alarm_gen2
alarm_gen2.delete_alarm(socket.gethostname()+'_2',
COLLECTOR_INFO_TABLE)
# verify analytics-node alarms
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list(analytics_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
assert(vizd_obj.verify_alarm(analytics_tbl,
socket.gethostname()+'_2', {}))
# Disconnect alarm_gen1 from Collector and verify that all
# alarms generated by alarm_gen1 is removed by the Collector.
alarm_gen1.disconnect_from_collector()
assert(vizd_obj.verify_alarm_list(analytics_tbl, expected_alarms=[]))
assert(vizd_obj.verify_alarm(analytics_tbl,
socket.gethostname()+'_1', {}))
assert(vizd_obj.verify_alarm_list(control_tbl, expected_alarms=[]))
assert(vizd_obj.verify_alarm(control_tbl,
'<&'+socket.gethostname()+'_1', {}))
# update analytics-node alarm in disconnect state
alarms = alarm_gen1.create_process_state_alarm(
'contrail-snmp-collector')
alarm_gen1.send_alarm(socket.gethostname()+'_1', alarms,
COLLECTOR_INFO_TABLE)
# Connect alarm_gen1 to Collector and verify that all
# alarms generated by alarm_gen1 is synced with Collector.
alarm_gen1.connect_to_collector()
keys = [socket.gethostname()+'_1']
assert(vizd_obj.verify_alarm_list(analytics_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(analytics_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[COLLECTOR_INFO_TABLE][keys[0]].data)))
keys = ['<&'+socket.gethostname()+'_1>']
assert(vizd_obj.verify_alarm_list(control_tbl, expected_alarms=keys))
assert(vizd_obj.verify_alarm(control_tbl, keys[0], obj_to_dict(
alarm_gen1.alarms[BGP_ROUTER_TABLE][keys[0]].data)))
# end test_07_alarm
#@unittest.skip('Skipping UVE/Alarm Filter test')
def test_08_uve_alarm_filter(self):
'''
This test verifies the filter options kfilt, sfilt, mfilt and cfilt
in the UVE/Alarm GET and POST methods.
'''
logging.info('*** test_08_uve_alarm_filter ***')
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1, 0, collector_ha_test=True))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.collectors[0].get_addr(),
vizd_obj.collectors[1].get_addr()]
api_server_name = socket.gethostname()+'_1'
api_server = self.useFixture(
GeneratorFixture('contrail-api', [collectors[0]], logging,
None, node_type='Config',
hostname=api_server_name))
vr_agent_name = socket.gethostname()+'_2'
vr_agent = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', [collectors[1]],
logging, None, node_type='Compute',
hostname=vr_agent_name))
alarm_gen1_name = socket.gethostname()+'_1'
alarm_gen1 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[0]], logging,
None, node_type='Analytics',
hostname=alarm_gen1_name))
alarm_gen2_name = socket.gethostname()+'_3'
alarm_gen2 = self.useFixture(
GeneratorFixture('contrail-alarm-gen', [collectors[1]], logging,
None, node_type='Analytics',
hostname=alarm_gen2_name))
api_server.verify_on_setup()
vr_agent.verify_on_setup()
alarm_gen1.verify_on_setup()
alarm_gen2.verify_on_setup()
vn_list = ['default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1']
# generate UVEs for the filter test
api_server.send_vn_config_uve(name=vn_list[0],
partial_conn_nw=[vn_list[1]],
num_acl_rules=2)
api_server.send_vn_config_uve(name=vn_list[1],
num_acl_rules=3)
vr_agent.send_vn_agent_uve(name=vn_list[1], num_acl_rules=3,
ipkts=2, ibytes=1024)
vr_agent.send_vn_agent_uve(name=vn_list[2], ipkts=4, ibytes=128)
# generate Alarms for the filter test
alarms = alarm_gen1.create_alarm('InPktsThreshold',
'UveVirtualNetworkAgent.in_tpkts < 2',
'UveVirtualNetworkAgent.in_tpkts == 2')
alarms += alarm_gen1.create_alarm('InBytesThreshold',
'UveVirtualNetworkAgent.in_bytes < 512',
'UveVirtualNetworkAgent.in_bytes == 1024', ack=True)
alarm_gen1.send_alarm(vn_list[1], alarms, VN_TABLE)
alarms = alarm_gen2.create_alarm('ConfigNotPresent',
'UveVirtualNetworkConfig != False',
'UveVirtualNetworkConfig == False', ack=False)
alarm_gen2.send_alarm(vn_list[2], alarms, VN_TABLE)
filt_test = [
# no filter
{
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
# kfilt
{
'kfilt': ['*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'kfilt': ['default-domain:project1:*',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'kfilt': ['default-domain:project1:vn1',
'default-domain:project2:*'],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'kfilt': [
'default-domain:project2:*',
'invalid-vn:*'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'kfilt': [
'default-domain:project1:vn2',
'default-domain:project2:vn1',
'invalid-vn'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'kfilt': ['invalid-vn'],
'uve_list_get': [],
'uve_get_post': {'value': []},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# sfilt
{
'sfilt': socket.gethostname()+'_1',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
}
]
}
},
{
'sfilt': socket.gethostname()+'_3',
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'sfilt': 'invalid_source',
'uve_list_get': [],
'uve_get_post': {'value': []},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# mfilt
{
'mfilt': 'Config:contrail-api:0',
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
}
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
{
'mfilt': 'Analytics:contrail-alarm-gen:0',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'mfilt': 'Analytics:contrail-invalid:0',
'uve_list_get': [],
'uve_get_post': {'value': []},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# cfilt
{
'cfilt': ['UveVirtualNetworkAgent'],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
{
'cfilt': [
'UveVirtualNetworkAgent:total_acl_rules',
'UveVirtualNetworkConfig:partially_connected_networks'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
]
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'total_acl_rules': 3
}
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
{
'cfilt': [
'UveVirtualNetworkConfig:invalid',
'UveVirtualNetworkAgent:in_tpkts',
'UVEAlarms:alarms'
],
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
{
'cfilt': [
'UveVirtualNetworkAgent:invalid',
'UVEAlarms:invalid_alarms',
'invalid'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# ackfilt
{
'ackfilt': True,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
}
}
}
]
}
},
{
'ackfilt': False,
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
},
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
},
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
}
]
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
# kfilt + sfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'uve_list_get': [
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 2,
'in_bytes': 1024,
'total_acl_rules': 3
}
}
},
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# kfilt + sfilt + ackfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project2:*',
'default-domain:invalid'
],
'sfilt': socket.gethostname()+'_2',
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UveVirtualNetworkAgent': {
'in_tpkts': 4,
'in_bytes': 128
}
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# kfilt + sfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:vn1'
],
'sfilt': socket.gethostname()+'_1',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms',
'UveVirtualNetworkConfig:Invalid'
],
'uve_list_get': [
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project1:vn2'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project1:vn2',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'InPktsThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_tpkts < 2',
'value': 'UveVirtualNetworkAgent.in_tpkts == 2'
}
]
},
{
'type': 'InBytesThreshold',
'description': [
{
'rule': 'UveVirtualNetworkAgent.in_bytes < 512',
'value': 'UveVirtualNetworkAgent.in_bytes == 1024',
}
],
'ack': True
}
]
}
}
}
]
}
},
# kfilt + mfilt + cfilt
{
'kfilt': ['*'],
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkAgent',
'UVEAlarms:alarms'
],
'uve_list_get': [],
'uve_get_post': {'value': []},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
# kfilt + sfilt + mfilt + cfilt
{
'kfilt': [
'default-domain:project1:vn1',
'default-domain:project1:vn2',
'default-domain:project2:*'
],
'sfilt': socket.gethostname()+'_1',
'mfilt': 'Config:contrail-api:0',
'cfilt': [
'UveVirtualNetworkConfig:partially_connected_networks',
'UveVirtualNetworkConfig:total_acl_rules',
'UVEAlarms'
],
'uve_list_get': [
'default-domain:project1:vn1',
'default-domain:project1:vn2'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project1:vn1',
'value': {
'UveVirtualNetworkConfig': {
'partially_connected_networks': [
'default-domain:project1:vn2'
],
'total_acl_rules': 2
}
}
},
{
'name': 'default-domain:project1:vn2',
'value': {
'UveVirtualNetworkConfig': {
'total_acl_rules': 3
},
}
}
]
},
'alarm_list_get': [],
'alarm_get_post': {'value': []}
},
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
},
'alarm_list_get': [
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
'alarms': [
{
'type': 'ConfigNotPresent',
'description': [
{
'rule': 'UveVirtualNetworkConfig != False',
'value': 'UveVirtualNetworkConfig == False'
}
],
'ack': False
}
]
}
}
}
]
}
},
# kfilt + sfilt + mfilt + cfilt + ackfilt
{
'kfilt': [
'default-domain:project1:*',
'default-domain:project2:vn1',
'default-domain:project2:invalid'
],
'sfilt': socket.gethostname()+'_3',
'mfilt': 'Analytics:contrail-alarm-gen:0',
'cfilt': [
'UveVirtualNetworkConfig',
'UVEAlarms:alarms',
'UveVirtualNetworkAgent'
],
'ackfilt': True,
'uve_list_get': [
'default-domain:project2:vn1'
],
'uve_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
}
}
}
]
},
'alarm_list_get': [
'default-domain:project2:vn1'
],
'alarm_get_post': {
'value': [
{
'name': 'default-domain:project2:vn1',
'value': {
'UVEAlarms': {
}
}
}
]
}
}
]
vn_table = _OBJECT_TABLES[VN_TABLE].log_query_name
for i in range(len(filt_test)):
filters = dict(kfilt=filt_test[i].get('kfilt'),
sfilt=filt_test[i].get('sfilt'),
mfilt=filt_test[i].get('mfilt'),
cfilt=filt_test[i].get('cfilt'),
ackfilt=filt_test[i].get('ackfilt'))
assert(vizd_obj.verify_uve_list(vn_table,
filts=filters, exp_uve_list=filt_test[i]['uve_list_get']))
assert(vizd_obj.verify_multi_uve_get(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_uve_post(vn_table,
filts=filters, exp_uves=filt_test[i]['uve_get_post']))
assert(vizd_obj.verify_alarm_list(vn_table,
filts=filters, expected_alarms=filt_test[i]['alarm_list_get']))
assert(vizd_obj.verify_multi_alarm_get(vn_table,
filts=filters, exp_alarms=filt_test[i]['alarm_get_post']))
assert(vizd_obj.verify_alarm_post(vn_table,
filts=filters, exp_alarms=filt_test[i]['alarm_get_post']))
# end test_08_uve_alarm_filter
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_test():
if (socket.gethostname() == 'build01'):
logging.info("Skipping test")
return True
return False
def _term_handler(*_):
    # Signal handler: convert SIGINT into an IntSignal exception so the
    # test run can unwind cleanly; the handler's arguments are ignored.
    raise IntSignal()
if __name__ == '__main__':
    # Route SIGINT through gevent so greenlets are interrupted, then run
    # the unittest entry point.
    # NOTE(review): gevent.signal as a callable is deprecated in newer gevent
    # releases (gevent.signal_handler) -- confirm against the pinned version.
    gevent.signal(signal.SIGINT,_term_handler)
    unittest.main(catchbreak=True)
| |
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
import calendar
import datetime
import time
from email.utils import format_datetime as format_datetime_rfc5322
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import (
_datetime_ambiguous_or_imaginary, get_default_timezone, is_aware, is_naive,
make_aware,
)
from django.utils.translation import gettext as _
# Splits a format string into alternating literal/specifier tokens: matches
# any single recognized format character that is not escaped by a backslash.
re_formatchars = _lazy_re_compile(r'(?<!\\)([aAbcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
# Matches a backslash-escaped character so the escape can be stripped.
re_escaped = _lazy_re_compile(r'\\(.)')
class Formatter:
    """Base class that expands a PHP-style format string.

    ``re_formatchars.split`` yields alternating literal and specifier
    tokens; each specifier character is dispatched to the method of the
    same name on the concrete subclass.
    """

    def format(self, formatstr):
        out = []
        tokens = re_formatchars.split(str(formatstr))
        for idx, token in enumerate(tokens):
            if idx % 2:
                # Odd positions are format specifiers. Plain dates must not
                # use time-only specifiers.
                if type(self.data) is datetime.date and hasattr(TimeFormat, token):
                    raise TypeError(
                        "The format for date objects may not contain "
                        "time-related format specifiers (found '%s')." % token
                    )
                out.append(str(getattr(self, token)()))
            elif token:
                # Literal text: strip the backslash escapes.
                out.append(re_escaped.sub(r'\1', token))
        return ''.join(out)
class TimeFormat(Formatter):
    """Implements the time-related PHP date() specifiers (a, A, e, f, ...)."""
    def __init__(self, obj):
        # obj is a datetime.time, datetime.date or datetime.datetime.
        self.data = obj
        self.timezone = None
        # We only support timezone when formatting datetime objects,
        # not date objects (timezone information not appropriate),
        # or time objects (against established django policy).
        if isinstance(obj, datetime.datetime):
            if is_naive(obj):
                self.timezone = get_default_timezone()
            else:
                self.timezone = obj.tzinfo
    def a(self):
        "'a.m.' or 'p.m.'"
        if self.data.hour > 11:
            return _('p.m.')
        return _('a.m.')
    def A(self):
        "'AM' or 'PM'"
        if self.data.hour > 11:
            return _('PM')
        return _('AM')
    def e(self):
        """
        Timezone name.
        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""
        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                return self.data.tzname() or ''
        except NotImplementedError:
            # Custom tzinfo classes may leave tzname() unimplemented.
            pass
        return ""
    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        if self.data.minute == 0:
            return self.g()
        return '%s:%s' % (self.g(), self.i())
    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        return self.data.hour % 12 or 12
    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour
    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()
    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()
    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute
    def O(self):  # NOQA: E743, E741
        """
        Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""
        seconds = self.Z()
        if seconds == "":
            # Z() reports "" for ambiguous/imaginary local times.
            return ""
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0 and self.data.hour == 0:
            return _('midnight')
        if self.data.minute == 0 and self.data.hour == 12:
            return _('noon')
        return '%s %s' % (self.f(), self.a())
    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second
    def T(self):
        """
        Time zone of this machine; e.g. 'EST' or 'MDT'.
        If timezone information is not available, return an empty string.
        """
        if not self.timezone:
            return ""
        if not _datetime_ambiguous_or_imaginary(self.data, self.timezone):
            name = self.timezone.tzname(self.data)
        else:
            # Ambiguous/imaginary local times have no reliable abbreviation;
            # fall back to the numeric offset ('O' specifier).
            name = self.format('O')
        return str(name)
    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond
    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.
        If timezone information is not available, return an empty string.
        """
        if (
            not self.timezone or
            _datetime_ambiguous_or_imaginary(self.data, self.timezone)
        ):
            return ""
        offset = self.timezone.utcoffset(self.data)
        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
    """Implements the date-related PHP date() specifiers (b, c, d, D, ...)."""
    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]
    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()
    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day
    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]
    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]
    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]
    def I(self):  # NOQA: E743, E741
        "'1' if Daylight Savings Time, '0' otherwise."
        if (
            not self.timezone or
            _datetime_ambiguous_or_imaginary(self.data, self.timezone)
        ):
            return ''
        return '1' if self.timezone.dst(self.data) else '0'
    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day
    def l(self):  # NOQA: E743, E741
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]
    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)
    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month
    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()
    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month
    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]
    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]
    def r(self):
        "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        if type(self.data) is datetime.date:
            # 'r' needs a time component; a plain date cannot provide it.
            raise TypeError(
                "The format for date objects may not contain time-related "
                "format specifiers (found 'r')."
            )
        if is_naive(self.data):
            dt = make_aware(self.data, timezone=self.timezone)
        else:
            dt = self.data
        return format_datetime_rfc5322(dt)
    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13):  # Special case
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'
    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))
    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7
    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        return self.data.isocalendar()[1]
    def y(self):
        """Year, 2 digits with leading zeros; e.g. '99'."""
        return '%02d' % (self.data.year % 100)
    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year
    def z(self):
        """Day of the year, i.e. 1 to 366."""
        return self.data.timetuple().tm_yday
def format(value, format_string):
    """Convenience wrapper: format *value* using DateFormat."""
    return DateFormat(value).format(format_string)
def time_format(value, format_string):
    """Convenience wrapper: format *value* using TimeFormat."""
    return TimeFormat(value).format(format_string)
| |
#!/usr/bin/env python
#
# Copyright 2014 Corgan Labs
# See LICENSE.txt for distribution terms
#
import os
import hmac
import hashlib
import ecdsa
import struct
import Base58
from hashlib import sha256
from ecdsa.curves import SECP256k1
from ecdsa.ecdsa import int_to_string, string_to_int
from ecdsa.numbertheory import square_root_mod_prime as sqrt_mod
MIN_ENTROPY_LEN = 128 # bits
BIP32_HARDEN = 0x80000000 # choose from hardened set of child keys
# secp256k1 domain parameters used throughout the key derivation below.
CURVE_GEN = ecdsa.ecdsa.generator_secp256k1
CURVE_ORDER = CURVE_GEN.order()
FIELD_ORDER = SECP256k1.curve.p()
INFINITY = ecdsa.ellipticcurve.INFINITY
EX_MAIN_PRIVATE = '0488ade4'.decode('hex') # Version string for mainnet extended private keys
EX_MAIN_PUBLIC = '0488b21e'.decode('hex') # Version string for mainnet extended public keys
class BIP32Key(object):
# Static initializers to create from entropy or external formats
#
@staticmethod
def fromEntropy(entropy, public=False):
"Create a BIP32Key using supplied entropy >= MIN_ENTROPY_LEN"
if entropy == None:
entropy = os.urandom(MIN_ENTROPY_LEN/8) # Python doesn't have os.random()
if not len(entropy) >= MIN_ENTROPY_LEN/8:
raise ValueError("Initial entropy %i must be at least %i bits" %
(len(entropy), MIN_ENTROPY_LEN))
I = hmac.new("Bitcoin seed", entropy, hashlib.sha512).digest()
Il, Ir = I[:32], I[32:]
# FIXME test Il for 0 or less than SECP256k1 prime field order
key = BIP32Key(secret=Il, chain=Ir, depth=0, index=0, fpr='\0\0\0\0', public=False)
if public:
key.SetPublic()
return key
@staticmethod
def fromExtendedKey(xkey, public=False):
"""
Create a BIP32Key by importing from extended private or public key string
If public is True, return a public-only key regardless of input type.
"""
# Sanity checks
raw = Base58.check_decode(xkey)
if len(raw) != 78:
raise ValueError("extended key format wrong length")
# Verify address version/type
version = raw[:4]
if version == EX_MAIN_PRIVATE:
keytype = 'xprv'
elif version == EX_MAIN_PUBLIC:
keytype = 'xpub'
else:
raise ValueError("unknown extended key version")
# Extract remaining fields
depth = ord(raw[4])
fpr = raw[5:9]
child = struct.unpack(">L", raw[9:13])[0]
chain = raw[13:45]
secret = raw[45:78]
# Extract private key or public key point
if keytype == 'xprv':
secret = secret[1:]
else:
# Recover public curve point from compressed key
lsb = ord(secret[0]) & 1
x = string_to_int(secret[1:])
ys = (x**3+7) % FIELD_ORDER # y^2 = x^3 + 7 mod p
y = sqrt_mod(ys, FIELD_ORDER)
if y & 1 != lsb:
y = FIELD_ORDER-y
point = ecdsa.ellipticcurve.Point(SECP256k1.curve, x, y)
secret = ecdsa.VerifyingKey.from_public_point(point, curve=SECP256k1)
is_pubkey = (keytype == 'xpub')
key = BIP32Key(secret=secret, chain=chain, depth=depth, index=child, fpr=fpr, public=is_pubkey)
if not is_pubkey and public:
key = key.SetPublic()
return key
# Normal class initializer
def __init__(self, secret, chain, depth, index, fpr, public=False):
"""
Create a public or private BIP32Key using key material and chain code.
secret This is the source material to generate the keypair, either a
32-byte string representation of a private key, or the ECDSA
library object representing a public key.
chain This is a 32-byte string representation of the chain code
depth Child depth; parent increments its own by one when assigning this
index Child index
fpr Parent fingerprint
public If true, this keypair will only contain a public key and can only create
a public key chain.
"""
self.public = public
if public is False:
self.k = ecdsa.SigningKey.from_string(secret, curve=SECP256k1)
self.K = self.k.get_verifying_key()
else:
self.k = None
self.K = secret
self.C = chain
self.depth = depth
self.index = index
self.parent_fpr = fpr
# Internal methods not intended to be called externally
#
def hmac(self, data):
"""
Calculate the HMAC-SHA512 of input data using the chain code as key.
Returns a tuple of the left and right halves of the HMAC
"""
I = hmac.new(self.C, data, hashlib.sha512).digest()
return (I[:32], I[32:])
def CKDpriv(self, i):
"""
Create a child key of index 'i'.
If the most significant bit of 'i' is set, then select from the
hardened key set, otherwise, select a regular child key.
Returns a BIP32Key constructed with the child key parameters,
or None if i index would result in an invalid key.
"""
# Index as bytes, BE
i_str = struct.pack(">L", i)
# Data to HMAC
if i & BIP32_HARDEN:
data = b'\0' + self.k.to_string() + i_str
else:
data = self.PublicKey() + i_str
# Get HMAC of data
(Il, Ir) = self.hmac(data)
# Construct new key material from Il and current private key
Il_int = string_to_int(Il)
if Il_int > CURVE_ORDER:
return None
pvt_int = string_to_int(self.k.to_string())
k_int = (Il_int + pvt_int) % CURVE_ORDER
if (k_int == 0):
return None
secret = (b'\0'*32 + int_to_string(k_int))[-32:]
# Construct and return a new BIP32Key
return BIP32Key(secret=secret, chain=Ir, depth=self.depth+1, index=i, fpr=self.Fingerprint(), public=False)
def CKDpub(self, i):
"""
Create a publicly derived child key of index 'i'.
If the most significant bit of 'i' is set, this is
an error.
Returns a BIP32Key constructed with the child key parameters,
or None if index would result in invalid key.
"""
if i & BIP32_HARDEN:
raise Exception("Cannot create a hardened child key using public child derivation")
# Data to HMAC. Same as CKDpriv() for public child key.
data = self.PublicKey() + struct.pack(">L", i)
# Get HMAC of data
(Il, Ir) = self.hmac(data)
# Construct curve point Il*G+K
Il_int = string_to_int(Il)
if Il_int >= CURVE_ORDER:
return None
point = Il_int*CURVE_GEN + self.K.pubkey.point
if point == INFINITY:
return None
# Retrieve public key based on curve point
K_i = ecdsa.VerifyingKey.from_public_point(point, curve=SECP256k1)
# Construct and return a new BIP32Key
return BIP32Key(secret=K_i, chain=Ir, depth=self.depth+1, index=i, fpr=self.Fingerprint(), public=True)
# Public methods
#
def ChildKey(self, i):
"""
Create and return a child key of this one at index 'i'.
The index 'i' should be summed with BIP32_HARDEN to indicate
to use the private derivation algorithm.
"""
if self.public is False:
return self.CKDpriv(i)
else:
return self.CKDpub(i)
def SetPublic(self):
"Convert a private BIP32Key into a public one"
self.k = None
self.public = True
def PrivateKey(self):
"Return private key as string"
if self.public:
raise Exception("Publicly derived deterministic keys have no private half")
else:
return self.k.to_string()
def PublicKey(self):
"Return compressed public key encoding"
padx = (b'\0'*32 + int_to_string(self.K.pubkey.point.x()))[-32:]
if self.K.pubkey.point.y() & 1:
ck = b'\3'+padx
else:
ck = b'\2'+padx
return ck
def ChainCode(self):
"Return chain code as string"
return self.C
def Identifier(self):
"Return key identifier as string"
cK = self.PublicKey()
return hashlib.new('ripemd160', sha256(cK).digest()).digest()
def Fingerprint(self):
"Return key fingerprint as string"
return self.Identifier()[:4]
def Address(self):
"Return compressed public key address"
vh160 = '\x00'+self.Identifier()
return Base58.check_encode(vh160)
def WalletImportFormat(self):
"Returns private key encoded for wallet import"
if self.public:
raise Exception("Publicly derived deterministic keys have no private half")
raw = '\x80' + self.k.to_string() + '\x01' # Always compressed
return Base58.check_encode(raw)
def ExtendedKey(self, private=True, encoded=True):
"Return extended private or public key as string, optionally Base58 encoded"
if self.public is True and private is True:
raise Exception("Cannot export an extended private key from a public-only deterministic key")
version = EX_MAIN_PRIVATE if private else EX_MAIN_PUBLIC
depth = chr(self.depth)
fpr = self.parent_fpr
child = struct.pack('>L', self.index)
chain = self.C
if self.public is True or private is False:
data = self.PublicKey()
else:
data = '\x00' + self.PrivateKey()
raw = version+depth+fpr+child+chain+data
if not encoded:
return raw
else:
return Base58.check_encode(raw)
# Debugging methods
#
def dump(self):
"Dump key fields mimicking the BIP0032 test vector format"
print " * Identifier"
print " * (hex): ", self.Identifier().encode('hex')
print " * (fpr): ", self.Fingerprint().encode('hex')
print " * (main addr):", self.Address()
if self.public is False:
print " * Secret key"
print " * (hex): ", self.PrivateKey().encode('hex')
print " * (wif): ", self.WalletImportFormat()
print " * Public key"
print " * (hex): ", self.PublicKey().encode('hex')
print " * Chain code"
print " * (hex): ", self.C.encode('hex')
print " * Serialized"
print " * (pub hex): ", self.ExtendedKey(private=False, encoded=False).encode('hex')
print " * (prv hex): ", self.ExtendedKey(private=True, encoded=False).encode('hex')
print " * (pub b58): ", self.ExtendedKey(private=False, encoded=True)
print " * (prv b58): ", self.ExtendedKey(private=True, encoded=True)
if __name__ == "__main__":
    import sys

    # Walk the official BIP-0032 test vectors, dumping every derived key so
    # the output can be diffed against the reference values in the spec.

    # BIP0032 Test vector 1
    entropy='000102030405060708090A0B0C0D0E0F'.decode('hex')
    m = BIP32Key.fromEntropy(entropy)
    print "Test vector 1:"
    print "Master (hex):", entropy.encode('hex')
    print "* [Chain m]"
    m.dump()

    print "* [Chain m/0h]"
    m = m.ChildKey(0+BIP32_HARDEN)
    m.dump()

    print "* [Chain m/0h/1]"
    m = m.ChildKey(1)
    m.dump()

    print "* [Chain m/0h/1/2h]"
    m = m.ChildKey(2+BIP32_HARDEN)
    m.dump()

    print "* [Chain m/0h/1/2h/2]"
    m = m.ChildKey(2)
    m.dump()

    print "* [Chain m/0h/1/2h/2/1000000000]"
    m = m.ChildKey(1000000000)
    m.dump()

    # BIP0032 Test vector 2
    entropy = 'fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542'.decode('hex')
    m = BIP32Key.fromEntropy(entropy)
    print "Test vector 2:"
    print "Master (hex):", entropy.encode('hex')
    print "* [Chain m]"
    m.dump()

    print "* [Chain m/0]"
    m = m.ChildKey(0)
    m.dump()

    print "* [Chain m/0/2147483647h]"
    m = m.ChildKey(2147483647+BIP32_HARDEN)
    m.dump()

    print "* [Chain m/0/2147483647h/1]"
    m = m.ChildKey(1)
    m.dump()

    print "* [Chain m/0/2147483647h/1/2147483646h]"
    m = m.ChildKey(2147483646+BIP32_HARDEN)
    m.dump()

    print "* [Chain m/0/2147483647h/1/2147483646h/2]"
    m = m.ChildKey(2)
    m.dump()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.models.dag import DAG
from airflow.sensors.sql_sensor import SqlSensor
from airflow.utils.timezone import datetime
from tests.providers.apache.hive import TestHiveEnvironment
# Fixed logical date and DAG id shared by every test case below.
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_sql_dag'
class TestSqlSensor(TestHiveEnvironment):
def setUp(self):
super().setUp()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
def test_unsupported_conn_type(self):
op = SqlSensor(
task_id='sql_sensor_check',
conn_id='redis_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
with self.assertRaises(AirflowException):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@pytest.mark.backend("mysql")
def test_sql_sensor_mysql(self):
op1 = SqlSensor(
task_id='sql_sensor_check_1',
conn_id='mysql_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
op2 = SqlSensor(
task_id='sql_sensor_check_2',
conn_id='mysql_default',
sql="SELECT count(%s) FROM INFORMATION_SCHEMA.TABLES",
parameters=["table_name"],
dag=self.dag
)
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@pytest.mark.backend("postgres")
def test_sql_sensor_postgres(self):
op1 = SqlSensor(
task_id='sql_sensor_check_1',
conn_id='postgres_default',
sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
dag=self.dag
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
op2 = SqlSensor(
task_id='sql_sensor_check_2',
conn_id='postgres_default',
sql="SELECT count(%s) FROM INFORMATION_SCHEMA.TABLES",
parameters=["table_name"],
dag=self.dag
)
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke(self, mock_hook):
        # Default criteria: poke() succeeds only when the first cell of the
        # first returned row is truthy. Note the non-empty strings 'None'
        # and '0' are truthy, unlike None/0/0.0.
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
        )

        mock_hook.get_connection('postgres_default').conn_type = "postgres"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))  # no rows -> keep poking

        mock_get_records.return_value = [[None]]
        self.assertFalse(op.poke(None))  # NULL first cell -> keep poking

        mock_get_records.return_value = [['None']]
        self.assertTrue(op.poke(None))   # non-empty string is truthy

        mock_get_records.return_value = [[0.0]]
        self.assertFalse(op.poke(None))

        mock_get_records.return_value = [[0]]
        self.assertFalse(op.poke(None))

        mock_get_records.return_value = [['0']]
        self.assertTrue(op.poke(None))   # string '0' is truthy

        mock_get_records.return_value = [['1']]
        self.assertTrue(op.poke(None))
    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_fail_on_empty(self, mock_hook):
        # With fail_on_empty=True an empty result set raises instead of
        # returning False for another poke cycle.
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            fail_on_empty=True
        )

        mock_hook.get_connection('postgres_default').conn_type = "postgres"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

        mock_get_records.return_value = []
        self.assertRaises(AirflowException, op.poke, None)
    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_success(self, mock_hook):
        # A custom `success` callable replaces the default truthiness check:
        # only the exact int 1 satisfies the sensor here.
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            success=lambda x: x in [1]
        )

        mock_hook.get_connection('postgres_default').conn_type = "postgres"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))

        mock_get_records.return_value = [[1]]
        self.assertTrue(op.poke(None))

        mock_get_records.return_value = [['1']]
        self.assertFalse(op.poke(None))  # string '1' != int 1
    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_failure(self, mock_hook):
        # A `failure` callable that matches the first cell makes poke()
        # raise instead of returning.
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=lambda x: x in [1]
        )

        mock_hook.get_connection('postgres_default').conn_type = "postgres"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))

        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)
    @mock.patch('airflow.sensors.sql_sensor.BaseHook')
    def test_sql_sensor_postgres_poke_failure_success(self, mock_hook):
        # Disjoint `failure` and `success` criteria: 1 raises, 2 succeeds,
        # anything else keeps poking.
        op = SqlSensor(
            task_id='sql_sensor_check',
            conn_id='postgres_default',
            sql="SELECT 1",
            failure=lambda x: x in [1],
            success=lambda x: x in [2]
        )

        mock_hook.get_connection('postgres_default').conn_type = "postgres"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

        mock_get_records.return_value = []
        self.assertFalse(op.poke(None))

        mock_get_records.return_value = [[1]]
        self.assertRaises(AirflowException, op.poke, None)

        mock_get_records.return_value = [[2]]
        self.assertTrue(op.poke(None))
@mock.patch('airflow.sensors.sql_sensor.BaseHook')
def test_sql_sensor_postgres_poke_failure_success_same(self, mock_hook):
    """When failure and success match the same cell, failure takes precedence."""
    sensor = SqlSensor(
        task_id='sql_sensor_check',
        conn_id='postgres_default',
        sql="SELECT 1",
        failure=lambda x: x in [1],
        success=lambda x: x in [1]
    )
    mock_hook.get_connection('postgres_default').conn_type = "postgres"
    records = mock_hook.get_connection.return_value.get_hook.return_value.get_records

    records.return_value = []
    self.assertFalse(sensor.poke(None))
    records.return_value = [[1]]
    with self.assertRaises(AirflowException):
        sensor.poke(None)
@mock.patch('airflow.sensors.sql_sensor.BaseHook')
def test_sql_sensor_postgres_poke_invalid_failure(self, mock_hook):
    """A non-callable `failure` argument makes poke() raise."""
    sensor = SqlSensor(
        task_id='sql_sensor_check',
        conn_id='postgres_default',
        sql="SELECT 1",
        failure=[1],
    )
    mock_hook.get_connection('postgres_default').conn_type = "postgres"
    records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
    records.return_value = [[1]]
    with self.assertRaises(AirflowException):
        sensor.poke(None)
@mock.patch('airflow.sensors.sql_sensor.BaseHook')
def test_sql_sensor_postgres_poke_invalid_success(self, mock_hook):
    """A non-callable `success` argument makes poke() raise."""
    sensor = SqlSensor(
        task_id='sql_sensor_check',
        conn_id='postgres_default',
        sql="SELECT 1",
        success=[1],
    )
    mock_hook.get_connection('postgres_default').conn_type = "postgres"
    records = mock_hook.get_connection.return_value.get_hook.return_value.get_records
    records.return_value = [[1]]
    with self.assertRaises(AirflowException):
        sensor.poke(None)
@unittest.skipIf(
    'AIRFLOW_RUNALL_TESTS' not in os.environ,
    "Skipped because AIRFLOW_RUNALL_TESTS is not set")
def test_sql_sensor_presto(self):
    """Integration test: run the sensor task against a real Presto connection.

    Only executed when AIRFLOW_RUNALL_TESTS is set, since it needs a live
    `presto_default` connection and the airflow.static_babynames table.
    """
    op = SqlSensor(
        task_id='hdfs_sensor_check',
        conn_id='presto_default',
        sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
        dag=self.dag)
    op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
           ignore_ti_state=True)
| |
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import random
import shutil
import unittest
import numpy as np
from monty.tempfile import ScratchDir
from pymatgen.analysis.chemenv.coordination_environments.voronoi import (
DetailedVoronoiContainer,
)
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
# Reference-data directories for the chemenv tests: serialized JSON fixtures
# and (presumably) reference images shipped with the pymatgen test files.
json_files_dir = os.path.join(
    PymatgenTest.TEST_FILES_DIR,
    "chemenv",
    "json_test_files",
)
img_files_dir = os.path.join(
    PymatgenTest.TEST_FILES_DIR,
    "chemenv",
    "images",
)
class VoronoiContainerTest(PymatgenTest):
    """Tests for DetailedVoronoiContainer built from small fake cubic structures."""

    def test_voronoi(self):
        """Check neighbor selection at several distance tolerances and cutoffs.

        Fixes in this revision:
        - the local variable ``sorted`` shadowed the builtin; renamed to
          ``site_order``;
        - the final ``assertTrue(a, b)`` treated ``b`` as a failure *message*
          and therefore always passed; replaced by ``assertEqual`` so the
          as_dict/from_dict round trip really exercises ``__eq__``.
        """
        with ScratchDir("."):
            # Define a cubic lattice and a list of species (to be used for the fake structures)
            cubic_lattice = Lattice.cubic(10.0)
            species = ["Cu", "O", "O", "O", "O", "O", "O"]
            valences = "undefined"

            # First fake structure: central Cu at (5, 5, 5) with six O sites at
            # slightly different distances, tagged with their distance rank 1..6.
            coords = [[5.0, 5.0, 5.0]]
            order_and_coords = [
                (1, [4.0, 5.0, 5.0]),
                (2, [6.01, 5.0, 5.0]),
                (3, [5.0, 3.98, 5.0]),
                (4, [5.0, 6.03, 5.0]),
                (5, [5.0, 5.0, 3.96]),
                (6, [5.0, 5.0, 6.05]),
            ]
            random.shuffle(order_and_coords)
            # Map distance rank -> site index in the shuffled structure
            # (+1 because index 0 is the central Cu site).
            site_order = np.argsort([oc[0] for oc in order_and_coords]) + 1
            coords.extend([oc[1] for oc in order_and_coords])
            fake_structure = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)

            # First fake structure with a given normalized_distance_tolerance of 0.0100001
            detailed_voronoi_container = DetailedVoronoiContainer(
                structure=fake_structure,
                valences=valences,
                normalized_distance_tolerance=0.0100001,
                isites=[0],
            )
            self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
            self.assertEqual(len(neighbors), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
            self.assertEqual(len(neighbors), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
            self.assertEqual(len(neighbors), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
            self.assertEqual(len(neighbors), 6)

            # First fake structure with a given normalized_distance_tolerance of 0.001
            detailed_voronoi_container = DetailedVoronoiContainer(
                structure=fake_structure,
                valences=valences,
                normalized_distance_tolerance=0.001,
                isites=[0],
            )
            self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
            self.assertEqual(len(neighbors), 1)
            self.assertEqual(neighbors[0]["site"], fake_structure[site_order[0]])
            neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 3)
            self.assertTrue(fake_structure[site_order[0]] in nbs)
            self.assertTrue(fake_structure[site_order[1]] in nbs)
            self.assertTrue(fake_structure[site_order[2]] in nbs)
            neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 3)
            self.assertTrue(fake_structure[site_order[0]] in nbs)
            self.assertTrue(fake_structure[site_order[1]] in nbs)
            self.assertTrue(fake_structure[site_order[2]] in nbs)
            neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
            self.assertEqual(len(neighbors), 6)

            # Second fake structure
            coords2 = [[5.0, 5.0, 5.0]]
            order_and_coords = [
                (1, [4.0, 5.0, 5.0]),
                (2, [6.01, 5.0, 5.0]),
                (3, [5.0, 3.98, 5.0]),
                (4, [5.0, 6.07, 5.0]),
                (5, [5.0, 5.0, 3.92]),
                (6, [5.0, 5.0, 6.09]),
            ]
            random.shuffle(order_and_coords)
            site_order = np.argsort([oc[0] for oc in order_and_coords]) + 1
            coords2.extend([oc[1] for oc in order_and_coords])
            fake_structure2 = Structure(cubic_lattice, species, coords2, coords_are_cartesian=True)

            # Second fake structure with a given normalized_distance_tolerance of 0.0100001
            detailed_voronoi_container = DetailedVoronoiContainer(
                structure=fake_structure2,
                valences=valences,
                normalized_distance_tolerance=0.0100001,
                isites=[0],
            )
            self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.0, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 3)
            self.assertTrue(fake_structure2[site_order[0]] in nbs)
            self.assertTrue(fake_structure2[site_order[1]] in nbs)
            self.assertTrue(fake_structure2[site_order[2]] in nbs)
            neighbors = detailed_voronoi_container.neighbors(0, 1.02, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 3)
            self.assertTrue(fake_structure2[site_order[0]] in nbs)
            self.assertTrue(fake_structure2[site_order[1]] in nbs)
            self.assertTrue(fake_structure2[site_order[2]] in nbs)
            neighbors = detailed_voronoi_container.neighbors(0, 1.026, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 3)
            self.assertTrue(fake_structure2[site_order[0]] in nbs)
            self.assertTrue(fake_structure2[site_order[1]] in nbs)
            self.assertTrue(fake_structure2[site_order[2]] in nbs)
            neighbors = detailed_voronoi_container.neighbors(0, 1.5, 0.5, True)
            self.assertEqual(len(neighbors), 6)

            species = ["Cu", "Cu", "O", "O", "O", "Cu", "O"]
            valences = [2, 2, -2, -2, -2, 2, -2]

            # Third fake structure (test of the only_anion_cation_bonds)
            coords = [
                [5.0, 5.0, 5.0],
                [6.01, 5.0, 5.0],
                [5.0, 5.0, 3.96],
                [4.0, 5.0, 5.0],
                [5.0, 6.03, 5.0],
                [5.0, 3.98, 5.0],
                [5.0, 5.0, 6.05],
            ]
            fake_structure3 = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)
            detailed_voronoi_container = DetailedVoronoiContainer(
                structure=fake_structure3,
                valences=valences,
                normalized_distance_tolerance=0.0100001,
                isites=[0],
                additional_conditions=[DetailedVoronoiContainer.AC.ONLY_ACB],
            )
            self.assertEqual(len(detailed_voronoi_container.voronoi_list2[0]), 6)
            neighbors = detailed_voronoi_container.neighbors(0, 1.01, 0.5, True)
            nbs = [nb["site"] for nb in neighbors]
            self.assertEqual(len(neighbors), 6)
            self.assertTrue(fake_structure3[1] in nbs)
            self.assertTrue(fake_structure3[2] in nbs)
            self.assertTrue(fake_structure3[3] in nbs)
            self.assertTrue(fake_structure3[4] in nbs)
            self.assertTrue(fake_structure3[5] in nbs)
            self.assertTrue(fake_structure3[6] in nbs)

            # Test of the as_dict() and from_dict() methods as well as __eq__ method.
            other_detailed_voronoi_container = DetailedVoronoiContainer.from_dict(detailed_voronoi_container.as_dict())
            self.assertEqual(detailed_voronoi_container, other_detailed_voronoi_container)

    def test_get_vertices_dist_ang_indices(self):
        """Check hull-vertex extraction from an L-shaped set of (dist, ang) indices."""
        with ScratchDir("."):
            cubic_lattice = Lattice.cubic(10.0)
            species = ["Cu", "O", "O", "O", "O", "O", "O"]
            valences = "undefined"

            # First fake structure
            coords = [
                [5.0, 5.0, 5.0],
                [6.01, 5.0, 5.0],
                [5.0, 5.0, 3.96],
                [4.0, 5.0, 5.0],
                [5.0, 6.03, 5.0],
                [5.0, 3.98, 5.0],
                [5.0, 5.0, 6.05],
            ]
            fake_structure = Structure(cubic_lattice, species, coords, coords_are_cartesian=True)

            # First fake structure with a given normalized_distance_tolerance of 0.0100001
            detailed_voronoi_container = DetailedVoronoiContainer(
                structure=fake_structure,
                valences=valences,
                normalized_distance_tolerance=0.0100001,
                isites=[0],
            )
            # Build an L-shaped region of (distance index, angle index) pairs.
            fake_parameter_indices_list = []
            for ii in range(2, 5):
                for jj in range(7, 14):
                    fake_parameter_indices_list.append((ii, jj))
            for ii in range(5, 7):
                for jj in range(10, 14):
                    fake_parameter_indices_list.append((ii, jj))

            points = detailed_voronoi_container._get_vertices_dist_ang_indices(fake_parameter_indices_list)
            self.assertEqual(points[0], (2, 7))
            self.assertEqual(points[1], (4, 7))
            self.assertEqual(points[2], (4, 10))
            self.assertEqual(points[3], (6, 10))
            self.assertEqual(points[4], (6, 13))
            self.assertEqual(points[5], (2, 13))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from nova import exception
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('db_backend', 'sqlalchemy',
                    'The backend to use for db')
flags.DEFINE_boolean('enable_new_services', True,
                     'Services to be added to the available pool on create')
flags.DEFINE_string('instance_name_template', 'instance-%08x',
                    'Template string to be used to generate instance names')
# Bug fix: help text previously read "instance names" (copy-paste from the
# flag above); this template generates *volume* names.
flags.DEFINE_string('volume_name_template', 'volume-%08x',
                    'Template string to be used to generate volume names')

# The concrete backend module is resolved lazily on first attribute access.
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
                           sqlalchemy='nova.db.sqlalchemy.api')
class NoMoreAddresses(exception.Error):
    """No more available addresses."""
    pass


class NoMoreBlades(exception.Error):
    """No more available blades."""
    pass


class NoMoreNetworks(exception.Error):
    """No more available networks."""
    pass


class NoMoreTargets(exception.Error):
    """No more available targets."""
    # Docstring previously said "blades" -- copy-paste from NoMoreBlades.
    pass
###################
def service_destroy(context, instance_id):
    """Destroy the service or raise if it does not exist.

    NOTE(review): the parameter is named ``instance_id`` but is forwarded
    as a service identifier -- looks like a copy-paste slip; confirm
    against the backend before renaming (keyword callers would break).
    """
    return IMPL.service_destroy(context, instance_id)


def service_get(context, service_id):
    """Get a service or raise if it does not exist."""
    return IMPL.service_get(context, service_id)


def service_get_by_host_and_topic(context, host, topic):
    """Get a service by host it's on and topic it listens to."""
    return IMPL.service_get_by_host_and_topic(context, host, topic)


def service_get_all(context, disabled=None):
    """Get all services."""
    return IMPL.service_get_all(context, disabled)


def service_get_all_by_topic(context, topic):
    """Get all services for a given topic."""
    return IMPL.service_get_all_by_topic(context, topic)


def service_get_all_by_host(context, host):
    """Get all services for a given host."""
    return IMPL.service_get_all_by_host(context, host)


def service_get_all_compute_by_host(context, host):
    """Get all compute services for a given host."""
    return IMPL.service_get_all_compute_by_host(context, host)


def service_get_all_compute_sorted(context):
    """Get all compute services sorted by instance count.

    :returns: a list of (Service, instance_count) tuples.
    """
    return IMPL.service_get_all_compute_sorted(context)


def service_get_all_network_sorted(context):
    """Get all network services sorted by network count.

    :returns: a list of (Service, network_count) tuples.
    """
    return IMPL.service_get_all_network_sorted(context)


def service_get_all_volume_sorted(context):
    """Get all volume services sorted by volume count.

    :returns: a list of (Service, volume_count) tuples.
    """
    return IMPL.service_get_all_volume_sorted(context)


def service_get_by_args(context, host, binary):
    """Get the state of a service by node name and binary."""
    return IMPL.service_get_by_args(context, host, binary)


def service_create(context, values):
    """Create a service from the values dictionary."""
    return IMPL.service_create(context, values)


def service_update(context, service_id, values):
    """Set the given properties on a service and update it.

    Raises NotFound if service does not exist.
    """
    return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id, session=None):
    """Get a compute node or raise if it does not exist.

    NOTE(review): ``session`` is accepted but not forwarded to the backend
    call -- confirm whether it should be passed through to IMPL.
    """
    return IMPL.compute_node_get(context, compute_id)


def compute_node_create(context, values):
    """Create a compute node from the values dictionary."""
    return IMPL.compute_node_create(context, values)


def compute_node_update(context, compute_id, values):
    """Set the given properties on a compute node and update it.

    Raises NotFound if the compute node does not exist.
    """
    return IMPL.compute_node_update(context, compute_id, values)
###################
def certificate_create(context, values):
    """Create a certificate from the values dictionary."""
    return IMPL.certificate_create(context, values)


def certificate_destroy(context, certificate_id):
    """Destroy the certificate or raise if it does not exist."""
    return IMPL.certificate_destroy(context, certificate_id)


def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return IMPL.certificate_get_all_by_project(context, project_id)


def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return IMPL.certificate_get_all_by_user(context, user_id)


def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user and project."""
    return IMPL.certificate_get_all_by_user_and_project(context,
                                                        user_id,
                                                        project_id)


def certificate_update(context, certificate_id, values):
    """Set the given properties on a certificate and update it.

    Raises NotFound if the certificate does not exist.
    """
    # Docstring previously said "if service does not exist" -- copy-paste
    # from service_update.
    return IMPL.certificate_update(context, certificate_id, values)
###################
def floating_ip_allocate_address(context, host, project_id):
    """Allocate a free floating ip and return the address.

    Raises if one is not available.
    """
    return IMPL.floating_ip_allocate_address(context, host, project_id)


def floating_ip_create(context, values):
    """Create a floating ip from the values dictionary."""
    return IMPL.floating_ip_create(context, values)


def floating_ip_count_by_project(context, project_id):
    """Count floating ips used by project."""
    return IMPL.floating_ip_count_by_project(context, project_id)


def floating_ip_deallocate(context, address):
    """Deallocate a floating ip by address."""
    return IMPL.floating_ip_deallocate(context, address)


def floating_ip_destroy(context, address):
    """Destroy the floating_ip or raise if it does not exist."""
    return IMPL.floating_ip_destroy(context, address)


def floating_ip_disassociate(context, address):
    """Disassociate a floating ip from a fixed ip by address.

    :returns: the address of the existing fixed ip.
    """
    return IMPL.floating_ip_disassociate(context, address)


def floating_ip_fixed_ip_associate(context, floating_address, fixed_address):
    """Associate a floating ip to a fixed_ip by address."""
    return IMPL.floating_ip_fixed_ip_associate(context,
                                               floating_address,
                                               fixed_address)


def floating_ip_get_all(context):
    """Get all floating ips."""
    return IMPL.floating_ip_get_all(context)


def floating_ip_get_all_by_host(context, host):
    """Get all floating ips by host."""
    return IMPL.floating_ip_get_all_by_host(context, host)


def floating_ip_get_all_by_project(context, project_id):
    """Get all floating ips by project."""
    return IMPL.floating_ip_get_all_by_project(context, project_id)


def floating_ip_get_by_address(context, address):
    """Get a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_get_by_address(context, address)


def floating_ip_update(context, address, values):
    """Update a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_update(context, address, values)


def floating_ip_set_auto_assigned(context, address):
    """Set the auto_assigned flag on a floating ip."""
    return IMPL.floating_ip_set_auto_assigned(context, address)
####################
def migration_update(context, id, values):
    """Update a migration instance."""
    return IMPL.migration_update(context, id, values)


def migration_create(context, values):
    """Create a migration record."""
    return IMPL.migration_create(context, values)


def migration_get(context, migration_id):
    """Finds a migration by the id."""
    return IMPL.migration_get(context, migration_id)


def migration_get_by_instance_and_status(context, instance_id, status):
    """Finds a migration by the id of the instance it is migrating and its status."""
    return IMPL.migration_get_by_instance_and_status(context, instance_id,
                                                     status)
####################
def fixed_ip_associate(context, address, instance_id):
    """Associate fixed ip to instance.

    Raises if fixed ip is not available.
    """
    return IMPL.fixed_ip_associate(context, address, instance_id)


def fixed_ip_associate_pool(context, network_id, instance_id):
    """Find free ip in network and associate it to instance.

    Raises if one is not available.
    """
    return IMPL.fixed_ip_associate_pool(context, network_id, instance_id)


def fixed_ip_create(context, values):
    """Create a fixed ip from the values dictionary."""
    return IMPL.fixed_ip_create(context, values)


def fixed_ip_disassociate(context, address):
    """Disassociate a fixed ip from an instance by address."""
    return IMPL.fixed_ip_disassociate(context, address)


def fixed_ip_disassociate_all_by_timeout(context, host, time):
    """Disassociate old fixed ips from host."""
    return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)


def fixed_ip_get_all(context):
    """Get all defined fixed ips."""
    return IMPL.fixed_ip_get_all(context)


def fixed_ip_get_all_by_host(context, host):
    """Get all defined fixed ips used by a host."""
    return IMPL.fixed_ip_get_all_by_host(context, host)


def fixed_ip_get_by_address(context, address):
    """Get a fixed ip by address or raise if it does not exist."""
    return IMPL.fixed_ip_get_by_address(context, address)


def fixed_ip_get_all_by_instance(context, instance_id):
    """Get fixed ips by instance or raise if none exist."""
    return IMPL.fixed_ip_get_all_by_instance(context, instance_id)


def fixed_ip_get_instance(context, address):
    """Get an instance for a fixed ip by address."""
    return IMPL.fixed_ip_get_instance(context, address)


def fixed_ip_get_instance_v6(context, address):
    """Get an instance for a fixed ip by address.

    NOTE(review): presumably the IPv6 variant of fixed_ip_get_instance --
    confirm against the backend implementation.
    """
    return IMPL.fixed_ip_get_instance_v6(context, address)


def fixed_ip_get_network(context, address):
    """Get a network for a fixed ip by address."""
    return IMPL.fixed_ip_get_network(context, address)


def fixed_ip_update(context, address, values):
    """Update a fixed ip from the values dictionary."""
    # Docstring previously said "Create" -- copy-paste from fixed_ip_create.
    return IMPL.fixed_ip_update(context, address, values)
####################
def instance_create(context, values):
    """Create an instance from the values dictionary."""
    return IMPL.instance_create(context, values)


def instance_data_get_for_project(context, project_id):
    """Get (instance_count, core_count) for project."""
    return IMPL.instance_data_get_for_project(context, project_id)


def instance_destroy(context, instance_id):
    """Destroy the instance or raise if it does not exist."""
    return IMPL.instance_destroy(context, instance_id)


def instance_get(context, instance_id):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get(context, instance_id)


def instance_get_all(context):
    """Get all instances."""
    return IMPL.instance_get_all(context)


def instance_get_all_by_user(context, user_id):
    """Get all instances belonging to a user."""
    return IMPL.instance_get_all_by_user(context, user_id)


def instance_get_all_by_project(context, project_id):
    """Get all instances belonging to a project."""
    return IMPL.instance_get_all_by_project(context, project_id)


def instance_get_all_by_host(context, host):
    """Get all instances belonging to a host."""
    return IMPL.instance_get_all_by_host(context, host)


def instance_get_all_by_reservation(context, reservation_id):
    """Get all instances belonging to a reservation."""
    return IMPL.instance_get_all_by_reservation(context, reservation_id)


def instance_get_fixed_address(context, instance_id):
    """Get the fixed ip address of an instance."""
    return IMPL.instance_get_fixed_address(context, instance_id)


def instance_get_fixed_address_v6(context, instance_id):
    """Get the fixed ip address of an instance.

    NOTE(review): presumably the IPv6 variant -- confirm against backend.
    """
    return IMPL.instance_get_fixed_address_v6(context, instance_id)


def instance_get_floating_address(context, instance_id):
    """Get the first floating ip address of an instance."""
    return IMPL.instance_get_floating_address(context, instance_id)


def instance_get_project_vpn(context, project_id):
    """Get a vpn instance by project or return None."""
    return IMPL.instance_get_project_vpn(context, project_id)


def instance_set_state(context, instance_id, state, description=None):
    """Set the state of an instance."""
    return IMPL.instance_set_state(context, instance_id, state, description)


def instance_update(context, instance_id, values):
    """Set the given properties on an instance and update it.

    Raises NotFound if instance does not exist.
    """
    return IMPL.instance_update(context, instance_id, values)


def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance."""
    return IMPL.instance_add_security_group(context, instance_id,
                                            security_group_id)


def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
    """Get instances.vcpus by host and project."""
    return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
                                                          hostname,
                                                          proj_id)


def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
    """Get amount of memory by host and project."""
    return IMPL.instance_get_memory_sum_by_host_and_project(context,
                                                            hostname,
                                                            proj_id)


def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
    """Get total amount of disk by host and project."""
    return IMPL.instance_get_disk_sum_by_host_and_project(context,
                                                          hostname,
                                                          proj_id)


def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""
    return IMPL.instance_action_create(context, values)


def instance_get_actions(context, instance_id):
    """Get instance actions by instance id."""
    return IMPL.instance_get_actions(context, instance_id)
###################
# Key pair accessors: thin pass-throughs to the configured backend.
def key_pair_create(context, values):
    """Create a key_pair from the values dictionary."""
    return IMPL.key_pair_create(context, values)


def key_pair_destroy(context, user_id, name):
    """Destroy the key_pair or raise if it does not exist."""
    return IMPL.key_pair_destroy(context, user_id, name)


def key_pair_destroy_all_by_user(context, user_id):
    """Destroy all key_pairs by user."""
    return IMPL.key_pair_destroy_all_by_user(context, user_id)


def key_pair_get(context, user_id, name):
    """Get a key_pair or raise if it does not exist."""
    return IMPL.key_pair_get(context, user_id, name)


def key_pair_get_all_by_user(context, user_id):
    """Get all key_pairs by user."""
    return IMPL.key_pair_get_all_by_user(context, user_id)
####################
def network_associate(context, project_id):
    """Associate a free network to a project."""
    return IMPL.network_associate(context, project_id)


def network_count(context):
    """Return the number of networks."""
    return IMPL.network_count(context)


def network_count_allocated_ips(context, network_id):
    """Return the number of allocated non-reserved ips in the network."""
    return IMPL.network_count_allocated_ips(context, network_id)


def network_count_available_ips(context, network_id):
    """Return the number of available ips in the network."""
    return IMPL.network_count_available_ips(context, network_id)


def network_count_reserved_ips(context, network_id):
    """Return the number of reserved ips in the network."""
    return IMPL.network_count_reserved_ips(context, network_id)


def network_create_safe(context, values):
    """Create a network from the values dict.

    The network is only returned if the create succeeds. If the create violates
    constraints because the network already exists, no exception is raised.
    """
    return IMPL.network_create_safe(context, values)


def network_delete_safe(context, network_id):
    """Delete network with key network_id.

    This method assumes that the network is not associated with any project.
    """
    return IMPL.network_delete_safe(context, network_id)


def network_create_fixed_ips(context, network_id, num_vpn_clients):
    """Create the ips for the network, reserving specified ips."""
    return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)


def network_disassociate(context, network_id):
    """Disassociate the network from project or raise if it does not exist."""
    return IMPL.network_disassociate(context, network_id)


def network_disassociate_all(context):
    """Disassociate all networks from projects."""
    return IMPL.network_disassociate_all(context)


def network_get(context, network_id):
    """Get a network or raise if it does not exist."""
    return IMPL.network_get(context, network_id)


def network_get_all(context):
    """Return all defined networks."""
    return IMPL.network_get_all(context)


# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id):
    """Get all network's ips that have been associated."""
    return IMPL.network_get_associated_fixed_ips(context, network_id)


def network_get_by_bridge(context, bridge):
    """Get a network by bridge or raise if it does not exist."""
    return IMPL.network_get_by_bridge(context, bridge)


def network_get_by_cidr(context, cidr):
    """Get a network by cidr or raise if it does not exist."""
    return IMPL.network_get_by_cidr(context, cidr)


def network_get_by_instance(context, instance_id):
    """Get a network by instance id or raise if it does not exist."""
    return IMPL.network_get_by_instance(context, instance_id)


def network_get_all_by_instance(context, instance_id):
    """Get all networks by instance id or raise if none exist."""
    return IMPL.network_get_all_by_instance(context, instance_id)


def network_get_index(context, network_id):
    """Get non-conflicting index for network."""
    return IMPL.network_get_index(context, network_id)


def network_get_vpn_ip(context, network_id):
    """Get the vpn ip for the network.

    NOTE(review): the original docstring duplicated network_get_index's
    ("Get non-conflicting index for network") -- presumed copy-paste;
    confirm against the backend implementation.
    """
    return IMPL.network_get_vpn_ip(context, network_id)


def network_set_cidr(context, network_id, cidr):
    """Set the Classless Inter-Domain Routing (CIDR) for the network."""
    return IMPL.network_set_cidr(context, network_id, cidr)


def network_set_host(context, network_id, host_id):
    """Safely set the host for network."""
    return IMPL.network_set_host(context, network_id, host_id)


def network_update(context, network_id, values):
    """Set the given properties on a network and update it.

    Raises NotFound if network does not exist.
    """
    return IMPL.network_update(context, network_id, values)
###################
def project_get_network(context, project_id, associate=True):
    """Return the network associated with the project.

    If associate is true, it will attempt to associate a new
    network if one is not found, otherwise it returns None.
    """
    return IMPL.project_get_network(context, project_id, associate)


def project_get_network_v6(context, project_id):
    """Return the network associated with the project.

    NOTE(review): presumably the IPv6 variant of project_get_network --
    confirm against the backend implementation.
    """
    return IMPL.project_get_network_v6(context, project_id)
###################
# Message-queue routing helper.
def queue_get_for(context, topic, physical_node_id):
    """Return a channel to send a message to a node with a topic."""
    return IMPL.queue_get_for(context, topic, physical_node_id)
###################
def export_device_count(context):
    """Return count of export devices."""
    return IMPL.export_device_count(context)


def export_device_create_safe(context, values):
    """Create an export_device from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the shelf_id and blade_id already exist,
    no exception is raised.
    """
    return IMPL.export_device_create_safe(context, values)
###################
def iscsi_target_count_by_host(context, host):
    """Return count of iscsi targets for a host."""
    # Docstring previously said "export devices" -- copy-paste from
    # export_device_count.
    return IMPL.iscsi_target_count_by_host(context, host)


def iscsi_target_create_safe(context, values):
    """Create an iscsi_target from the values dictionary.

    The device is not returned. If the create violates the unique
    constraints because the iscsi_target and host already exist,
    no exception is raised.
    """
    return IMPL.iscsi_target_create_safe(context, values)
###############
# Auth token accessors: tokens are addressed by their hash.
def auth_token_destroy(context, token_id):
    """Destroy an auth token."""
    return IMPL.auth_token_destroy(context, token_id)


def auth_token_get(context, token_hash):
    """Retrieves a token given the hash representing it."""
    return IMPL.auth_token_get(context, token_hash)


def auth_token_update(context, token_hash, values):
    """Updates a token given the hash representing it."""
    return IMPL.auth_token_update(context, token_hash, values)


def auth_token_create(context, token):
    """Creates a new token."""
    return IMPL.auth_token_create(context, token)
###################
# Per-project resource quotas.
def quota_create(context, project_id, resource, limit):
    """Create a quota for the given project and resource."""
    return IMPL.quota_create(context, project_id, resource, limit)


def quota_get(context, project_id, resource):
    """Retrieve a quota or raise if it does not exist."""
    return IMPL.quota_get(context, project_id, resource)


def quota_get_all_by_project(context, project_id):
    """Retrieve all quotas associated with a given project."""
    return IMPL.quota_get_all_by_project(context, project_id)


def quota_update(context, project_id, resource, limit):
    """Update a quota or raise if it does not exist."""
    return IMPL.quota_update(context, project_id, resource, limit)


def quota_destroy(context, project_id, resource):
    """Destroy the quota or raise if it does not exist."""
    return IMPL.quota_destroy(context, project_id, resource)
def quota_destroy_all_by_project(context, project_id):
    """Destroy all quotas associated with a given project."""
    # Bug fix: this previously delegated to IMPL.quota_get_all_by_project,
    # which only *fetched* the quotas and never destroyed anything.
    return IMPL.quota_destroy_all_by_project(context, project_id)
###################
# Volume accessors: thin pass-throughs to the configured backend (IMPL).
def volume_allocate_shelf_and_blade(context, volume_id):
    """Atomically allocate a free shelf and blade from the pool."""
    return IMPL.volume_allocate_shelf_and_blade(context, volume_id)
def volume_allocate_iscsi_target(context, volume_id, host):
    """Atomically allocate a free iscsi_target from the pool."""
    return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
def volume_attached(context, volume_id, instance_id, mountpoint):
    """Ensure that a volume is set as attached."""
    return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
def volume_create(context, values):
    """Create a volume from the values dictionary."""
    return IMPL.volume_create(context, values)
def volume_data_get_for_project(context, project_id):
    """Get (volume_count, gigabytes) for project."""
    return IMPL.volume_data_get_for_project(context, project_id)
def volume_destroy(context, volume_id):
    """Destroy the volume or raise if it does not exist."""
    return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id):
    """Ensure that a volume is set as detached."""
    return IMPL.volume_detached(context, volume_id)
def volume_get(context, volume_id):
    """Get a volume or raise if it does not exist."""
    return IMPL.volume_get(context, volume_id)
def volume_get_all(context):
    """Get all volumes."""
    return IMPL.volume_get_all(context)
def volume_get_all_by_host(context, host):
    """Get all volumes belonging to a host."""
    return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_instance(context, instance_id):
    """Get all volumes belonging to an instance."""
    return IMPL.volume_get_all_by_instance(context, instance_id)
def volume_get_all_by_project(context, project_id):
    """Get all volumes belonging to a project."""
    return IMPL.volume_get_all_by_project(context, project_id)
def volume_get_by_ec2_id(context, ec2_id):
    """Get a volume by ec2 id."""
    return IMPL.volume_get_by_ec2_id(context, ec2_id)
def volume_get_instance(context, volume_id):
    """Get the instance that a volume is attached to."""
    return IMPL.volume_get_instance(context, volume_id)
def volume_get_shelf_and_blade(context, volume_id):
    """Get the shelf and blade allocated to the volume."""
    return IMPL.volume_get_shelf_and_blade(context, volume_id)
def volume_get_iscsi_target_num(context, volume_id):
    """Get the target num (tid) allocated to the volume."""
    return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
    """Set the given properties on a volume and update it.
    Raises NotFound if volume does not exist.
    """
    return IMPL.volume_update(context, volume_id, values)
####################
# Security-group accessors: thin pass-throughs to the configured backend (IMPL).
def security_group_get_all(context):
    """Get all security groups."""
    return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
    """Get security group by its id."""
    return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
    """Returns a security group with the specified name from a project."""
    return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
    """Get all security groups belonging to a project."""
    return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
    """Get security groups to which the instance is assigned."""
    return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
    """Indicates if a group name exists in a project."""
    return IMPL.security_group_exists(context, project_id, group_name)
def security_group_create(context, values):
    """Create a new security group."""
    return IMPL.security_group_create(context, values)
def security_group_destroy(context, security_group_id):
    """Deletes a security group."""
    return IMPL.security_group_destroy(context, security_group_id)
def security_group_destroy_all(context):
    """Deletes all security groups."""
    return IMPL.security_group_destroy_all(context)
####################
# Security-group-rule accessors: thin pass-throughs to the backend (IMPL).
def security_group_rule_create(context, values):
    """Create a new security group rule."""
    return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
    """Get all rules for a given security group."""
    return IMPL.security_group_rule_get_by_security_group(context,
                                                          security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
                                                      security_group_id):
    """Get all rules that grant access to the given security group."""
    return IMPL.security_group_rule_get_by_security_group_grantee(context,
                                                             security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
    """Deletes a security group rule."""
    return IMPL.security_group_rule_destroy(context, security_group_rule_id)
###################
# User accessors: thin pass-throughs to the configured backend (IMPL).
def user_get(context, id):
    """Get user by id."""
    return IMPL.user_get(context, id)
def user_get_by_uid(context, uid):
    """Get user by uid."""
    return IMPL.user_get_by_uid(context, uid)
def user_get_by_access_key(context, access_key):
    """Get user by access key."""
    return IMPL.user_get_by_access_key(context, access_key)
def user_create(context, values):
    """Create a new user."""
    return IMPL.user_create(context, values)
def user_delete(context, id):
    """Delete a user."""
    return IMPL.user_delete(context, id)
def user_get_all(context):
    """Get all users."""
    return IMPL.user_get_all(context)
def user_add_role(context, user_id, role):
    """Add another global role for user."""
    return IMPL.user_add_role(context, user_id, role)
def user_remove_role(context, user_id, role):
    """Remove global role from user."""
    return IMPL.user_remove_role(context, user_id, role)
def user_get_roles(context, user_id):
    """Get global roles for user."""
    return IMPL.user_get_roles(context, user_id)
def user_add_project_role(context, user_id, project_id, role):
    """Add project role for user."""
    return IMPL.user_add_project_role(context, user_id, project_id, role)
def user_remove_project_role(context, user_id, project_id, role):
    """Remove project role from user."""
    return IMPL.user_remove_project_role(context, user_id, project_id, role)
def user_get_roles_for_project(context, user_id, project_id):
    """Return list of roles a user holds on project."""
    return IMPL.user_get_roles_for_project(context, user_id, project_id)
def user_update(context, user_id, values):
    """Update user."""
    return IMPL.user_update(context, user_id, values)
# Project accessors: thin pass-throughs to the configured backend (IMPL).
def project_get(context, id):
    """Get project by id."""
    return IMPL.project_get(context, id)
def project_create(context, values):
    """Create a new project."""
    return IMPL.project_create(context, values)
def project_add_member(context, project_id, user_id):
    """Add user to project."""
    return IMPL.project_add_member(context, project_id, user_id)
def project_get_all(context):
    """Get all projects."""
    return IMPL.project_get_all(context)
def project_get_by_user(context, user_id):
    """Get all projects of which the given user is a member."""
    return IMPL.project_get_by_user(context, user_id)
def project_remove_member(context, project_id, user_id):
    """Remove the given user from the given project."""
    return IMPL.project_remove_member(context, project_id, user_id)
def project_update(context, project_id, values):
    """Update the given project with the given values."""
    return IMPL.project_update(context, project_id, values)
def project_delete(context, project_id):
    """Delete project."""
    return IMPL.project_delete(context, project_id)
###################
def host_get_networks(context, host):
    """All networks for which the given host is the network host."""
    # Pass-through to the configured backend (IMPL).
    return IMPL.host_get_networks(context, host)
##################
# Console and console-pool accessors: thin pass-throughs to the backend (IMPL).
def console_pool_create(context, values):
    """Create console pool."""
    return IMPL.console_pool_create(context, values)
def console_pool_get(context, pool_id):
    """Get a console pool."""
    return IMPL.console_pool_get(context, pool_id)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
                                  console_type):
    """Fetch a console pool for a given proxy host, compute host, and type."""
    return IMPL.console_pool_get_by_host_type(context,
                                              compute_host,
                                              proxy_host,
                                              console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
    """Fetch all pools for given proxy host and type."""
    return IMPL.console_pool_get_all_by_host_type(context,
                                                  host,
                                                  console_type)
def console_create(context, values):
    """Create a console."""
    return IMPL.console_create(context, values)
def console_delete(context, console_id):
    """Delete a console."""
    return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_id):
    """Get console entry for a given instance and pool."""
    return IMPL.console_get_by_pool_instance(context, pool_id, instance_id)
def console_get_all_by_instance(context, instance_id):
    """Get consoles for a given instance."""
    return IMPL.console_get_all_by_instance(context, instance_id)
def console_get(context, console_id, instance_id=None):
    """Get a specific console (possibly on a given instance)."""
    return IMPL.console_get(context, console_id, instance_id)
##################
# Instance-type accessors: thin pass-throughs to the configured backend (IMPL).
def instance_type_create(context, values):
    """Create a new instance type."""
    return IMPL.instance_type_create(context, values)
def instance_type_get_all(context, inactive=False):
    """Get all instance types."""
    return IMPL.instance_type_get_all(context, inactive)
def instance_type_get_by_id(context, id):
    """Get instance type by id."""
    return IMPL.instance_type_get_by_id(context, id)
def instance_type_get_by_name(context, name):
    """Get instance type by name."""
    return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
    """Get instance type by flavor id."""
    return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
    """Delete a instance type."""
    return IMPL.instance_type_destroy(context, name)
def instance_type_purge(context, name):
    """Purges (removes) an instance type from DB.
    Use instance_type_destroy for most cases
    """
    return IMPL.instance_type_purge(context, name)
####################
def zone_create(context, values):
    """Create a new child Zone entry."""
    # Pass-through to the configured backend (IMPL).
    return IMPL.zone_create(context, values)
def zone_update(context, zone_id, values):
    """Update a child Zone entry."""
    # BUG FIX: the zone_id argument was previously dropped — the call was
    # IMPL.zone_update(context, values) — so the backend could not identify
    # which zone to update. Forward it like every sibling wrapper does.
    return IMPL.zone_update(context, zone_id, values)
# Remaining zone accessors: thin pass-throughs to the backend (IMPL).
def zone_delete(context, zone_id):
    """Delete a child Zone."""
    return IMPL.zone_delete(context, zone_id)
def zone_get(context, zone_id):
    """Get a specific child Zone."""
    return IMPL.zone_get(context, zone_id)
def zone_get_all(context):
    """Get all child Zones."""
    return IMPL.zone_get_all(context)
####################
# Instance-metadata accessors: thin pass-throughs to the backend (IMPL).
def instance_metadata_get(context, instance_id):
    """Get all metadata for an instance."""
    return IMPL.instance_metadata_get(context, instance_id)
def instance_metadata_delete(context, instance_id, key):
    """Delete the given metadata item."""
    # NOTE: unlike the getters, this does not return the IMPL result.
    IMPL.instance_metadata_delete(context, instance_id, key)
def instance_metadata_update_or_create(context, instance_id, metadata):
    """Create or update instance metadata."""
    # NOTE: unlike the getters, this does not return the IMPL result.
    IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
# ---------------------------------------------------------------------------
"""
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
Lexers for BASIC like languages (other than VB.net).
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default, words, include
from pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
    """
    For `BlitzMax <http://blitzbasic.com>`_ source code.
    .. versionadded:: 1.4
    """
    name = 'BlitzMax'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']
    # Shared regex fragments: word operators, short-kind type sigils,
    # long-kind type keywords, and the identifier shape.
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
    bmax_sktypes = r'@{1,2}|[!#$%]'
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
    bmax_name = r'[a-z_]\w*'
    # Variable reference: name, optional type sigil/keyword/class annotation,
    # optional trailing 'Ptr'. Group order must match the bygroups() calls below.
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
        (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    # Function reference: a variable followed by an opening parenthesis
    # (allowing '..'-style line continuations in between).
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            (r'\.\.\n', Text),  # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
             (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
             (bmax_name, bmax_name),
             bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Text, Keyword.Type,
                                 Operator, Text, Punctuation, Text,
                                 Keyword.Type, Name.Class, Text,
                                 Keyword.Type, Text, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
                                Text, Punctuation, Text, Keyword.Type,
                                Name.Class, Text, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (words((
                'TNullMethodException', 'TNullFunctionException',
                'TNullObjectException', 'TArrayBoundsException',
                'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
            (words((
                'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
                'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
                'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
                'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
                'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
                'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
                'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
                'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
                'RestoreData'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        'string': [
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
class BlitzBasicLexer(RegexLexer):
    """
    For `BlitzBasic <http://blitzbasic.com>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'BlitzBasic'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']
    # Shared regex fragments: short-kind type sigils and the identifier shape.
    bb_sktypes = r'@{1,2}|[#$%]'
    bb_name = r'[a-z]\w*'
    # Variable reference: name with an optional sigil or '.Type' annotation.
    # Group order must match the bygroups() calls below.
    bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
        (bb_name, bb_sktypes, bb_name)
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
                    'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
                    'First', 'Last', 'Before', 'After'),
                   prefix=r'\b', suffix=r'\b'),
             Operator),
            (r'([+\-*/~=<>^])', Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
            # Identifiers
            (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Label)),
            (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
             bygroups(Operator, Text, Punctuation, Text, Name.Class)),
            (r'\b%s\b([ \t]*)(\()' % bb_var,
             bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
                      Text, Name.Class, Text, Punctuation)),
            (r'\b(Function)\b([ \t]+)%s' % bb_var,
             bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
                      Text, Punctuation, Text, Name.Class)),
            (r'\b(Type)([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (words((
                'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
                'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
                'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Text, Keyword.Type,
                              Text, Punctuation, Text, Name.Class)),
        ],
        'string': [
            (r'""', String.Double),
            (r'"C?', String.Double, '#pop'),
            (r'[^"]+', String.Double),
        ],
    }
class MonkeyLexer(RegexLexer):
    """
    For
    `Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
    source code.
    .. versionadded:: 1.6
    """
    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']
    # Identifier shapes: Monkey distinguishes case conventions for variables,
    # functions/classes (capitalized), constants (all-caps) and modules.
    name_variable = r'[a-z_]\w*'
    name_function = r'[A-Z]\w*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z]\w*'
    name_module = r'[a-z0-9_]*'
    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'
    flags = re.MULTILINE
    tokens = {
        'root': [
            # Text
            (r'\s+', Text),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-fA-Z]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Native data types
            (r'\b%s\b' % keyword_type, Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Text), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Text), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Text), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)\s+', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[(){}!#,.:]', Punctuation),
            # catch the rest
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_function, Name.Function),
            (r'%s\b' % name_variable, Name.Variable),
        ],
        'funcname': [
            (r'(?i)%s\b' % name_function, Name.Function),
            (r':', Punctuation, 'classname'),
            (r'\s+', Text),
            (r'\(', Punctuation, 'variables'),
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (r'%s\.' % name_module, Name.Namespace),
            (r'%s\b' % keyword_type, Keyword.Type),
            (r'%s\b' % name_class, Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
            # generics
            (r'\s+(?!<)', Text, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
            default('#pop')
        ],
        'variables': [
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_variable, Name.Variable),
            (r'%s' % keyword_type_special, Keyword.Type),
            (r'\s+', Text),
            (r':', Punctuation, 'classname'),
            (r',', Punctuation, '#push'),
            default('#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment': [
            # nested #rem ... #end blocks are tracked with push/pop
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.
    .. versionadded:: 1.6
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Text),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[(),:;]', Punctuation),
            (r'\w+[$%]?', Name),
        ]
    }
    def analyse_text(text):
        # if it starts with a line number, it shouldn't be a "modern" Basic
        # like VB.net
        if re.match(r'^\d+', text):
            return 0.2
class QBasicLexer(RegexLexer):
    """
    For
    `QBasic <http://en.wikipedia.org/wiki/QBasic>`_
    source code.
    .. versionadded:: 2.0
    """
    name = 'QBasic'
    aliases = ['qbasic', 'basic']
    filenames = ['*.BAS', '*.bas']
    mimetypes = ['text/basic']
    declarations = ('DATA', 'LET')
    functions = (
        'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
        'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
        'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
        'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
        'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
        'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
        'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
        'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
        'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
        'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
        'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
        'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
        'VARPTR$', 'VARSEG'
    )
    metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
    operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
    # CLEANUP: duplicate 'KEY' and 'PUT' entries removed; duplicates in a
    # regex alternation are redundant and never change what matches.
    statements = (
        'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
        'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
        'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
        'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
        'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
        'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
        'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
        'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY',
        'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
        'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
        'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
        'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
        'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
        'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
        'PSET', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
        'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
        'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
        'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
        'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
        'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
    )
    keywords = (
        'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
        'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
        'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
        'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
        'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
    )
    tokens = {
        'root': [
            (r'\n+', Text),
            (r'\s+', Text.Whitespace),
            (r'^(\s*)(\d*)(\s*)(REM .*)$',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
                      Comment.Single)),
            (r'^(\s*)(\d+)(\s*)',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
            (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
            (r'(?=[^"]*)\'.*$', Comment.Single),
            (r'"[^\n"]*"', String.Double),
            (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
             bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
            (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name)),
            (r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name.Variable.Global)),
            (r'(DIM)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
            (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
             bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
                      Operator)),
            (r'(GOTO|GOSUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            (r'(SUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            include('declarations'),
            include('functions'),
            include('metacommands'),
            include('operators'),
            include('statements'),
            include('keywords'),
            (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
            (r'[a-zA-Z_]\w*\:', Name.Label),
            (r'\-?\d*\.\d+[@|#]?', Number.Float),
            (r'\-?\d+[@|#]', Number.Float),
            (r'\-?\d+#?', Number.Integer.Long),
            # CLEANUP: a second, byte-identical r'\-?\d+#?' rule tagged
            # Number.Integer used to follow here; it was unreachable because
            # the Number.Integer.Long rule above always matched first.
            (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
            (r'[\[\]{}(),;]', Punctuation),
            (r'[\w]+', Name.Variable.Global),
        ],
        # can't use regular \b because of X$()
        # XXX: use words() here
        'declarations': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
             Keyword.Declaration),
        ],
        'functions': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
             Keyword.Reserved),
        ],
        'metacommands': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
             Keyword.Constant),
        ],
        'operators': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
        ],
        'statements': [
            (r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
             Keyword.Reserved),
        ],
        'keywords': [
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
        ],
    }
    def analyse_text(text):
        # The $DYNAMIC / $STATIC metacommands are a strong QBasic signal.
        if '$DYNAMIC' in text or '$STATIC' in text:
            return 0.9
class VBScriptLexer(RegexLexer):
    """
    VBScript is scripting language that is modeled on Visual Basic.
    .. versionadded:: 2.4
    """
    name = 'VBScript'
    aliases = ['vbscript']
    filenames = ['*.vbs', '*.VBS']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r"'[^\n]*", Comment.Single),
            (r'\s+', Whitespace),
            ('"', String.Double, 'string'),
            ('&h[0-9a-f]+', Number.Hex),
            # Float variant 1, for example: 1., 1.e2, 1.2e3
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
            (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),  # Float variant 2, for example: .1, .1e2
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),  # Float variant 3, for example: 123e45
            (r'[0-9]+', Number.Integer),
            ('#.+#', String),  # date or time value
            (r'(dim)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
            (r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (r'(class)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (r'(const)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
            (r'(end)(\s+)(class|function|if|property|sub|with)',
             bygroups(Keyword, Whitespace, Keyword)),
            (r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
            (r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
            (r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
            (r'rem\s.*[^\n]*', Comment.Single),
            # Keyword/operator/builtin tables are shared via _vbscript_builtins.
            (words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
            (words(_vbscript_builtins.OPERATORS), Operator),
            (words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            (words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
            (words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
            (words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
            (r'[a-z_][a-z0-9_]*', Name),
            (r'\b_\n', Operator),
            (words(r'(),.:'), Punctuation),
            (r'.+(\n)?', Error)
        ],
        'dim_more': [
            (r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)',
             bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
            default('#pop'),
        ],
        'string': [
            (r'[^"\n]+', String.Double),
            (r'\"\"', String.Double),
            (r'"', String.Double, '#pop'),
            (r'\n', Error, '#pop'),  # Unterminated string
        ],
    }
class BBCBasicLexer(RegexLexer):
"""
BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
It is also used by BBC Basic For Windows.
.. versionadded:: 2.4
"""
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
tokens = {
'root': [
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
default('code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
# Some special cases to make functions come out nicer
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
    """Heuristic for auto-detection: BBC Basic sources conventionally
    begin with a "REM >" (or line-numbered "10REM >") header line."""
    if text.startswith(('10REM >', 'REM >')):
        return 0.9
| |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os.path
import tempfile
import shutil
from cStringIO import StringIO
import unittest
import platform
import Queue
from collections import Counter
import shutil
import nose.tools
import mock
import PIL.Image
import numpy as np
from . import create_db as _
class BaseTest():
    """
    Provides some helpful files and utilities
    """
    @classmethod
    def setUpClass(cls):
        # mkstemp() results are kept as (fd, path) tuples; mkdtemp() is a path.
        cls.empty_file = tempfile.mkstemp()
        cls.empty_dir = tempfile.mkdtemp()
        # Create one good textfile
        cls.good_file = tempfile.mkstemp()
        # Create a color image
        cls.color_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_color = np.ones((8,10,3), dtype='uint8')
        cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
        cls.pil_image_color.save(cls.color_image_file[1])
        # Create a grayscale image
        cls.gray_image_file = tempfile.mkstemp(suffix='.png')
        cls.numpy_image_gray = np.ones((8,10), dtype='uint8')
        cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
        cls.pil_image_gray.save(cls.gray_image_file[1])
        # Write "<image path> <label>" lines; each pass adds one color and
        # one grayscale entry, so image_count tracks total lines written.
        # NOTE(review): loop nesting reconstructed from mangled indentation —
        # presumably both writes and the count sit in the inner loop; confirm.
        cls.image_count = 0
        for i in xrange(3):
            for j in xrange(3):
                os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i))
                os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i))
                cls.image_count += 2

    @classmethod
    def tearDownClass(cls):
        # Best-effort removal of the temp files; missing files are ignored.
        for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
            try:
                os.close(f[0])
                os.remove(f[1])
            except OSError:
                pass
        # Failure to remove the temp dir is deliberately re-raised.
        try:
            shutil.rmtree(cls.empty_dir)
        except OSError:
            raise
class TestFillLoadQueue(BaseTest):
    """Tests for _fill_load_queue: a valid list file fills the queue, an
    empty one raises BadInputFileError (with and without shuffling)."""

    def test_valid_file(self):
        for shuffle in (True, False):
            yield self.check_valid_file, shuffle

    def check_valid_file(self, shuffle):
        q = Queue.Queue()
        added = _._fill_load_queue(self.good_file[1], q, shuffle)
        assert added == self.image_count, 'lines not added'
        assert q.qsize() == self.image_count, 'queue not full'

    def test_empty_file(self):
        for shuffle in (True, False):
            yield self.check_empty_file, shuffle

    def check_empty_file(self, shuffle):
        q = Queue.Queue()
        nose.tools.assert_raises(
            _.BadInputFileError,
            _._fill_load_queue,
            self.empty_file[1], q, shuffle)
class TestParseLine():
    """Tests for _parse_line: well-formed "<path> <label>" lines parse and
    update the label distribution; malformed lines raise ParseLineError."""

    def test_good_lines(self):
        cases = [
            (0, '/path/image.jpg 0'),
            (1, 'image.jpg 1'),
            (2, 'image.jpg 2\n'),
            (3, 'image.jpg 3'),
            (4, 'spaces in filename.jpg 4'),
        ]
        for expected, line in cases:
            yield self.check_good_line, line, expected

    def check_good_line(self, line, label):
        distribution = Counter()
        _path, parsed = _._parse_line(line, distribution)
        assert parsed == label, 'parsed label wrong'
        assert distribution[parsed] == 1, 'distribution is wrong'

    def test_bad_lines(self):
        bad = ['nolabel.jpg', 'non-number.jpg five', 'negative.jpg -1']
        for line in bad:
            yield self.check_bad_line, line

    def check_bad_line(self, line):
        nose.tools.assert_raises(
            _.ParseLineError,
            _._parse_line,
            line, Counter()
        )
class TestCalculateBatchSize():
    """_calculate_batch_size follows the image count, capped at 100."""

    def test(self):
        cases = [(1, 1), (50, 50), (100, 100), (200, 100)]
        for count, expected in cases:
            yield self.check, count, expected

    def check(self, count, batch_size):
        assert _._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
    """_calculate_num_threads scales with batch size when shuffling,
    and stays single-threaded otherwise."""

    def test(self):
        cases = [
            (1000, True, 10),
            (1000, False, 1),
            (100, True, 10),
            (100, False, 1),
            (50, True, 7),
            (4, True, 2),
            (1, True, 1),
        ]
        for batch_size, shuffle, expected in cases:
            yield self.check, batch_size, shuffle, expected

    def check(self, batch_size, shuffle, num):
        actual = _._calculate_num_threads(batch_size, shuffle)
        assert actual == num
class TestInitialImageSum():
    """_initial_image_sum returns a float64 accumulator: 3-D for color
    images, 2-D for grayscale."""

    def test_color(self):
        accumulator = _._initial_image_sum(10, 10, 3)
        assert accumulator.shape == (10, 10, 3)
        assert accumulator.dtype == 'float64'

    def test_grayscale(self):
        accumulator = _._initial_image_sum(10, 10, 1)
        assert accumulator.shape == (10, 10)
        assert accumulator.dtype == 'float64'
class TestImageToDatum(BaseTest):
    """_array_to_datum preserves dimensions and marks encoded data for
    each supported compression setting."""

    def test(self):
        for compression in (None, 'png', 'jpg'):
            yield self.check_color, compression
            yield self.check_grayscale, compression

    def check_color(self, compression):
        datum = _._array_to_datum(self.numpy_image_color, 1, compression)
        rows, cols = self.numpy_image_color.shape[:2]
        assert datum.height == rows
        assert datum.width == cols
        assert datum.channels == 3
        assert datum.encoded == bool(compression)

    def check_grayscale(self, compression):
        datum = _._array_to_datum(self.numpy_image_gray, 1, compression)
        rows, cols = self.numpy_image_gray.shape[:2]
        assert datum.height == rows
        assert datum.width == cols
        assert datum.channels == 1
        assert datum.encoded == bool(compression)
class TestSaveMeans():
    """_save_means writes a mean file for every supported output format,
    for both color and grayscale accumulators."""

    def test(self):
        for color in (True, False):
            workdir = tempfile.mkdtemp()
            for name in ('mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto'):
                yield self.check, workdir, name, color
            shutil.rmtree(workdir)

    def check(self, directory, filename, color):
        target = os.path.join(directory, filename)
        shape = (8, 10, 3) if color else (8, 10)
        image_sum = np.ones(shape, dtype='float64')
        _._save_means(image_sum, 2, [target])
        assert os.path.exists(target)
class BaseCreationTest(BaseTest):
    """End-to-end create_db runs against self.BACKEND (set by subclasses)."""

    def test_image_sizes(self):
        for width in (8, 12):
            for channels in (1, 3):
                yield self.check_image_sizes, width, channels, False

    def check_image_sizes(self, width, channels, shuffle):
        _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                    width, 10, channels, self.BACKEND)

    def test_no_shuffle(self):
        _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                    10, 10, 1, self.BACKEND, shuffle=False)

    def test_means(self):
        mean_files = [os.path.join(self.empty_dir, 'mean.%s' % suffix)
                      for suffix in ('jpg', 'npy', 'png', 'binaryproto')]
        _.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
                    10, 10, 1, self.BACKEND, mean_files=mean_files)
# Concrete backends: run the full BaseCreationTest suite against each store.
class TestLmdbCreation(BaseCreationTest):
    BACKEND = 'lmdb'


class TestHdf5Creation(BaseCreationTest):
    BACKEND = 'hdf5'
| |
#!/usr/bin/env python
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
"""Contains general utility code for the metapaths project"""
from shutil import rmtree
from os import getenv, makedirs, _exit
from operator import itemgetter
from os.path import split, splitext, abspath, exists, dirname, join, isdir
from collections import defaultdict
from optparse import make_option
from datetime import datetime
from optparse import OptionParser
import sys, os, traceback, math, re, time
from libs.python_modules.utils.utils import *
from libs.python_modules.utils.errorcodes import error_message, get_error_list, insert_error
def halt_process(secs=4, verbose=False):
    """Pause, then hard-exit with a code summarizing accumulated errors.

    Exits with 200 when several errors were recorded, with the sole error
    code when exactly one was, and with 0 otherwise.
    """
    time.sleep(secs)
    errors = get_error_list()
    if len(errors) > 1:
        insert_error(200)
    if verbose:
        for errorcode in errors.keys():
            eprintf("ERROR:\t%d\t%s\n", errorcode, errors[errorcode])
    if len(errors.keys()) > 1:
        _exit(200)
    elif len(errors.keys()) == 1:
        _exit(errors.keys()[0])
    _exit(0)
def exit_process(message=None, delay=0, logger=None):
    """Report an error (optionally to a logger too), wait, then hard-exit 0."""
    if message is not None:
        eprintf("ERROR\t%s", message + "\n")
    eprintf('ERROR\tExiting the Python code\n')
    if logger:
        logger.printf('ERROR\tExiting the Python code\n')
        logger.printf('ERROR\t' + message + '\n')
    time.sleep(delay)
    _exit(0)
def exit_step(message=None):
    """Print a message plus the current traceback, pause, then hard-exit 0."""
    if message is not None:
        eprintf("%s", message + "\n")
    eprintf("INFO: Exiting the Python code\n")
    eprintf("ERROR\t" + str(traceback.format_exc(10)) + "\n")
    time.sleep(4)
    _exit(0)
def getShortORFId(orfname):
    """Return the trailing '<contig>_<orf>' digit pair of an ORF name, or ''."""
    match = re.search(r'(\d+_\d+)$', orfname)
    return match.group(1) if match else ""
def getShortContigId(contigname):
    """Return the trailing run of digits of a contig name, or ''."""
    match = re.search(r'(\d+)$', contigname)
    return match.group(1) if match else ""
def ContigID(contigname):
    """Strip the ORF index: '<sample>_<contig>_<orf>' -> '<sample>_<contig>'."""
    match = re.search(r'^(\S+_\d+)_\d+$', contigname)
    return match.group(1) if match else ""
def getSampleNameFromContig(contigname):
    """Return everything before the final '_<digits>' suffix, or ''."""
    match = re.search(r'(.*)_(\d+)$', contigname)
    return match.group(1) if match else ""
def strip_taxonomy(product):
    """Remove every non-nested [bracketed] annotation from a product string."""
    return re.sub(r'\[[^\[\]]+\]', '', product)
def getSamFiles(readdir, sample_name):
    '''Return the list of SAM files for this sample (empty when none exist).'''
    matches = glob(readdir + PATHDELIM + sample_name + '.sam')
    return matches if matches else []
def getReadFiles(readdir, sample_name):
    '''This function finds the set of fastq files that has the reads'''
    # Candidate fastq(.gz) files for this sample, reduced to basenames.
    _fastqfiles = glob(readdir + PATHDELIM + sample_name + '*.[fF][aA][Ss][Tt][qQ][gz.]*')
    fastqfiles = []
    for _f in _fastqfiles:
        f = re.sub(r'^.*[//]', '', _f)
        fastqfiles.append(f)

    # Recognized naming schemes: plain, batched (.bN), paired (_1/_2, _r1/_r2).
    samPATT = re.compile(sample_name + ".fastq")
    samPATT1 = re.compile(sample_name + "[.]b\d+.fastq")
    samPATT2 = re.compile('(' + sample_name + ')' + "_[1-2].(fastq|fastq[.]gz)")
    samPATT3 = re.compile(sample_name + "_r[1-2].fastq")
    samPATT4 = re.compile(sample_name + "_[1-2][.](b\d+).fastq")

    # BUGFIX: readfiles was first assigned only AFTER this loop, so the
    # single-end appends below raised NameError, and the late "readfiles = []"
    # would have discarded them anyway. Initialize it up front instead.
    readfiles = []
    batch = {}
    for f in fastqfiles:
        # Single-end files become their own one-element group.
        res = samPATT.search(f)
        if res:
            readfiles.append([readdir + PATHDELIM + f])
            continue
        res = samPATT1.search(f)
        if res:
            readfiles.append([readdir + PATHDELIM + f])
            continue
        # Paired files are grouped under a shared batch key.
        res = samPATT2.search(f)
        if res:
            if not res.group(1) in batch:
                batch[res.group(1)] = []
            batch[res.group(1)].append(readdir + PATHDELIM + f)
            continue
        res = samPATT3.search(f)
        if res:
            if not 'r' in batch:
                batch['r'] = []
            batch['r'].append(readdir + PATHDELIM + f)
            continue
        res = samPATT4.search(f)
        if res:
            if not res.group(1) in batch:
                batch[res.group(1)] = []
            batch[res.group(1)].append(readdir + PATHDELIM + f)
            continue
        eprintf("ERROR\tPossible error in read file naming \"%s\". Ignoring for now!\n", f)

    for key, values in batch.items():
        readfiles.append(values)
    return readfiles
def deprecated____getReadFiles(readdir, sample_name):
    '''This function finds the set of fastq files that has the reads'''
    # Deprecated: superseded by getReadFiles() above.  Tries each extension
    # pattern in turn; a later match replaces an earlier one wholesale.
    fastqFiles = []
    _fastqfiles = glob(readdir + PATHDELIM + sample_name + '_[12].[fF][aA][Ss][Tt][qQ]')
    if _fastqfiles:
        fastqFiles = _fastqfiles
    _fastqfiles = glob(readdir + PATHDELIM + sample_name + '_[12].[fF][qQ]')
    if _fastqfiles:
        fastqFiles = _fastqfiles
    _fastqfiles = glob(readdir + PATHDELIM + sample_name + '.[fF][aA][Ss][Tt][qQ]')
    if _fastqfiles:
        fastqFiles = _fastqfiles
    _fastqfiles = glob(readdir + PATHDELIM + sample_name + '.[fF][qQ]')
    if _fastqfiles:
        fastqFiles = _fastqfiles
    return fastqFiles
class GffFileParser(object):
    """Buffered iterator over the ORF ids of a GFF file.

    Reads the file in chunks of ``Size`` rows into ``orf_dictionary``
    (contig id -> list of attribute dicts) and yields the buffered keys.
    """

    def __init__(self, gff_filename):
        self.Size = 10000  # number of GFF rows buffered per refill
        self.i = 0
        self.orf_dictionary = {}
        self.gff_beg_pattern = re.compile("^#")  # GFF comment/header lines
        self.lines = []
        self.size = 0
        try:
            self.gff_file = open(gff_filename, 'r')
        except (IOError, AttributeError):
            # BUGFIX: the original printed the undefined name `dbname` here
            # (NameError), and never caught IOError — the exception open()
            # actually raises for a missing file.
            print("Cannot read the map file for database :" + gff_filename)
            sys.exit(0)

    def __iter__(self):
        return self

    def refillBuffer(self):
        """Load the next chunk of non-comment GFF rows into the buffer."""
        self.orf_dictionary = {}
        i = 0
        while i < self.Size:
            line = self.gff_file.readline()
            if not line:
                break
            if self.gff_beg_pattern.search(line):
                continue
            self.insert_orf_into_dict(line, self.orf_dictionary)
            i += 1
        self.orfs = self.orf_dictionary.keys()
        self.size = len(self.orfs)
        self.i = 0

    def next(self):
        """Return the next buffered contig id; refill when exhausted."""
        if self.i == self.size:
            self.refillBuffer()
        if self.size == 0:
            self.gff_file.close()
            raise StopIteration()
        if self.i < self.size:
            self.i = self.i + 1
            return self.orfs[self.i - 1]

    # Python 3 iterator-protocol alias (harmless under Python 2).
    __next__ = next

    def insert_orf_into_dict(self, line, contig_dict):
        """Parse one 9-column GFF row and append it under its contig id."""
        rawfields = re.split('\t', line)
        fields = []
        for field in rawfields:
            fields.append(field.strip())
        if len(fields) != 9:
            return  # malformed row: silently skipped
        attributes = {}
        attributes['seqname'] = fields[0]  # this is a bit of a duplication
        attributes['source'] = fields[1]
        attributes['feature'] = fields[2]
        attributes['start'] = int(fields[3])
        attributes['end'] = int(fields[4])
        try:
            attributes['score'] = float(fields[5])
        except:
            attributes['score'] = fields[5]  # e.g. '.' when no score
        attributes['strand'] = fields[6]
        attributes['frame'] = fields[7]
        self.split_attributes(fields[8], attributes)
        if not fields[0] in contig_dict:
            contig_dict[fields[0]] = []
        contig_dict[fields[0]].append(attributes)

    def insert_attribute(self, attributes, attribStr):
        """Store one 'key=value' pair; keys are lower-cased."""
        rawfields = re.split('=', attribStr)
        if len(rawfields) == 2:
            attributes[rawfields[0].strip().lower()] = rawfields[1].strip()

    def split_attributes(self, str, attributes):
        """Split the ';'-separated GFF attribute column into the dict."""
        rawattributes = re.split(';', str)
        for attribStr in rawattributes:
            self.insert_attribute(attributes, attribStr)
        return attributes
class Performance:
    """Accumulates per-server delay samples and reports simple statistics."""

    def __init__(self):
        self.sum = {}    # server -> sum of samples
        self.sqsum = {}  # server -> sum of squared samples
        self.num = {}    # server -> sample count

    def getAverageDelay(self, server=None):
        """Mean delay for one server, or pooled over all servers when None."""
        if server is None:
            total = 0
            count = 0
            for srv in self.sum:
                total += self.sum[srv]
                count += self.num[srv]
            return total / count if count > 0 else 0
        if self.num[server] == 0:
            return 0
        return self.sum[server] / self.num[server]

    def getStdDeviationDelay(self, server=None):
        """Pooled standard deviation across all servers (server=None only)."""
        if server is None:
            total = 0
            totalsq = 0
            count = 0
            for srv in self.sum:
                total += self.sum[srv]
                totalsq += self.sqsum[srv]
                count += self.num[srv]
            if count == 0:
                return 0
            variance = totalsq / count - total * total / (count * count)
            return math.sqrt(variance)

    def addPerformanceData(self, server, data):
        """Record one delay sample for a server."""
        if server not in self.sum:
            self.sum[server] = 0
            self.sqsum[server] = 0
            self.num[server] = 0
        self.sum[server] += data
        self.sqsum[server] += data * data
        self.num[server] += 1
        return True

    def getExpectedDelay(self):
        """Fixed expected-delay constant used by the scheduler."""
        return 20
class Job:
    """A unit of scheduled work: sample, database, split and algorithm,
    optionally pinned to a server."""

    def __init__(self, S, d, a, m, server=None):
        self.S = S  # sample
        self.d = d  # database
        self.a = a  # split
        self.m = m  # algorithm
        # BUGFIX: the constructor previously hard-coded `self.server = None`,
        # silently discarding the `server` argument (setValues stores it, so
        # storing it here was clearly the intent).
        self.server = server

    def setValues(self, S, d, a, m, t, server=None):
        """Re-populate the job, additionally recording its submission time."""
        self.S = S
        self.d = d
        self.a = a
        self.m = m
        self.submission_time = t
        self.server = server
        return True
def parse_command_line_parameters(script_info, argv):
    """Stub: always returns an empty option list (both arguments ignored)."""
    return []
class TreeMissingError(IOError):
    """Exception for missing tree file"""
    pass


class OtuMissingError(IOError):
    """Exception for missing OTU file"""
    pass


class AlignmentMissingError(IOError):
    """Exception for missing alignment file"""
    pass


class MissingFileError(IOError):
    """Generic exception for any other missing input file."""
    pass
def make_safe_f(f, allowed_params):
    """Make version of f that ignores extra named params."""
    def inner(*args, **kwargs):
        filtered = {k: v for k, v in kwargs.items() if k in allowed_params}
        return f(*args, **filtered)
    return inner
class FunctionWithParams(object):
    """A FunctionWithParams is a replacement for the function factory.
    Specifically, the params that will be used in the __call__ method are
    available in a dict so you can keep track of them with the object
    itself.
    """
    Application = None
    Algorithm = None
    Citation = None
    Params = {}
    Name = 'FunctionWithParams'  #override in subclasses
    _tracked_properties = []  #properties tracked like params

    def __init__(self, params):
        """Return new FunctionWithParams object with specified params.

        Note: expect params to contain both generic and per-method (e.g. for
        cdhit) params, so leaving it as a dict rather than setting
        attributes.

        Some standard entries in params are:

        [fill in on a per-application basis]
        """
        # NOTE(review): Params and _tracked_properties are CLASS-level
        # mutables — update()/extend() here mutate state shared by every
        # instance (and repeated inits re-append the tracked names); confirm
        # this sharing is intended before relying on per-instance params.
        self.Params.update(params)
        self._tracked_properties.extend(['Application','Algorithm','Citation'])

    def __str__(self):
        """Returns formatted key-value pairs from params."""
        res = [self.Name + ' parameters:']
        for t in self._tracked_properties:
            res.append(t + ':' + str(getattr(self, t)))
        for k, v in sorted(self.Params.items()):
            res.append(str(k) + ':' + str(v))
        return '\n'.join(res)

    def writeLog(self, log_path):
        """Writes self.Params and other relevant info to supplied path."""
        f=open(log_path, 'w')
        f.write(str(self))
        f.close()

    def getResult(self, *args, **kwargs):
        """Gets result in __call__. Override in subclasses."""
        return None

    def formatResult(self, result):
        """Formats result as string (for whatever "result" means)."""
        return str(result)

    def writeResult(self, result_path, result):
        """Writes result to result_path. May need to format in subclasses."""
        f = open(result_path, 'w')
        f.write(self.formatResult(result))
        f.close()

    def __call__ (self, result_path=None, log_path=None,\
        *args, **kwargs):
        """Returns the result of calling the function using the params dict.

        Parameters:
        [fill in on a per-application basis]
        """
        print("""Function with parameters""")
        result = self.getResult(*args, **kwargs)
        if log_path:
            self.writeLog(log_path)
        if result_path:
            self.writeResult(result_path, result)
        else:
            # Only return the result when it was not written to disk.
            return result
def get_qiime_project_dir():
    """ Returns the top-level QIIME directory
    """
    # util.py lives one directory below the project root, so the root is
    # the grandparent directory of this file.
    return dirname(dirname(abspath(__file__)))
def get_qiime_scripts_dir():
    """ Returns the QIIME scripts directory

        This value must be stored in qiime_config if the user
        has installed qiime using setup.py. If it is not in
        qiime_config, it is inferred from the qiime_project_dir.
    """
    configured = load_qiime_config()['qiime_scripts_dir']
    if configured is not None:
        return configured
    # Not configured: fall back to <project>/scripts.
    return join(get_qiime_project_dir(), 'scripts')
def load_qiime_config():
    """Return default parameters read in from file"""
    # Candidate config locations, in increasing priority order:
    # bundled default, $QIIME_CONFIG_FP, then ~/.qiime_config.
    candidate_paths = [get_qiime_project_dir() + '/qiime/support_files/qiime_config']
    env_path = getenv('QIIME_CONFIG_FP')
    if env_path:
        candidate_paths.append(env_path)
    home_dir = getenv('HOME')
    if home_dir:
        candidate_paths.append(home_dir + '/.qiime_config')
    config_files = [open(path) for path in candidate_paths if exists(path)]
    return parse_qiime_config_files(config_files)
# The qiime_blast_seqs function should evetually move to PyCogent,
# but I want to test that it works for all of the QIIME functionality that
# I need first. -Greg
def extract_seqs_by_sample_id(seqs, sample_ids, negate=False):
    """ Returns (seq id, seq) pairs if sample_id is in sample_ids """
    wanted = {}.fromkeys(sample_ids)
    for seq_id, seq in seqs:
        # Sample id is the part of the sequence id before the first '_'.
        is_member = seq_id.split('_')[0] in wanted
        if is_member != negate:
            yield seq_id, seq
def split_fasta_on_sample_ids(seqs):
    """ yields (sample_id, seq_id, seq) for each entry in seqs

        seqs: (seq_id,seq) pairs, as generated by MinimalFastaParser
    """
    for seq_id, seq in seqs:
        # Sample id: first whitespace-delimited token, minus its last '_N'.
        sample_id = seq_id.split()[0].rsplit('_', 1)[0]
        yield sample_id, seq_id, seq
def split_fasta_on_sample_ids_to_dict(seqs):
    """ return split_fasta_on_sample_ids as {sample_id: [(seq_id, seq), ], }

        seqs: (seq_id,seq) pairs, as generated by MinimalFastaParser
    """
    result = {}
    for sample_id, seq_id, seq in split_fasta_on_sample_ids(seqs):
        result.setdefault(sample_id, []).append((seq_id, seq))
    return result
def split_fasta_on_sample_ids_to_files(seqs, output_dir):
    """ output of split_fasta_on_sample_ids to fasta in specified output_dir

        seqs: (seq_id,seq) pairs, as generated by MinimalFastaParser
        output_dir: string defining directory where output should be
        written, will be created if it doesn't exist
    """
    create_dir(output_dir)
    handles = {}  # sample_id -> open output file handle
    for sample_id, seq_id, seq in split_fasta_on_sample_ids(seqs):
        if sample_id not in handles:
            handles[sample_id] = open('%s/%s.fasta' % (output_dir, sample_id), 'w')
        handles[sample_id].write('>%s\n%s\n' % (seq_id, seq))
    for handle in handles.values():
        handle.close()
    return None
def isarray(a):
    """
    This function tests whether an object is an array
    """
    # ndarray may be undefined when numpy is unavailable; treat that as False.
    try:
        return isinstance(a, ndarray)
    except:
        return False
def degap_fasta_aln(seqs):
    """degap a Fasta alignment.

    seqs: list of label,seq pairs
    """
    for label, seq in seqs:
        stripped = Sequence(moltype=DNA_with_more_gaps,
                            seq=seq, name=label).degap()
        stripped.Name = label
        yield stripped
def write_degapped_fasta_to_file(seqs, tmp_dir="/tmp/"):
    """ write degapped seqs to temp fasta file."""
    out_path = get_tmp_filename(tmp_dir=tmp_dir, prefix="degapped_", suffix=".fasta")
    handle = open(out_path, "w")
    for seq in degap_fasta_aln(seqs):
        handle.write(seq.toFasta() + "\n")
    handle.close()
    return out_path
# remove the string "/pathway-tools" to infer the pathway tools dir
def create_pathway_tools_dir_path_From_executable(pathway_tools_executable):
return( pathway_tools_executable.replace('pathway-tools/pathway-tools', 'pathway-tools'))
#removes an existing pgdb from the ptools-local/pgdbs/user directory under the
#pathway tools directory
def remove_existing_pgdb(sample_name, pathway_tools_exec):
    """Delete the '<sample>cyc' PGDB under ptools-local/pgdbs/user, if any."""
    pathway_tools_dir = create_pathway_tools_dir_path_From_executable(pathway_tools_exec)
    sample_pgdb_dir = pathway_tools_dir + "/" + "ptools-local/pgdbs/user/" + sample_name + "cyc"
    if os.path.exists(sample_pgdb_dir):
        return rmtree(sample_pgdb_dir)
def generate_log_fp(output_dir,
                    basefile_name='',
                    suffix='txt',
                    timestamp_pattern=''):
    """Return '<output_dir>/<basefile_name>.<suffix>'.

    timestamp_pattern is accepted for interface compatibility but unused.
    """
    return join(output_dir, '%s.%s' % (basefile_name, suffix))
class WorkflowError(Exception):
    """Raised when a workflow step fails."""
    pass
def contract_key_value_file(fileName):
    """Deduplicate a two-column TSV file in place, keeping the last value
    seen for each key.  Files shorter than 20 lines are left untouched."""
    handle = open(fileName, 'r')
    lines = handle.readlines()
    if len(lines) < 20:
        handle.close()
        return
    keyValuePairs = {}
    for line in lines:
        fields = [x.strip() for x in line.split('\t')]
        if len(fields) == 2:
            keyValuePairs[fields[0]] = fields[1]
    handle.close()
    handle = open(fileName, 'w')
    for key, value in keyValuePairs.iteritems():
        fprintf(handle, "%s\t%s\n", key, value)
    handle.close()
class FastaRecord(object):
    """A single FASTA entry: header line (name) plus its sequence."""

    def __init__(self, name, sequence):
        self.name = name
        self.sequence = sequence
# return FastaRecord(title, sequence)
def read_fasta_records(input_file):
    """Parse an open FASTA file handle into a list of FastaRecord objects.

    Blank lines are skipped; the header line (including '>') becomes the
    record name.  Sequence text before any header line is discarded.
    """
    records = []
    # BUGFIX: `name` was previously unbound until the first '>' line, so a
    # file whose content precedes any header raised NameError at EOF.
    name = ""
    sequence = ""
    while 1:
        line = input_file.readline()
        if line == "":
            # EOF: flush the record in progress, if any.
            if sequence != "" and name != "":
                records.append(FastaRecord(name, sequence))
            return records
        if line == '\n':
            continue
        line = line.rstrip()
        if line.startswith(">"):
            # New header: flush the previous record first.
            if sequence != "" and name != "":
                records.append(FastaRecord(name, sequence))
            name = line.rstrip()
            sequence = ""
        else:
            sequence = sequence + line.rstrip()
    return records
class WorkflowLogger(object):
    """Append-style logger backed by a file; each printf/write reopens the
    file, writes, flushes and closes it so partial runs stay on disk."""

    def __init__(self, log_fp=None, params=None, metapaths_config=None, open_mode='w'):
        if log_fp:
            self._filename = log_fp
            #contract the file if we have to
            # open_mode 'c' means "contract then append".
            if open_mode == 'c':
                try:
                    contract_key_value_file(log_fp)
                except:
                    pass
                open_mode = 'a'
            # Touch the file with the requested mode, then close it;
            # subsequent writes reopen in append mode.
            self._f = open(self._filename, open_mode)
            self._f.close()
        else:
            self._f = None
        #start_time = datetime.now().strftime('%H:%M:%S on %d %b %Y')
        self.writemetapathsConfig(metapaths_config)
        self.writeParams(params)

    def get_log_filename(self):
        # Path of the underlying log file.
        return self._filename

    def printf(self, fmt, *args):
        """printf-style formatted write to the log file."""
        self._f = open(self._filename, 'a')
        if self._f:
            self._f.write(fmt % args)
            self._f.flush()
        else:
            pass
        self._f.close()

    def write(self, s):
        """Write a raw string to the log file."""
        self._f = open(self._filename, 'a')
        if self._f:
            self._f.write(s)
            # Flush here so users can see what step they're
            # on after each write, since some steps can take
            # a long time, and a relatively small amount of
            # data is being written to the log files.
            self._f.flush()
        else:
            pass
        self._f.close()

    def writemetapathsConfig(self, metapaths_config):
        """Dump non-empty metapaths_config key/value pairs to the log."""
        if metapaths_config == None:
            #self.write('#No metapaths config provided.\n')
            pass
        else:
            self.write('#metapaths_config values:\n')
            for k, v in metapaths_config.items():
                if v:
                    self.write('%s\t%s\n' % (k, v))
            self.write('\n')

    def writeParams(self, params):
        """Dump the nested parameter dict (section:key value) to the log."""
        if params == None:
            #self.write('#No params provided.\n')
            pass
        else:
            self.write('#parameter file values:\n')
            for k, v in params.items():
                for inner_k, inner_v in v.items():
                    # Empty values are logged as the flag 'True'.
                    val = inner_v or 'True'
                    self.write('%s:%s\t%s\n' % (k, inner_k, val))
            self.write('\n')

    def close(self):
        """Write a final timestamp and close the handle if one is open."""
        end_time = datetime.now().strftime('%H:%M:%S on %d %b %Y')
        self.write('\nLogging stopped at %s\n' % end_time)
        if self._f:
            self._f.close()
        else:
            pass
def ShortenORFId(_orfname, RNA=False):
    """Return the short trailing ORF id ('<n>_<m>', or '<n>_<m>_[tr]RNA'
    when RNA=True); '' when the name does not match."""
    if RNA:
        pattern = re.compile("(\\d+_\\d+_[tr]RNA)$")
    else:
        pattern = re.compile("(\\d+_\\d+)$")
    match = pattern.search(_orfname)
    return match.group(1) if match else ""
def ShortentRNAId(_orfname):
    """Return the short trailing tRNA id ('<n>_<m>_tRNA'), or ''."""
    match = re.compile("(\\d+_\\d+_tRNA)$").search(_orfname)
    return match.group(1) if match else ""
def ShortenrRNAId(_orfname):
    """Return the short trailing rRNA id ('<n>_<m>_rRNA'), or ''."""
    match = re.compile("(\\d+_\\d+_rRNA)$").search(_orfname)
    return match.group(1) if match else ""
def ShortenContigId(_contigname):
    """Return the trailing digit run of a contig name, or ''."""
    match = re.compile("(\\d+)$").search(_contigname)
    return match.group(1) if match else ""
def create_metapaths_parameters(filename, folder):
    """ creates a parameters file from the default """
    default_filename = folder + PATHDELIM + 'resources' + PATHDELIM + "template_param.txt"
    try:
        filep = open(default_filename, 'r')
    except:
        eprintf("ERROR: cannot open the default parameter file " + sQuote(default_filename))
        # BUGFIX: exit_process() takes no `errorCode` keyword; passing one
        # raised TypeError here and masked the real failure.
        exit_process("ERROR: cannot open the default parameter file " + sQuote(default_filename))
    lines = filep.readlines()
    filep.close()
    # Copy the template verbatim into the target parameter file.
    with open(filename, 'w') as newfile:
        for line in lines:
            fprintf(newfile, "%s", line)
    return True
def touch(fname, times=None):
    """Unix-style touch: create fname if needed and set its (atime, mtime);
    times=None means "now"."""
    handle = open(fname, 'a')
    try:
        os.utime(fname, times)
    finally:
        handle.close()
def create_metapaths_configuration(filename, folder):
    """ creates a configuration file from the default,
    substituting <VARIABLE> placeholders from (in order) previously set
    values, the environment, or built-in defaults. """
    variablePATT = re.compile(r'<([a-zA-Z0-9_]*)>')
    default_filename = folder + PATHDELIM + 'resources' + PATHDELIM + "template_config.txt"
    try:
        filep = open(default_filename, 'r')
    except:
        eprintf("ERROR: cannot open the default config file " + sQuote(default_filename))
        # BUGFIX: exit_process() takes no `errorCode` keyword; passing one
        # raised TypeError here and masked the real failure.
        exit_process("ERROR: cannot open the default config file " + sQuote(default_filename))

    setVariables = {}  # placeholder name -> value chosen on a previous line
    lines = filep.readlines()
    with open(filename, 'w') as newfile:
        for line in lines:
            line = line.strip()
            result = variablePATT.search(line)
            if result:
                VARIABLE = result.group(1)
                # First whitespace-delimited token is the config key name.
                CONFIG_VARIABLE = [x for x in line.split(' ') if x.strip()][0]
                if VARIABLE in setVariables:
                    line = line.replace('<' + VARIABLE + '>', setVariables[VARIABLE])
                elif VARIABLE in os.environ:
                    line = line.replace('<' + VARIABLE + '>', os.environ[VARIABLE])
                else:
                    # No value available: fall back to a built-in default.
                    default = ""
                    if VARIABLE == 'METAPATHWAYS_PATH':
                        default = folder + PATHDELIM
                    if VARIABLE == 'METAPATHWAYS_DB':
                        if 'METAPATHWAYS_PATH' in os.environ:
                            default = os.environ['METAPATHWAYS_PATH'] + PATHDELIM + 'databases/'
                        else:
                            eprintf("%-10s:\tSet required environment variable METAPATHWAYS_DB as 'export METAPATHWAYS_DB=<path>'\n" % ('INFO'))
                    if VARIABLE == 'PTOOLS_DIR':
                        if 'METAPATHWAYS_PATH' in os.environ:
                            # Create a placeholder pathway-tools executable
                            # under the regression-test tree.
                            default = os.environ['METAPATHWAYS_PATH'] + PATHDELIM + '/regtests'
                            target = default + PATHDELIM + 'pathway-tools'
                            if not os.path.exists(target):
                                os.mkdir(target)
                            target = target + PATHDELIM + 'pathway-tools'
                            if not os.path.exists(target):
                                touch(target)
                        else:
                            # BUGFIX: format string was "%-10%" (a literal '%'
                            # conversion), which raised TypeError with the
                            # 'INFO' argument; it should be "%-10s".
                            eprintf("%-10s:\tSet shell essential variable PTOOLS_DIR as 'export PTOOLS_DIR=<path>'\n" % ('INFO'))
                    setVariables[VARIABLE] = default
                    line = line.replace('<' + VARIABLE + '>', default)
                    eprintf("INFO: Setting default value for \"%s\" as \"%s\"\n" % (CONFIG_VARIABLE, line))
                    eprintf("      To set other values :\n")
                    eprintf("          1. remove file \"%s\"\n" % (filename))
                    eprintf("          2. set the shell variable \"%s\"\n" % (VARIABLE))
                    eprintf("          3. rerun command\n")
            fprintf(newfile, "%s\n", line)
    filep.close()
    return True
| |
#!/usr/bin/env python
import sys
import os
import rospy
import rospkg
from threading import Thread
from python_qt_binding import loadUi
from python_qt_binding import QtGui
from python_qt_binding.QtGui import QWidget
from trainergui import Ui_MainWindow
from inmoov_msgs.msg import MotorStatus
from inmoov_msgs.msg import MotorCommand
from inmoov_msgs.srv import MotorParameter
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
# https://github.com/ColinDuquesnoy/QDarkStyleSheet
import qdarkstyle
# https://nikolak.com/pyqt-qt-designer-getting-started/
class ExampleApp(QtGui.QMainWindow, Ui_MainWindow):
    def __init__(self):
        # Explaining super is out of the scope of this article
        # So please google it if you're not familar with it
        # Simple reason why we use it here is that it allows us to
        # access variables, methods etc in the design.py file
        super(self.__class__, self).__init__()
        self.setupUi(self)  # This is defined in design.py file automatically
        # It sets up layout and widgets that are defined

        # One motorparameter service per bus: torso, left arm, right arm.
        self.parameterTopic = ["servobus/torso/motorparameter","servobus/leftarm/motorparameter","servobus/rightarm/motorparameter"]

        self.motorcommand = MotorCommand()
        self.jointcommand = JointState()
        # Joint names for all three buses, 11 servos each, flattened so that
        # index = bus * 11 + servo.
        self.jointNames = []
        for servo in range (0, 11):
            self.jointNames.append( rospy.get_param('servobus/torso/servomap/'+str(servo)+'/name'))
        for servo in range (0, 11):
            self.jointNames.append( rospy.get_param('servobus/leftarm/servomap/'+str(servo)+'/name'))
        for servo in range (0, 11):
            self.jointNames.append( rospy.get_param('servobus/rightarm/servomap/'+str(servo)+'/name'))
        #print(self.jointNames)

        #'right_pinky','right_ring','right_middle','right_index','right_thumb',
        #'right_hand','right_bicep','right_bicep_rotate','right_shoulder_side','right_shoulder_up','','',
        #'eye_leftright','eyes_updown','jaw','head_leftright','head_updown','head_tilt','waist_lean','waist_rotate','','','','',
        #'left_pinky','left_ring','left_middle','left_index','left_thumb',
        #'left_hand','left_bicep','left_bicep_rotate','left_shoulder_side','left_shoulder_up','','',

        self.setupDropDowns()

        # Wire UI events to their handlers.
        self.cmbBus.currentIndexChanged.connect(self.busChanged)
        self.cmbServo.currentIndexChanged.connect(self.servoChanged)
        self.txtGoal.editingFinished.connect(self.setGoal)
        self.txtMinPulse.editingFinished.connect(self.setMinPulse)
        self.txtMaxPulse.editingFinished.connect(self.setMaxPulse)
        self.txtMinGoal.editingFinished.connect(self.setMinGoal)
        self.txtMaxGoal.editingFinished.connect(self.setMaxGoal)
        self.txtMinSensor.editingFinished.connect(self.setMinSensor)
        self.txtMaxSensor.editingFinished.connect(self.setMaxSensor)
        self.chkEnabled.stateChanged.connect(self.setEnabled)
        self.chkCalibrated.stateChanged.connect(self.setCalibrated)
        self.sliderGoal.valueChanged.connect(self.sliderChanged)

        rospy.init_node('trainer', anonymous=True)
        # Per-bus command publishers and status subscribers (one callback per
        # bus; each callback filters on the currently selected bus/servo).
        self.commandPublisher = []
        self.commandPublisher.append(rospy.Publisher("servobus/torso/motorcommand", MotorCommand, queue_size=10))
        self.commandPublisher.append(rospy.Publisher("servobus/leftarm/motorcommand", MotorCommand, queue_size=10))
        self.commandPublisher.append(rospy.Publisher("servobus/rightarm/motorcommand", MotorCommand, queue_size=10))

        self.statusSubscriber = []
        self.statusSubscriber.append(rospy.Subscriber("servobus/torso/motorstatus", MotorStatus, self.callback0))
        self.statusSubscriber.append(rospy.Subscriber("servobus/leftarm/motorstatus", MotorStatus, self.callback1))
        self.statusSubscriber.append(rospy.Subscriber("servobus/rightarm/motorstatus", MotorStatus, self.callback2))

        self.jointPublisher = rospy.Publisher("joint_command", JointState, queue_size=10)

        # Start on bus 0 / servo 0 and populate the UI.
        self.bus = 0
        self.servo = 0
        self.busChanged()
        self.servoChanged()
def busChanged(self):
# unregister topics and reregister to the new ones
self.bus = self.cmbBus.currentIndex()
self.cmbServo.clear()
for s in range(0, 11):
self.cmbServo.addItem(self.jointNames[(self.bus * 11) + s])
#self.commandPublisher.unregister()
#self.commandPublisher = rospy.Publisher(self.commandTopic[bus], MotorCommand, queue_size=10)
#self.statusSubscriber.unregister()
#self.statusSubscriber = rospy.Subscriber(self.statusTopic[self.bus], MotorStatus, self.callback)
self.servoChanged()
def servoChanged(self):
if self.cmbServo.count() > 0:
self.servo = self.cmbServo.currentIndex()
self.getMinPulse()
self.getMaxPulse()
self.getMinGoal()
self.getMaxGoal()
self.getGoal()
self.getMinSensor()
self.getMaxSensor()
self.getEnabled()
self.getCalibrated()
def callback0(self, data):
    """Refresh status widgets from a bus-0 (torso) MotorStatus message.

    Messages for other servos, or while another bus is selected, are ignored.
    """
    if data.id != self.servo or self.bus != 0:
        return
    self.txtPosition.setText(str(data.position))
    self.txtSpeed.setText(str(data.presentspeed))
    self.txtSensorRaw.setText(str(data.posraw))
    self.chkMoving.setChecked(bool(data.moving))
    self.chkPower.setChecked(bool(data.power))
def callback1(self, data):
    """Refresh status widgets from a bus-1 (left arm) MotorStatus message.

    Messages for other servos, or while another bus is selected, are ignored.
    """
    if data.id != self.servo or self.bus != 1:
        return
    self.txtPosition.setText(str(data.position))
    self.txtSpeed.setText(str(data.presentspeed))
    self.txtSensorRaw.setText(str(data.posraw))
    self.chkMoving.setChecked(bool(data.moving))
    self.chkPower.setChecked(bool(data.power))
def callback2(self, data):
    """Refresh status widgets from a bus-2 (right arm) MotorStatus message.

    Messages for other servos, or while another bus is selected, are ignored.
    """
    if data.id != self.servo or self.bus != 2:
        return
    self.txtPosition.setText(str(data.position))
    self.txtSpeed.setText(str(data.presentspeed))
    self.txtSensorRaw.setText(str(data.posraw))
    self.chkMoving.setChecked(bool(data.moving))
    self.chkPower.setChecked(bool(data.power))
def degreestoradians(self, d):
    """Convert an angle from degrees to radians.

    Fix: the previous truncated literal 3.1415926 introduced an error of
    ~1e-8 rad per degree; use pi at full double precision instead.
    """
    return d * (3.141592653589793 / 180.0)
def setupDropDowns(self):
    """Populate the bus, servo and smoothing combo boxes.

    Bus and servo display names come from the ROS parameter server; the
    smoothing modes are fixed labels understood by the firmware.
    """
    self.cmbBus.addItem(rospy.get_param('/servobus/torso/name'))
    self.cmbBus.addItem(rospy.get_param('/servobus/leftarm/name'))
    self.cmbBus.addItem(rospy.get_param('/servobus/rightarm/name'))
    for servo in range(0, 11):
        key = '/servobus/torso/servomap/' + str(servo) + '/name'
        print(key)
        self.cmbServo.addItem(rospy.get_param(key))
    for label in ('0 - Instant',
                  '1 - Max Speed',
                  '2 - Linear Ramp',
                  '3 - COS Ramp',
                  '4 - COS^2 Ramp'):
        self.cmbSmoothing.addItem(label)
def sliderChanged(self, i):
    """Mirror a slider move into the goal text box and publish it.

    The slider works in thousandths of a degree (see the *1000.0 scaling
    in getGoal), so divide back down before display.
    """
    goal = i / 1000.0
    self.txtGoal.setText(str(goal))
    self.setGoal()
def setGoal(self):
    """Publish the goal position from txtGoal to the motor bus and as a JointState.

    Fix: the joint-name lookup used a stride of 12 (self.bus * 12), which is
    inconsistent with busChanged() and setupDropDowns() — both address
    jointNames with 11 servos per bus. Use the same stride of 11 here so the
    published joint name matches the servo shown in the UI.
    """
    self.motorcommand.id = self.cmbServo.currentIndex()
    self.motorcommand.parameter = 0x1E  # goal-position register
    self.motorcommand.value = float(self.txtGoal.text())
    self.commandPublisher[self.bus].publish(self.motorcommand)
    self.jointcommand.header = Header()
    self.jointcommand.header.stamp = rospy.Time.now()
    # NOTE(review): stride 11 matches busChanged(); confirm jointNames layout.
    self.jointcommand.name = [self.jointNames[(self.bus * 11) + self.servo]]
    self.jointcommand.position = [self.degreestoradians(float(self.txtGoal.text()))]
    self.jointcommand.velocity = []
    self.jointcommand.effort = []
    self.jointPublisher.publish(self.jointcommand)
def getGoal(self):
    """Query the current goal (0x1E) and sync the text box and slider."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    value = query(self.cmbServo.currentIndex(), 0x1E).data
    self.txtGoal.setText(str(value))
    self.sliderGoal.setValue(int(value * 1000.0))
def setMinPulse(self):
    """Send the minimum-pulse value (0x14) from the text box to the active bus."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0x14
    cmd.value = float(self.txtMinPulse.text())
    self.commandPublisher[self.bus].publish(cmd)
def getMinPulse(self):
    """Query the minimum-pulse value (0x14) and display it."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0x14)
    self.txtMinPulse.setText(str(reply.data))
def setMaxPulse(self):
    """Send the maximum-pulse value (0x16) from the text box to the active bus."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0x16
    cmd.value = float(self.txtMaxPulse.text())
    self.commandPublisher[self.bus].publish(cmd)
def getMaxPulse(self):
    """Query the maximum-pulse value (0x16) and display it."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0x16)
    self.txtMaxPulse.setText(str(reply.data))
def setMinGoal(self):
    """Send the minimum-goal value (0x06) and update the slider's lower bound."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0x06
    cmd.value = float(self.txtMinGoal.text())
    # Keep the slider range in sync before publishing, as before.
    self.sliderGoal.setMinimum(int(cmd.value * 1000.0))
    self.commandPublisher[self.bus].publish(cmd)
def getMinGoal(self):
    """Query the minimum-goal value (0x06); sync text box and slider bound."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    value = query(self.cmbServo.currentIndex(), 0x06).data
    self.txtMinGoal.setText(str(value))
    self.sliderGoal.setMinimum(int(value * 1000.0))
def setMaxGoal(self):
    """Send the maximum-goal value (0x08) and update the slider's upper bound."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0x08
    cmd.value = float(self.txtMaxGoal.text())
    # Keep the slider range in sync before publishing, as before.
    self.sliderGoal.setMaximum(int(cmd.value * 1000.0))
    self.commandPublisher[self.bus].publish(cmd)
def getMaxGoal(self):
    """Query the maximum-goal value (0x08); sync text box and slider bound."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    value = query(self.cmbServo.currentIndex(), 0x08).data
    self.txtMaxGoal.setText(str(value))
    self.sliderGoal.setMaximum(int(value * 1000.0))
def setMinSensor(self):
    """Send the minimum-sensor calibration value (0xA2) to the active bus."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0xA2
    cmd.value = float(self.txtMinSensor.text())
    self.commandPublisher[self.bus].publish(cmd)
def getMinSensor(self):
    """Query the minimum-sensor calibration value (0xA2) and display it."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0xA2)
    self.txtMinSensor.setText(str(reply.data))
def setMaxSensor(self):
    """Send the maximum-sensor calibration value (0xA4) to the active bus."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0xA4
    cmd.value = float(self.txtMaxSensor.text())
    self.commandPublisher[self.bus].publish(cmd)
def getMaxSensor(self):
    """Query the maximum-sensor calibration value (0xA4) and display it."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0xA4)
    self.txtMaxSensor.setText(str(reply.data))
def setEnabled(self):
    """Send the torque-enable flag (0x18) from the checkbox to the active bus.

    NOTE(review): this shadows QWidget.setEnabled on the form class —
    presumably intentional since it is wired to chkEnabled.stateChanged.
    """
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0x18
    cmd.value = float(self.chkEnabled.isChecked())
    self.commandPublisher[self.bus].publish(cmd)
def getEnabled(self):
    """Query the torque-enable flag (0x18) and reflect it in the checkbox."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0x18)
    self.chkEnabled.setChecked(bool(reply.data))
def setCalibrated(self):
    """Send the calibrated flag (0xA0) from the checkbox to the active bus."""
    cmd = self.motorcommand
    cmd.id = self.cmbServo.currentIndex()
    cmd.parameter = 0xA0
    cmd.value = float(self.chkCalibrated.isChecked())
    self.commandPublisher[self.bus].publish(cmd)
def getCalibrated(self):
    """Query the calibrated flag (0xA0) and reflect it in the checkbox."""
    bus_index = self.cmbBus.currentIndex()
    query = rospy.ServiceProxy(self.parameterTopic[bus_index], MotorParameter)
    reply = query(self.cmbServo.currentIndex(), 0xA0)
    self.chkCalibrated.setChecked(bool(reply.data))
def main():
    """Create the Qt application, apply the dark theme, and run the trainer."""
    app = QtGui.QApplication(sys.argv)
    app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=False))
    window = ExampleApp()
    window.show()
    app.exec_()  # blocks until the window is closed
if __name__ == '__main__':  # run the trainer GUI only when executed directly
    main()
| |
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for pushimage.py"""
from __future__ import print_function
import logging
import mock
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..'))
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import signing
from chromite.scripts import pushimage
class InputInsnsTest(cros_test_lib.MockTestCase):
    """Tests for InputInsns"""

    def testBasic(self):
        """Simple smoke test"""
        # GS existence checks are stubbed out so InputInsns uses its defaults.
        with mock.patch.object(gs.GSContext, 'Exists', return_value=False):
            insns = pushimage.InputInsns('test.board')
            insns.GetInsnFile('recovery')
            self.assertEqual(insns.GetChannels(), ['dev', 'canary'])
            self.assertEqual(insns.GetKeysets(), ['stumpy-mp-v3'])

    def testGetInsnFile(self):
        """Verify various inputs result in right insns path"""
        # (image_type, expected basename-without-extension) pairs.
        testdata = (
            ('UPPER_CAPS', 'UPPER_CAPS'),
            ('recovery', 'test.board'),
            ('firmware', 'test.board.firmware'),
            ('factory', 'test.board.factory'),
        )
        insns = pushimage.InputInsns('test.board')
        for image_type, filename in testdata:
            ret = insns.GetInsnFile(image_type)
            self.assertEqual(os.path.basename(ret), '%s.instructions' % (filename))

    def testSplitCfgField(self):
        """Verify splitting behavior behaves"""
        # Fields may be separated by spaces, commas, tabs, or combinations.
        testdata = (
            ('', []),
            ('a b c', ['a', 'b', 'c']),
            ('a, b', ['a', 'b']),
            ('a,b', ['a', 'b']),
            ('a,\tb', ['a', 'b']),
            ('a\tb', ['a', 'b']),
        )
        for val, exp in testdata:
            ret = pushimage.InputInsns.SplitCfgField(val)
            self.assertEqual(ret, exp)

    def testOutputInsnsBasic(self):
        """Verify output instructions are sane"""
        exp_content = """[insns]
keyset = stumpy-mp-v3
channel = dev canary
chromeos_shell = false
ensure_no_password = true
firmware_update = true
security_checks = true
create_nplusone = true
[general]
"""
        insns = pushimage.InputInsns('test.board')
        # Capture the write rather than touching the filesystem.
        m = self.PatchObject(osutils, 'WriteFile')
        insns.OutputInsns('recovery', '/bogus', {}, {})
        self.assertTrue(m.called)
        content = m.call_args_list[0][0][1]
        self.assertEqual(content.rstrip(), exp_content.rstrip())

    def testOutputInsnsReplacements(self):
        """Verify output instructions can be updated"""
        exp_content = """[insns]
keyset = batman
channel = dev
chromeos_shell = false
ensure_no_password = true
firmware_update = true
security_checks = true
create_nplusone = true
[general]
board = board
config_board = test.board
"""
        # Overrides for the [insns] and [general] sections.
        sect_insns = {
            'channel': 'dev',
            'keyset': 'batman',
        }
        sect_general = {
            'config_board': 'test.board',
            'board': 'board',
        }
        insns = pushimage.InputInsns('test.board')
        m = self.PatchObject(osutils, 'WriteFile')
        insns.OutputInsns('recovery', '/a/file', sect_insns, sect_general)
        self.assertTrue(m.called)
        content = m.call_args_list[0][0][1]
        self.assertEqual(content.rstrip(), exp_content.rstrip())
class MarkImageToBeSignedTest(gs_unittest.AbstractGSContextTest):
    """Tests for MarkImageToBeSigned()

    Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``.
    """

    def setUp(self):
        # Minor optimization -- we call this for logging purposes in the main
        # code, but don't really care about it for testing. It just slows us.
        self.PatchObject(cros_build_lib, 'MachineDetails', return_value='1234\n')

    def testBasic(self):
        """Simple smoke test"""
        tbs_base = 'gs://some-bucket'
        insns_path = 'chan/board/ver/file.instructions'
        tbs_file = '%s/tobesigned/90,chan,board,ver,file.instructions' % tbs_base
        ret = pushimage.MarkImageToBeSigned(self.ctx, tbs_base, insns_path, 90)
        self.assertEqual(ret, tbs_file)

    def testPriority(self):
        """Verify diff priority values get used correctly"""
        for prio, sprio in ((0, '00'), (9, '09'), (35, '35'), (99, '99')):
            ret = pushimage.MarkImageToBeSigned(self.ctx, '', '', prio)
            self.assertEqual(ret, '/tobesigned/%s,' % sprio)

    def testBadPriority(self):
        """Verify we reject bad priority values"""
        # Valid priorities are 0..99 inclusive.
        for prio in (-10, -1, 100, 91239):
            self.assertRaises(ValueError, pushimage.MarkImageToBeSigned, self.ctx,
                              '', '', prio)

    def testTbsUpload(self):
        """Make sure we actually try to upload the file"""
        pushimage.MarkImageToBeSigned(self.ctx, '', '', 50)
        self.gs_mock.assertCommandContains(['cp', '--'])
class PushImageTests(gs_unittest.AbstractGSContextTest):
    """Tests for PushImage()"""

    def setUp(self):
        # Stub the marker so tests can assert on whether/when it was invoked.
        self.mark_mock = self.PatchObject(pushimage, 'MarkImageToBeSigned')

    def testBasic(self):
        """Simple smoke test"""
        EXPECTED = {
            'canary': [
                ('gs://chromeos-releases/canary-channel/test.board-hi/5126.0.0/'
                 'ChromeOS-recovery-R34-5126.0.0-test.board-hi.instructions')],
            'dev': [
                ('gs://chromeos-releases/dev-channel/test.board-hi/5126.0.0/'
                 'ChromeOS-recovery-R34-5126.0.0-test.board-hi.instructions')],
        }
        with mock.patch.object(gs.GSContext, 'Exists', return_value=True):
            urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                                       profile='hi')
            self.assertEqual(urls, EXPECTED)

    def testBasicMock(self):
        """Simple smoke test in mock mode"""
        with mock.patch.object(gs.GSContext, 'Exists', return_value=True):
            pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                                dry_run=True, mock=True)

    def testBadVersion(self):
        """Make sure we barf on bad version strings"""
        self.assertRaises(ValueError, pushimage.PushImage, '', '', 'asdf')

    def testNoInsns(self):
        """Boards w/out insn files should get skipped"""
        urls = pushimage.PushImage('/src', 'a bad bad board', 'R34-5126.0.0')
        # No GS traffic and no URL map when there is nothing to sign.
        self.assertEqual(self.gs_mock.call_count, 0)
        self.assertEqual(urls, None)

    def testSignTypesRecovery(self):
        """Only sign the requested recovery type"""
        EXPECTED = {
            'canary': [
                ('gs://chromeos-releases/canary-channel/test.board/5126.0.0/'
                 'ChromeOS-recovery-R34-5126.0.0-test.board.instructions')],
            'dev': [
                ('gs://chromeos-releases/dev-channel/test.board/5126.0.0/'
                 'ChromeOS-recovery-R34-5126.0.0-test.board.instructions')],
        }
        urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                                   sign_types=['recovery'])
        self.assertEqual(self.gs_mock.call_count, 18)
        self.assertTrue(self.mark_mock.called)
        self.assertEqual(urls, EXPECTED)

    def testSignTypesNone(self):
        """Verify nothing is signed when we request an unavailable type"""
        urls = pushimage.PushImage('/src', 'test.board', 'R34-5126.0.0',
                                   sign_types=['nononononono'])
        self.assertEqual(self.gs_mock.call_count, 16)
        self.assertFalse(self.mark_mock.called)
        self.assertEqual(urls, {})

    def testGsError(self):
        """Verify random GS errors don't make us blow up entirely"""
        self.gs_mock.AddCmdResult(partial_mock.In('stat'), returncode=1,
                                  output='gobblety gook\n')
        with cros_test_lib.LoggingCapturer('chromite'):
            self.assertRaises(pushimage.PushError, pushimage.PushImage, '/src',
                              'test.board', 'R34-5126.0.0')
class MainTests(cros_test_lib.MockTestCase):
    """Tests for main()"""

    def setUp(self):
        # Stub out the heavy lifting; these tests only exercise CLI parsing.
        self.PatchObject(pushimage, 'PushImage')

    def testBasic(self):
        """Simple smoke test"""
        argv = ['--board', 'test.board', '/src', '--yes']
        pushimage.main(argv)
if __name__ == '__main__':
    # Use our local copy of insns for testing as the main one is not
    # available in the public manifest.
    signing.INPUT_INSN_DIR = signing.TEST_INPUT_INSN_DIR
    # Run the tests.
    cros_test_lib.main(level=logging.INFO)
| |
"""
Test breakpoint names.
"""
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class BreakpointNames(TestBase):
    """Exercise the breakpoint-name APIs: setting, rejecting, using,
    configuring, and permission-protecting breakpoint names.

    Fixes: replaced deprecated ``assertEquals`` aliases with ``assertEqual``,
    removed a stray trailing semicolon, and dropped a duplicated
    SBBreakpointName construction in do_check_configuring_permissions_sb.
    """

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    @add_test_categories(['pyapi'])
    def test_setting_names(self):
        """Use Python APIs to test that we can set breakpoint names."""
        self.build()
        self.setup_target()
        self.do_check_names()

    def test_illegal_names(self):
        """Use Python APIs to test that we don't allow illegal names."""
        self.build()
        self.setup_target()
        self.do_check_illegal_names()

    def test_using_names(self):
        """Use Python APIs to test that operations on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_using_names()

    def test_configuring_names(self):
        """Use Python APIs to test that configuring options on breakpoint names works correctly."""
        self.build()
        self.make_a_dummy_name()
        self.setup_target()
        self.do_check_configuring_names()

    def test_configuring_permissions_sb(self):
        """Use Python APIs to test that configuring permissions on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_configuring_permissions_sb()

    def test_configuring_permissions_cli(self):
        """Use Python APIs to test that configuring permissions on names works correctly."""
        self.build()
        self.setup_target()
        self.do_check_configuring_permissions_cli()

    def setup_target(self):
        """Create the test target and cache the main source file spec."""
        exe = self.getBuildArtifact("a.out")
        # Create a targets we are making breakpoint in and copying to:
        self.target = self.dbg.CreateTarget(exe)
        self.assertTrue(self.target, VALID_TARGET)
        self.main_file_spec = lldb.SBFileSpec(
            os.path.join(self.getSourceDir(), "main.c"))

    def check_name_in_target(self, bkpt_name):
        """Assert that bkpt_name appears in the target's breakpoint-name list."""
        name_list = lldb.SBStringList()
        self.target.GetBreakpointNames(name_list)
        found_it = False
        for name in name_list:
            if name == bkpt_name:
                found_it = True
                break
        self.assertTrue(
            found_it,
            "Didn't find the name %s in the target's name list:" % (bkpt_name))

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # These are the settings we're going to be putting into names & breakpoints:
        self.bp_name_string = "ABreakpoint"
        self.is_one_shot = True
        self.ignore_count = 1000
        self.condition = "1 == 2"
        self.auto_continue = True
        self.tid = 0xaaaa
        self.tidx = 10
        self.thread_name = "Fooey"
        self.queue_name = "Blooey"
        self.cmd_list = lldb.SBStringList()
        self.cmd_list.AppendString("frame var")
        self.cmd_list.AppendString("bt")
        self.help_string = "I do something interesting"

    def do_check_names(self):
        """Use Python APIs to check that we can set & retrieve breakpoint names"""
        bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        bkpt_name = "ABreakpoint"
        other_bkpt_name = "_AnotherBreakpoint"

        # Add a name and make sure we match it:
        success = bkpt.AddNameWithErrorHandling(bkpt_name)
        self.assertSuccess(success, "We couldn't add a legal name to a breakpoint.")

        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(matches, "We didn't match the name we just set")

        # Make sure we don't match irrelevant names:
        matches = bkpt.MatchesName("NotABreakpoint")
        self.assertTrue(not matches, "We matched a name we didn't set.")

        # Make sure the name is also in the target:
        self.check_name_in_target(bkpt_name)

        # Add another name, make sure that works too:
        bkpt.AddNameWithErrorHandling(other_bkpt_name)

        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(matches, "Adding a name means we didn't match the name we just set")
        self.check_name_in_target(other_bkpt_name)

        # Remove the name and make sure we no longer match it:
        bkpt.RemoveName(bkpt_name)
        matches = bkpt.MatchesName(bkpt_name)
        self.assertTrue(not matches, "We still match a name after removing it.")

        # Make sure the name list has the remaining name:
        name_list = lldb.SBStringList()
        bkpt.GetNames(name_list)
        num_names = name_list.GetSize()
        self.assertEqual(num_names, 1, "Name list has %d items, expected 1." % (num_names))

        name = name_list.GetStringAtIndex(0)
        self.assertEqual(name, other_bkpt_name, "Remaining name was: %s expected %s." % (name, other_bkpt_name))

    def do_check_illegal_names(self):
        """Use Python APIs to check that we reject illegal names."""
        bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        bad_names = ["-CantStartWithADash",
                     "1CantStartWithANumber",
                     "^CantStartWithNonAlpha",
                     "CantHave-ADash",
                     "Cant Have Spaces"]
        for bad_name in bad_names:
            # All three creation paths must reject the name: the breakpoint
            # API, the SBBreakpointName constructor, and the command line.
            success = bkpt.AddNameWithErrorHandling(bad_name)
            self.assertTrue(success.Fail(), "We allowed an illegal name: %s" % (bad_name))
            bp_name = lldb.SBBreakpointName(self.target, bad_name)
            self.assertFalse(bp_name.IsValid(), "We made a breakpoint name with an illegal name: %s" % (bad_name))

            retval = lldb.SBCommandReturnObject()
            self.dbg.GetCommandInterpreter().HandleCommand("break set -n whatever -N '%s'" % (bad_name), retval)
            self.assertTrue(not retval.Succeeded(), "break set succeeded with: illegal name: %s" % (bad_name))

    def do_check_using_names(self):
        """Use Python APIs to check names work in place of breakpoint ID's."""
        # Create a dummy breakpoint to use up ID 1
        _ = self.target.BreakpointCreateByLocation(self.main_file_spec, 30)

        # Create a breakpoint to test with
        bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        bkpt_name = "ABreakpoint"
        bkpt_id = bkpt.GetID()
        other_bkpt_name = "_AnotherBreakpoint"

        # Add a name and make sure we match it:
        success = bkpt.AddNameWithErrorHandling(bkpt_name)
        self.assertSuccess(success, "We couldn't add a legal name to a breakpoint.")

        bkpts = lldb.SBBreakpointList(self.target)
        self.target.FindBreakpointsByName(bkpt_name, bkpts)

        self.assertEqual(bkpts.GetSize(), 1, "One breakpoint matched.")
        found_bkpt = bkpts.GetBreakpointAtIndex(0)
        self.assertEqual(bkpt.GetID(), found_bkpt.GetID(), "The right breakpoint.")
        self.assertEqual(bkpt.GetID(), bkpt_id, "With the same ID as before.")

        retval = lldb.SBCommandReturnObject()
        self.dbg.GetCommandInterpreter().HandleCommand("break disable %s" % (bkpt_name), retval)
        self.assertTrue(retval.Succeeded(), "break disable failed with: %s." % (retval.GetError()))
        self.assertTrue(not bkpt.IsEnabled(), "We didn't disable the breakpoint.")

        # Also make sure we don't apply commands to non-matching names:
        self.dbg.GetCommandInterpreter().HandleCommand("break modify --one-shot 1 %s" % (other_bkpt_name), retval)
        self.assertTrue(retval.Succeeded(), "break modify failed with: %s." % (retval.GetError()))
        self.assertTrue(not bkpt.IsOneShot(), "We applied one-shot to the wrong breakpoint.")

    def check_option_values(self, bp_object):
        """Assert that bp_object carries all the option values from setUp."""
        self.assertEqual(bp_object.IsOneShot(), self.is_one_shot, "IsOneShot")
        self.assertEqual(bp_object.GetIgnoreCount(), self.ignore_count, "IgnoreCount")
        self.assertEqual(bp_object.GetCondition(), self.condition, "Condition")
        self.assertEqual(bp_object.GetAutoContinue(), self.auto_continue, "AutoContinue")
        self.assertEqual(bp_object.GetThreadID(), self.tid, "Thread ID")
        self.assertEqual(bp_object.GetThreadIndex(), self.tidx, "Thread Index")
        self.assertEqual(bp_object.GetThreadName(), self.thread_name, "Thread Name")
        self.assertEqual(bp_object.GetQueueName(), self.queue_name, "Queue Name")
        set_cmds = lldb.SBStringList()
        bp_object.GetCommandLineCommands(set_cmds)
        self.assertEqual(set_cmds.GetSize(), self.cmd_list.GetSize(), "Size of command line commands")
        for idx in range(0, set_cmds.GetSize()):
            self.assertEqual(self.cmd_list.GetStringAtIndex(idx), set_cmds.GetStringAtIndex(idx), "Command %d" % (idx))

    def make_a_dummy_name(self):
        "This makes a breakpoint name in the dummy target to make sure it gets copied over"
        dummy_target = self.dbg.GetDummyTarget()
        self.assertTrue(dummy_target.IsValid(), "Dummy target was not valid.")

        def cleanup():
            self.dbg.GetDummyTarget().DeleteBreakpointName(self.bp_name_string)

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        # Now find it in the dummy target, and make sure these settings took:
        bp_name = lldb.SBBreakpointName(dummy_target, self.bp_name_string)
        # Make sure the name is right:
        self.assertTrue(bp_name.GetName() == self.bp_name_string, "Wrong bp_name: %s" % (bp_name.GetName()))
        bp_name.SetOneShot(self.is_one_shot)
        bp_name.SetIgnoreCount(self.ignore_count)
        bp_name.SetCondition(self.condition)
        bp_name.SetAutoContinue(self.auto_continue)
        bp_name.SetThreadID(self.tid)
        bp_name.SetThreadIndex(self.tidx)
        bp_name.SetThreadName(self.thread_name)
        bp_name.SetQueueName(self.queue_name)
        bp_name.SetCommandLineCommands(self.cmd_list)

        # Now look it up again, and make sure it got set correctly.
        bp_name = lldb.SBBreakpointName(dummy_target, self.bp_name_string)
        self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name.")
        self.check_option_values(bp_name)

    def do_check_configuring_names(self):
        """Use Python APIs to check that configuring breakpoint names works correctly."""
        other_bp_name_string = "AnotherBreakpointName"
        cl_bp_name_string = "CLBreakpointName"

        # Now find the version copied in from the dummy target, and make sure these settings took:
        bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
        self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name.")
        self.check_option_values(bp_name)

        # Now add this name to a breakpoint, and make sure it gets configured properly
        bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        success = bkpt.AddNameWithErrorHandling(self.bp_name_string)
        self.assertSuccess(success, "Couldn't add this name to the breakpoint")
        self.check_option_values(bkpt)

        # Now make a name from this breakpoint, and make sure the new name is properly configured:
        new_name = lldb.SBBreakpointName(bkpt, other_bp_name_string)
        self.assertTrue(new_name.IsValid(), "Couldn't make a valid bp_name from a breakpoint.")
        self.check_option_values(bkpt)

        # Now change the name's option and make sure it gets propagated to
        # the breakpoint:
        new_auto_continue = not self.auto_continue
        bp_name.SetAutoContinue(new_auto_continue)
        self.assertEqual(bp_name.GetAutoContinue(), new_auto_continue, "Couldn't change auto-continue on the name")
        self.assertEqual(bkpt.GetAutoContinue(), new_auto_continue, "Option didn't propagate to the breakpoint.")

        # Now make this same breakpoint name - but from the command line
        cmd_str = ("breakpoint name configure %s -o %d -i %d -c '%s' -G %d "
                   "-t %d -x %d -T '%s' -q '%s' -H '%s'"
                   % (cl_bp_name_string,
                      self.is_one_shot,
                      self.ignore_count,
                      self.condition,
                      self.auto_continue,
                      self.tid,
                      self.tidx,
                      self.thread_name,
                      self.queue_name,
                      self.help_string))
        for cmd in self.cmd_list:
            cmd_str += " -C '%s'" % (cmd)
        self.runCmd(cmd_str, check=True)
        # Now look up this name again and check its options:
        cl_name = lldb.SBBreakpointName(self.target, cl_bp_name_string)
        self.check_option_values(cl_name)
        # Also check the help string:
        self.assertEqual(self.help_string, cl_name.GetHelpString(), "Help string didn't match")
        # Change the name and make sure that works:
        new_help = "I do something even more interesting"
        cl_name.SetHelpString(new_help)
        self.assertEqual(new_help, cl_name.GetHelpString(), "SetHelpString didn't")

        # We should have three names now, make sure the target can list them:
        name_list = lldb.SBStringList()
        self.target.GetBreakpointNames(name_list)
        for name_string in [self.bp_name_string, other_bp_name_string, cl_bp_name_string]:
            self.assertTrue(name_string in name_list, "Didn't find %s in names" % (name_string))

        # Delete the name from the current target.  Make sure that works and deletes the
        # name from the breakpoint as well:
        self.target.DeleteBreakpointName(self.bp_name_string)
        name_list.Clear()
        self.target.GetBreakpointNames(name_list)
        self.assertTrue(self.bp_name_string not in name_list, "Didn't delete %s from a real target" % (self.bp_name_string))
        # Also make sure the name got removed from breakpoints holding it:
        self.assertFalse(bkpt.MatchesName(self.bp_name_string), "Didn't remove the name from the breakpoint.")

        # Test that deleting the name we injected into the dummy target works (there's also a
        # cleanup that will do this, but that won't test the result...
        dummy_target = self.dbg.GetDummyTarget()
        dummy_target.DeleteBreakpointName(self.bp_name_string)
        name_list.Clear()
        dummy_target.GetBreakpointNames(name_list)
        self.assertTrue(self.bp_name_string not in name_list, "Didn't delete %s from the dummy target" % (self.bp_name_string))
        # Also make sure the name got removed from breakpoints holding it:
        self.assertFalse(bkpt.MatchesName(self.bp_name_string), "Didn't remove the name from the breakpoint.")

    def check_permission_results(self, bp_name):
        """Verify delete/disable/list protection on bp_name via API and CLI."""
        self.assertEqual(bp_name.GetAllowDelete(), False, "Didn't set allow delete.")
        protected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        protected_id = protected_bkpt.GetID()

        unprotected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        unprotected_id = unprotected_bkpt.GetID()

        success = protected_bkpt.AddNameWithErrorHandling(self.bp_name_string)
        self.assertSuccess(success, "Couldn't add this name to the breakpoint")

        self.target.DisableAllBreakpoints()
        self.assertEqual(protected_bkpt.IsEnabled(), True, "Didnt' keep breakpoint from being disabled")
        self.assertEqual(unprotected_bkpt.IsEnabled(), False, "Protected too many breakpoints from disabling.")

        # Try from the command line too:
        unprotected_bkpt.SetEnabled(True)
        result = lldb.SBCommandReturnObject()
        self.dbg.GetCommandInterpreter().HandleCommand("break disable", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual(protected_bkpt.IsEnabled(), True, "Didnt' keep breakpoint from being disabled")
        self.assertEqual(unprotected_bkpt.IsEnabled(), False, "Protected too many breakpoints from disabling.")

        self.target.DeleteAllBreakpoints()
        bkpt = self.target.FindBreakpointByID(protected_id)
        self.assertTrue(bkpt.IsValid(), "Didn't keep the breakpoint from being deleted.")
        bkpt = self.target.FindBreakpointByID(unprotected_id)
        self.assertFalse(bkpt.IsValid(), "Protected too many breakpoints from deletion.")

        # Remake the unprotected breakpoint and try again from the command line:
        unprotected_bkpt = self.target.BreakpointCreateByLocation(self.main_file_spec, 10)
        unprotected_id = unprotected_bkpt.GetID()

        self.dbg.GetCommandInterpreter().HandleCommand("break delete -f", result)
        self.assertTrue(result.Succeeded())
        bkpt = self.target.FindBreakpointByID(protected_id)
        self.assertTrue(bkpt.IsValid(), "Didn't keep the breakpoint from being deleted.")
        bkpt = self.target.FindBreakpointByID(unprotected_id)
        self.assertFalse(bkpt.IsValid(), "Protected too many breakpoints from deletion.")

    def do_check_configuring_permissions_sb(self):
        """Set name permissions through the SB API and verify they stick."""
        # Make a breakpoint name with delete disallowed:
        # (A second identical construction was removed here; SBBreakpointName
        # with the same name simply finds the existing name.)
        bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
        self.assertTrue(bp_name.IsValid(), "Failed to make breakpoint name for valid name.")

        bp_name.SetAllowDelete(False)
        bp_name.SetAllowDisable(False)
        bp_name.SetAllowList(False)
        self.check_permission_results(bp_name)

    def do_check_configuring_permissions_cli(self):
        """Set name permissions through the command line and verify they stick."""
        # Make the name with the right options using the command line:
        self.runCmd("breakpoint name configure -L 0 -D 0 -A 0 %s" % (self.bp_name_string), check=True)
        # Now look up the breakpoint we made, and check that it works.
        bp_name = lldb.SBBreakpointName(self.target, self.bp_name_string)
        self.assertTrue(bp_name.IsValid(), "Didn't make a breakpoint name we could find.")
        self.check_permission_results(bp_name)
| |
# -*- coding: utf-8 -*-
import logging
import httplib
import httplib as http # TODO: Inconsistent usage of aliased import
from dateutil.parser import parse as parse_date
from django.utils import timezone
from flask import request
import markupsafe
import mailchimp
from modularodm.exceptions import ValidationError, NoResultsFound, MultipleResultsFound
from modularodm import Q
from osf.models import Node, NodeRelation
from framework import sentry
from framework.auth import Auth
from framework.auth import utils as auth_utils
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.decorators import must_be_confirmed
from framework.auth.exceptions import ChangePasswordError
from framework.auth.views import send_confirm_email
from framework.auth.signals import user_merged
from framework.exceptions import HTTPError, PermissionsError
from framework.flask import redirect # VOL-aware redirect
from framework.status import push_status_message
from website import mails
from website import mailchimp_utils
from website import settings
from website.project.utils import PROJECT_QUERY
from website.models import ApiOAuth2Application, ApiOAuth2PersonalToken, User
from website.oauth.utils import get_available_scopes
from website.profile import utils as profile_utils
from website.util.time import throttle_period_expired
from website.util import api_v2_url, web_url_for, paths
from website.util.sanitize import escape_html
from website.util.sanitize import strip_html
from website.views import serialize_node_summary
from addons.base import utils as addon_utils
logger = logging.getLogger(__name__)
def get_public_projects(uid=None, user=None):
    """Return serialized summaries of the user's public top-level projects.

    :param uid: GUID of the user; ignored when ``user`` is given.
    :param user: User instance; takes precedence over ``uid``.
    :return: list of node summary dicts (newest modified first).
    """
    user = user or User.load(uid)
    # In future redesign, should be limited for users with many projects / components
    node_ids = (
        Node.find_for_user(user, PROJECT_QUERY)
        .filter(is_public=True)
        .get_roots()
        .values_list('id', flat=True)
    )
    nodes = (
        Node.objects.filter(id__in=set(node_ids))
        # Defer some fields that we don't use for rendering node lists
        .defer('child_node_subscriptions', 'date_created', 'deleted_date', 'description', 'file_guid_to_share_uuids')
        .include('guids', 'contributor__user__guids', '_parents__parent__guids')
        .order_by('-date_modified')
    )
    return [
        serialize_node_summary(node=node, auth=Auth(user), show_path=False)
        for node in nodes
    ]
def get_public_components(uid=None, user=None):
    """Return serialized summaries of public components the user contributes to.

    :param uid: GUID of the user; ignored when ``user`` is given.
    :param user: User instance; takes precedence over ``uid``.
    :return: list of node summary dicts (newest modified first).
    """
    user = user or User.load(uid)
    # Public, non-deleted child nodes where the user is a contributor;
    # collections and node links are excluded.
    rel_child_ids = (
        NodeRelation.objects.filter(
            child__is_public=True,
            child__type='osf.node',  # nodes only (not collections or registration)
            child___contributors=user,  # user is a contributor
            is_node_link=False  # exclude childs by node linkage
        )
        .exclude(parent__type='osf.collection')
        .exclude(child__is_deleted=True)
        .values_list('child_id', flat=True)
    )
    nodes = (Node.objects.filter(id__in=rel_child_ids)
             .include('contributor__user__guids', 'guids', '_parents__parent__guids')
             # Defer some fields that we don't use for rendering node lists
             .defer('child_node_subscriptions', 'date_created', 'deleted_date', 'description',
                    'file_guid_to_share_uuids')
             .order_by('-date_modified'))
    return [
        serialize_node_summary(node=node, auth=Auth(user), show_path=True)
        for node in nodes
    ]
@must_be_logged_in
def current_user_gravatar(size=None, **kwargs):
    """Return the gravatar URL payload for the currently logged-in user."""
    current_user = kwargs['auth'].user
    return get_gravatar(current_user._id, size=size)
def get_gravatar(uid, size=None):
    """Return a dict holding the gravatar URL for the user with GUID ``uid``."""
    target = User.load(uid)
    return {'gravatar_url': profile_utils.get_gravatar(target, size=size)}
def date_or_none(date):
    """Parse ``date`` into a datetime; return None (and log) on any failure."""
    try:
        parsed = parse_date(date)
    except Exception as error:
        # Best-effort parse: log the failure but never propagate it.
        logger.exception(error)
        return None
    return parsed
def validate_user(data, user):
    """Ensure the request payload targets the logged-in user.

    :raises HTTPError: BAD_REQUEST when no 'id' is supplied; FORBIDDEN when
        the supplied id does not match the authenticated user.
    """
    if 'id' not in data:
        # raise an error if request doesn't have user id
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': '"id" is required'})
    if data['id'] != user._id:
        raise HTTPError(httplib.FORBIDDEN)
@must_be_logged_in
def resend_confirmation(auth):
    """Resend a confirmation email for an unconfirmed address of the user.

    Expects JSON ``{'id': <user id>, 'email': {'address', 'primary',
    'confirmed'}}``. Throttled via ``user.email_last_sent`` and
    ``settings.SEND_EMAIL_THROTTLE``.

    :raises HTTPError: BAD_REQUEST on throttle, malformed payload, or an
        already-confirmed/primary address.
    """
    user = auth.user
    data = request.get_json()
    validate_user(data, user)
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(httplib.BAD_REQUEST,
                        data={'message_long': 'Too many requests. Please wait a while before sending another confirmation email.'})
    try:
        primary = data['email']['primary']
        confirmed = data['email']['confirmed']
        address = data['email']['address'].strip().lower()
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)
    if primary or confirmed:
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Cannnot resend confirmation for confirmed emails'})
    user.add_unconfirmed_email(address)
    # TODO: This setting is now named incorrectly.
    if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
        send_confirm_email(user, email=address)
    user.email_last_sent = timezone.now()
    user.save()
    return _profile_view(user, is_profile=True)
@must_be_logged_in
def update_user(auth):
    """Update the logged-in user's profile.

    Accepts a JSON payload with an 'emails' list (each entry carrying
    'address', 'primary', 'confirmed'), and optional 'locale' / 'timezone'
    keys. Handles email removals/additions and primary-email (username)
    changes, then re-syncs mailing-list subscriptions.

    :raises HTTPError: FORBIDDEN when the payload would orphan or hijack the
        account username; BAD_REQUEST for invalid email addresses.
    """
    # trust the decorator to handle auth
    user = auth.user
    data = request.get_json()
    validate_user(data, user)
    # TODO: Expand this to support other user attributes
    ##########
    # Emails #
    ##########
    if 'emails' in data:
        emails_list = [x['address'].strip().lower() for x in data['emails']]
        # The account username must always remain in the posted email list.
        if user.username.strip().lower() not in emails_list:
            raise HTTPError(httplib.FORBIDDEN)
        available_emails = [
            each.strip().lower() for each in
            user.emails + user.unconfirmed_emails
        ]
        # removals
        removed_emails = [
            each.strip().lower()
            for each in available_emails
            if each not in emails_list
        ]
        if user.username.strip().lower() in removed_emails:
            raise HTTPError(httplib.FORBIDDEN)
        for address in removed_emails:
            if address in user.emails:
                try:
                    user.remove_email(address)
                except PermissionsError as e:
                    raise HTTPError(httplib.FORBIDDEN, e.message)
            user.remove_unconfirmed_email(address)
        # additions
        added_emails = [
            each['address'].strip().lower()
            for each in data['emails']
            if each['address'].strip().lower() not in available_emails
        ]
        for address in added_emails:
            try:
                user.add_unconfirmed_email(address)
            except (ValidationError, ValueError):
                raise HTTPError(http.BAD_REQUEST, data=dict(
                    message_long='Invalid Email')
                )
            # TODO: This setting is now named incorrectly.
            if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
                send_confirm_email(user, email=address)
        ############
        # Username #
        ############
        # get the first email that is set to primary and has an address.
        # BUG FIX: pass a default of None — without it, next() raised
        # StopIteration when no primary+confirmed email was posted, and
        # `username` below would have been unbound.
        primary_email = next(
            (
                each for each in data['emails']
                # email is primary
                if each.get('primary') and each.get('confirmed')
                # an address is specified (can't trust those sneaky users!)
                and each.get('address')
            ),
            None
        )
        username = None
        if primary_email:
            primary_email_address = primary_email['address'].strip().lower()
            if primary_email_address not in [each.strip().lower() for each in user.emails]:
                raise HTTPError(httplib.FORBIDDEN)
            username = primary_email_address
        # make sure the new username has already been confirmed
        if username and username in user.emails and username != user.username:
            mails.send_mail(user.username,
                            mails.PRIMARY_EMAIL_CHANGED,
                            user=user,
                            new_address=username)
            # Remove old primary email from subscribed mailing lists
            for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
                if subscription:
                    mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username)
            user.username = username
    ###################
    # Timezone/Locale #
    ###################
    if 'locale' in data:
        if data['locale']:
            locale = data['locale'].replace('-', '_')
            user.locale = locale
    # TODO: Refactor to something like:
    #   user.timezone = data.get('timezone', user.timezone)
    if 'timezone' in data:
        if data['timezone']:
            user.timezone = data['timezone']
    user.save()
    # Update subscribed mailing lists with new primary email
    # TODO: move to user.save()
    for list_name, subscription in user.mailchimp_mailing_lists.iteritems():
        if subscription:
            mailchimp_utils.subscribe_mailchimp(list_name, user._id)
    return _profile_view(user, is_profile=True)
def _profile_view(profile, is_profile=False, embed_nodes=False):
    """Build the serialized profile payload for ``profile``.

    :param profile: User whose profile is rendered.
    :param is_profile: True when the viewer is looking at their own profile.
    :param embed_nodes: when True, embed public project/component summaries
        (expensive; only needed for full profile pages).
    :raises HTTPError: GONE for disabled accounts; NOT_FOUND when ``profile``
        is falsy.
    """
    if profile and profile.is_disabled:
        raise HTTPError(http.GONE)
    # NOTE: While badges, are unused, 'assertions' and 'badges' can be
    # empty lists.
    badge_assertions = []
    badges = []
    if profile:
        profile_user_data = profile_utils.serialize_user(profile, full=True, is_profile=is_profile, include_node_counts=embed_nodes)
        ret = {
            'profile': profile_user_data,
            'assertions': badge_assertions,
            'badges': badges,
            'user': {
                '_id': profile._id,
                'is_profile': is_profile,
                'can_edit': None,  # necessary for rendering nodes
                'permissions': [],  # necessary for rendering nodes
            },
        }
        if embed_nodes:
            ret.update({
                'public_projects': get_public_projects(user=profile),
                'public_components': get_public_components(user=profile),
            })
        return ret
    raise HTTPError(http.NOT_FOUND)
@must_be_logged_in
def profile_view_json(auth):
    """Serialize the logged-in user's own profile, without node lists."""
    # Do NOT embed nodes, they aren't necessary
    return _profile_view(auth.user, is_profile=True, embed_nodes=False)
@collect_auth
@must_be_confirmed
def profile_view_id_json(uid, auth):
    """Serialize the profile of the user with GUID ``uid``, without node lists."""
    target = User.load(uid)
    viewing_own_profile = auth and auth.user == target
    # Do NOT embed nodes, they aren't necessary
    return _profile_view(target, viewing_own_profile, embed_nodes=False)
@must_be_logged_in
def profile_view(auth):
    """Render the logged-in user's own profile page context."""
    # Embed node data, so profile node lists can be rendered
    return _profile_view(auth.user, is_profile=True, embed_nodes=True)
@collect_auth
@must_be_confirmed
def profile_view_id(uid, auth):
    """Render the profile page context for the user with GUID ``uid``."""
    target = User.load(uid)
    viewing_own_profile = auth and auth.user == target
    # Embed node data, so profile node lists can be rendered
    return _profile_view(target, viewing_own_profile, embed_nodes=True)
@must_be_logged_in
def edit_profile(**kwargs):
    """Deprecated inline-edit endpoint; only supports updating the full name."""
    # NOTE: This method is deprecated. Use update_user instead.
    # TODO: Remove this view
    user = kwargs['auth'].user
    form = request.form
    ret = {'response': 'success'}
    # Only the 'fullname' field is handled; empty/whitespace values are ignored.
    if form.get('name') == 'fullname' and form.get('value', '').strip():
        user.fullname = strip_html(form['value']).strip()
        user.save()
        ret['name'] = user.fullname
    return ret
def get_profile_summary(user_id, formatter='long'):
    """Return the profile summary for the user with GUID ``user_id``."""
    return User.load(user_id).get_summary(formatter)
@must_be_logged_in
def user_profile(auth, **kwargs):
    """Return minimal identifiers for the logged-in user."""
    profile_owner = auth.user
    return dict(
        user_id=profile_owner._id,
        user_api_url=profile_owner.api_url,
    )
@must_be_logged_in
def user_account(auth, **kwargs):
    """Serialize context for the user account-settings page."""
    user = auth.user
    user_addons = addon_utils.get_addons_by_config_type('user', user)
    return {
        'user_id': user._id,
        'addons': user_addons,
        # JS bundles for all available addons that have a user config view
        'addons_js': collect_user_config_js([addon for addon in settings.ADDONS_AVAILABLE if 'user' in addon.configs]),
        'addons_css': [],
        'requested_deactivation': user.requested_deactivation,
        'external_identity': user.external_identity
    }
@must_be_logged_in
def user_account_password(auth, **kwargs):
    """Handle a password-change form post, then redirect to account settings.

    Validation failures are surfaced as flash warnings rather than errors.
    """
    user = auth.user
    old_password = request.form.get('old_password', None)
    new_password = request.form.get('new_password', None)
    confirm_password = request.form.get('confirm_password', None)
    try:
        user.change_password(old_password, new_password, confirm_password)
        user.save()
    except ChangePasswordError as error:
        # One flash message per validation problem; nothing is saved.
        for m in error.messages:
            push_status_message(m, kind='warning', trust=False)
    else:
        push_status_message('Password updated successfully.', kind='success', trust=False)
    return redirect(web_url_for('user_account'))
@must_be_logged_in
def user_addons(auth, **kwargs):
    """Serialize settings for the account-level addons configuration page."""
    user = auth.user
    accounts_addons = [
        addon for addon in settings.ADDONS_AVAILABLE
        if 'accounts' in addon.configs
    ]
    return {
        'addon_settings': addon_utils.get_addons_by_config_type('accounts', user),
        'addon_enabled_settings': [addon.short_name for addon in accounts_addons],
        'addons_js': collect_user_config_js(accounts_addons),
        'addon_capabilities': settings.ADDON_CAPABILITIES,
        'addons_css': [],
    }
@must_be_logged_in
def user_notifications(auth, **kwargs):
    """Get subscribe data from user"""
    # Python 2: dict.items() returns lists, so `+` concatenates the two
    # mappings; osf_mailing_lists entries win on duplicate keys.
    return {
        'mailing_lists': dict(auth.user.mailchimp_mailing_lists.items() + auth.user.osf_mailing_lists.items())
    }
@must_be_logged_in
def oauth_application_list(auth, **kwargs):
    """Return app creation page with list of known apps. API is responsible for tying list to current user."""
    return {'app_list_url': api_v2_url('applications/')}
@must_be_logged_in
def oauth_application_register(auth, **kwargs):
    """Register an API application: blank form view"""
    return {
        'app_list_url': api_v2_url('applications/'),  # POST request to this url
        'app_detail_url': '',
    }
@must_be_logged_in
def oauth_application_detail(auth, **kwargs):
    """Show detail for a single OAuth application.

    The client ID must name an existing, active record owned by the
    logged-in user; a malformed ID raises ValueError from the ODM lookup.
    """
    client_id = kwargs.get('client_id')
    try:
        record = ApiOAuth2Application.find_one(Q('client_id', 'eq', client_id))
    except (NoResultsFound, ValueError):
        # ValueError: invalid client ID -- ApiOAuth2Application will not exist
        raise HTTPError(http.NOT_FOUND)
    if record.owner != auth.user:
        raise HTTPError(http.FORBIDDEN)
    if record.is_active is False:
        raise HTTPError(http.GONE)
    return {
        'app_list_url': '',
        'app_detail_url': api_v2_url('applications/{}/'.format(client_id)),  # request target
    }
@must_be_logged_in
def personal_access_token_list(auth, **kwargs):
    """Return token creation page with list of known tokens. API is responsible for tying list to current user."""
    return {'token_list_url': api_v2_url('tokens/')}
@must_be_logged_in
def personal_access_token_register(auth, **kwargs):
    """Register a personal access token: blank form view"""
    return {
        'token_list_url': api_v2_url('tokens/'),  # POST request to this url
        'token_detail_url': '',
        'scope_options': get_available_scopes(),
    }
@must_be_logged_in
def personal_access_token_detail(auth, **kwargs):
    """Show detail for a single personal access token.

    The ID must name an existing, active record owned by the logged-in user.
    """
    _id = kwargs.get('_id')
    try:
        record = ApiOAuth2PersonalToken.find_one(Q('_id', 'eq', _id))
    except NoResultsFound:
        raise HTTPError(http.NOT_FOUND)
    if record.owner != auth.user:
        raise HTTPError(http.FORBIDDEN)
    if record.is_active is False:
        raise HTTPError(http.GONE)
    return {
        'token_list_url': '',
        'token_detail_url': api_v2_url('tokens/{}/'.format(_id)),  # request target
        'scope_options': get_available_scopes(),
    }
@must_be_logged_in
def delete_external_identity(auth, **kwargs):
    """Removes single external identity from user"""
    data = request.get_json()
    identity = data.get('identity')
    if not identity:
        raise HTTPError(http.BAD_REQUEST)
    for service in auth.user.external_identity:
        if identity in auth.user.external_identity[service]:
            auth.user.external_identity[service].pop(identity)
            # Drop the service entry entirely once its last identity is gone.
            if len(auth.user.external_identity[service]) == 0:
                auth.user.external_identity.pop(service)
            auth.user.save()
            return
    raise HTTPError(http.NOT_FOUND, 'Unable to find requested identity')
def collect_user_config_js(addon_configs):
    """Collect webpack bundles for each of the addons' user-cfg.js modules.

    Return the URLs for each of the JS modules to be included on the user
    addons config page.

    :param addon_configs: List of user's addon config records.
    """
    resolved = (
        paths.resolve_addon_path(addon_config, 'user-cfg.js')
        for addon_config in addon_configs
    )
    # Addons without a user-cfg.js resolve to a falsy path and are skipped.
    return [js_path for js_path in resolved if js_path]
@must_be_logged_in
def user_choose_addons(**kwargs):
    """Apply the addon-selection JSON posted by the logged-in user."""
    auth = kwargs['auth']
    payload = escape_html(request.get_json())
    auth.user.config_addons(payload, auth)
@must_be_logged_in
def user_choose_mailing_lists(auth, **kwargs):
    """ Update mailing list subscription on user model and in mailchimp
    Example input:
    {
        "Open Science Framework General": true,
        ...
    }
    :raises HTTPError: BAD_REQUEST when no JSON body is provided.
    """
    user = auth.user
    json_data = escape_html(request.get_json())
    if json_data:
        for list_name, subscribe in json_data.items():
            # TO DO: change this to take in any potential non-mailchimp, something like try: update_subscription(), except IndexNotFound: update_mailchimp_subscription()
            if list_name == settings.OSF_HELP_LIST:
                # OSF Help list is stored locally, not via mailchimp
                update_osf_help_mails_subscription(user=user, subscribe=subscribe)
            else:
                update_mailchimp_subscription(user, list_name, subscribe)
    else:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long="Must provide a dictionary of the format {'mailing list name': Boolean}")
        )
    user.save()
    all_mailing_lists = {}
    all_mailing_lists.update(user.mailchimp_mailing_lists)
    all_mailing_lists.update(user.osf_mailing_lists)
    return {'message': 'Successfully updated mailing lists', 'result': all_mailing_lists}, 200
@user_merged.connect
def update_mailchimp_subscription(user, list_name, subscription, send_goodbye=True):
    """ Update mailing list subscription in mailchimp.
    Also wired to the user_merged signal.
    :param obj user: current user
    :param str list_name: mailing list
    :param boolean subscription: true if user is subscribed
    :param boolean send_goodbye: when unsubscribing, whether mailchimp sends
        its goodbye email
    """
    if subscription:
        try:
            mailchimp_utils.subscribe_mailchimp(list_name, user._id)
        except mailchimp.Error:
            # Best-effort: subscription failures are silently ignored.
            pass
    else:
        try:
            mailchimp_utils.unsubscribe_mailchimp_async(list_name, user._id, username=user.username, send_goodbye=send_goodbye)
        except mailchimp.Error:
            # User has already unsubscribed, so nothing to do
            pass
def mailchimp_get_endpoint(**kwargs):
    """Endpoint that the mailchimp webhook hits to check that the OSF is responding"""
    # Health check: body is intentionally empty; only the 200 status matters.
    return {}, http.OK
def sync_data_from_mailchimp(**kwargs):
    """Endpoint that the mailchimp webhook sends its data to

    Authenticated by a shared secret in the ``key`` query parameter; updates
    the local subscription flag on subscribe/unsubscribe events.
    :raises HTTPError: UNAUTHORIZED on a bad key; 404 for unknown users.
    """
    key = request.args.get('key')
    if key == settings.MAILCHIMP_WEBHOOK_SECRET_KEY:
        r = request
        action = r.values['type']
        list_name = mailchimp_utils.get_list_name_from_id(list_id=r.values['data[list_id]'])
        username = r.values['data[email]']
        try:
            user = User.find_one(Q('username', 'eq', username))
        except NoResultsFound:
            sentry.log_exception()
            sentry.log_message('A user with this username does not exist.')
            raise HTTPError(404, data=dict(message_short='User not found',
                                           message_long='A user with this username does not exist'))
        if action == 'unsubscribe':
            user.mailchimp_mailing_lists[list_name] = False
            user.save()
        elif action == 'subscribe':
            user.mailchimp_mailing_lists[list_name] = True
            user.save()
    else:
        # TODO: get tests to pass with sentry logging
        # sentry.log_exception()
        # sentry.log_message("Unauthorized request to the OSF.")
        raise HTTPError(http.UNAUTHORIZED)
@must_be_logged_in
def impute_names(**kwargs):
    """Guess name parts from the raw full name given in the ?name= parameter."""
    raw_name = request.args.get('name', '')
    return auth_utils.impute_names(raw_name)
def update_osf_help_mails_subscription(user, subscribe):
    """Persist the user's OSF Help mailing-list preference.

    :param user: user whose preference is updated (saved immediately).
    :param subscribe: truthy when the user opts in.
    """
    user.osf_mailing_lists[settings.OSF_HELP_LIST] = subscribe
    user.save()
@must_be_logged_in
def serialize_names(**kwargs):
    """Return the logged-in user's name parts as a plain dict."""
    target = kwargs['auth'].user
    return dict(
        full=target.fullname,
        given=target.given_name,
        middle=target.middle_names,
        family=target.family_name,
        suffix=target.suffix,
    )
def get_target_user(auth, uid=None):
    """Resolve the user being acted on: ``uid``'s user when given, else the requester.

    :raises HTTPError: NOT_FOUND when the resolved user does not exist.
    """
    if uid:
        target = User.load(uid)
    else:
        target = auth.user
    if target is None:
        raise HTTPError(http.NOT_FOUND)
    return target
def fmt_date_or_none(date, fmt='%Y-%m-%d'):
    """Format ``date`` with ``fmt``; return None for a falsy date.

    :raises HTTPError: BAD_REQUEST when strftime rejects the year
        (pre-1900 on some platforms/Python versions).
    """
    if not date:
        return None
    try:
        return date.strftime(fmt)
    except ValueError:
        raise HTTPError(
            http.BAD_REQUEST,
            data=dict(message_long='Year entered must be after 1900')
        )
def append_editable(data, auth, uid=None):
    """Annotate ``data`` in place with whether the requester may edit it."""
    target = get_target_user(auth, uid)
    # Editable only when the requester *is* the target user.
    data['editable'] = auth.user == target
def serialize_social_addons(user):
    """Map addon short names to their public IDs for the user's addons.

    Addons without a public_id are omitted.
    """
    return {
        user_settings.config.short_name: user_settings.public_id
        for user_settings in user.get_addons()
        if user_settings.public_id
    }
@collect_auth
def serialize_social(auth, uid=None, **kwargs):
    """Serialize the target user's social fields; include addons when editable."""
    target = get_target_user(auth, uid)
    social = target.social
    append_editable(social, auth, uid)
    if social['editable']:
        social['addons'] = serialize_social_addons(target)
    return social
def serialize_job(job):
    """Project the recognized fields of an employment entry into a plain dict.

    Missing fields come back as None; 'ongoing' defaults to False.
    """
    field_names = ('institution', 'department', 'title',
                   'startMonth', 'startYear', 'endMonth', 'endYear')
    serialized = {name: job.get(name) for name in field_names}
    serialized['ongoing'] = job.get('ongoing', False)
    return serialized
def serialize_school(school):
    """Project the recognized fields of an education entry into a plain dict.

    Missing fields come back as None; 'ongoing' defaults to False.
    """
    field_names = ('institution', 'department', 'degree',
                   'startMonth', 'startYear', 'endMonth', 'endYear')
    serialized = {name: school.get(name) for name in field_names}
    serialized['ongoing'] = school.get('ongoing', False)
    return serialized
def serialize_contents(field, func, auth, uid=None):
    """Serialize each entry of ``target.<field>`` with ``func`` and tag editability."""
    target = get_target_user(auth, uid)
    entries = [func(entry) for entry in getattr(target, field)]
    ret = {'contents': entries}
    append_editable(ret, auth, uid)
    return ret
@collect_auth
def serialize_jobs(auth, uid=None, **kwargs):
    """Serialize the target user's employment history.

    :param auth: Auth of the requester (collected by the decorator).
    :param uid: optional GUID of the user to serialize; defaults to requester.
    """
    # serialize_contents already calls append_editable with the same
    # arguments, so the second call here was redundant and has been removed.
    return serialize_contents('jobs', serialize_job, auth, uid)
@collect_auth
def serialize_schools(auth, uid=None, **kwargs):
    """Serialize the target user's education history.

    :param auth: Auth of the requester (collected by the decorator).
    :param uid: optional GUID of the user to serialize; defaults to requester.
    """
    # serialize_contents already calls append_editable with the same
    # arguments, so the second call here was redundant and has been removed.
    return serialize_contents('schools', serialize_school, auth, uid)
@must_be_logged_in
def unserialize_names(**kwargs):
    """Persist posted name parts (full/given/middle/family/suffix) for the user."""
    user = kwargs['auth'].user
    json_data = escape_html(request.get_json())
    # json get can return None, use `or` here to ensure we always strip a string
    user.fullname = (json_data.get('full') or '').strip()
    user.given_name = (json_data.get('given') or '').strip()
    user.middle_names = (json_data.get('middle') or '').strip()
    user.family_name = (json_data.get('family') or '').strip()
    user.suffix = (json_data.get('suffix') or '').strip()
    user.save()
def verify_user_match(auth, **kwargs):
    """Raise FORBIDDEN when a uid kwarg is present and names another user."""
    requested_uid = kwargs.get('uid')
    if not requested_uid:
        return
    if requested_uid != auth.user._id:
        raise HTTPError(http.FORBIDDEN)
@must_be_logged_in
def unserialize_social(auth, **kwargs):
    """Persist posted social-profile fields for the logged-in user.

    :raises HTTPError: FORBIDDEN when a uid kwarg names another user;
        BAD_REQUEST when model validation rejects a value.
    """
    verify_user_match(auth, **kwargs)
    user = auth.user
    json_data = escape_html(request.get_json())
    # Every declared social field is overwritten; absent keys become None.
    for soc in user.SOCIAL_FIELDS.keys():
        user.social[soc] = json_data.get(soc)
    try:
        user.save()
    except ValidationError as exc:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long=exc.messages[0]
        ))
def unserialize_job(job):
    """Pick the recognized employment fields out of a client-supplied dict.

    Missing fields (including 'ongoing') come back as None.
    """
    field_names = ('institution', 'department', 'title',
                   'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing')
    return {name: job.get(name) for name in field_names}
def unserialize_school(school):
    """Pick the recognized education fields out of a client-supplied dict.

    Missing fields (including 'ongoing') come back as None.
    """
    field_names = ('institution', 'department', 'degree',
                   'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing')
    return {name: school.get(name) for name in field_names}
def unserialize_contents(field, func, auth):
    """Replace ``user.<field>`` with the parsed 'contents' entries from the request JSON."""
    user = auth.user
    payload = escape_html(request.get_json())
    parsed_entries = [func(entry) for entry in payload.get('contents', [])]
    setattr(user, field, parsed_entries)
    user.save()
@must_be_logged_in
def unserialize_jobs(auth, **kwargs):
    """Persist the posted employment entries for the logged-in user."""
    verify_user_match(auth, **kwargs)
    unserialize_contents('jobs', unserialize_job, auth)
    # TODO: Add return value
@must_be_logged_in
def unserialize_schools(auth, **kwargs):
    """Persist the posted education entries for the logged-in user."""
    verify_user_match(auth, **kwargs)
    unserialize_contents('schools', unserialize_school, auth)
    # TODO: Add return value
@must_be_logged_in
def request_export(auth):
    """Email the support inbox requesting an export of the user's account data.

    Throttled via ``user.email_last_sent`` and ``settings.SEND_EMAIL_THROTTLE``.
    """
    user = auth.user
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(httplib.BAD_REQUEST,
                        data={'message_long': 'Too many requests. Please wait a while before sending another account export request.',
                              'error_type': 'throttle_error'})
    mails.send_mail(
        to_addr=settings.SUPPORT_EMAIL,
        mail=mails.REQUEST_EXPORT,
        user=auth.user,
    )
    user.email_last_sent = timezone.now()
    user.save()
    return {'message': 'Sent account export request'}
@must_be_logged_in
def request_deactivation(auth):
    """Email the support inbox requesting deactivation of the user's account.

    Throttled via ``user.email_last_sent``; also flags the user record so the
    pending request is visible on the account page.
    """
    user = auth.user
    if not throttle_period_expired(user.email_last_sent, settings.SEND_EMAIL_THROTTLE):
        raise HTTPError(http.BAD_REQUEST,
                        data={
                            'message_long': 'Too many requests. Please wait a while before sending another account deactivation request.',
                            'error_type': 'throttle_error'
                        })
    mails.send_mail(
        to_addr=settings.SUPPORT_EMAIL,
        mail=mails.REQUEST_DEACTIVATION,
        user=auth.user,
    )
    user.email_last_sent = timezone.now()
    user.requested_deactivation = True
    user.save()
    return {'message': 'Sent account deactivation request'}
def redirect_to_twitter(twitter_handle):
    """Redirect GET requests for /@TwitterHandle/ to the respective OSF user
    account if it is associated with an active account

    :param twitter_handle: Twitter handle stored in a user's social fields
        (matched case-insensitively)
    :return: Redirect to User's profile page
    :raises HTTPError: NOT_FOUND when no user matches; MULTIPLE_CHOICES (300)
        with a disambiguation list when several users share the handle
    """
    try:
        user = User.find_one(Q('social.twitter', 'iexact', twitter_handle))
    except NoResultsFound:
        raise HTTPError(http.NOT_FOUND, data={
            'message_short': 'User Not Found',
            'message_long': 'There is no active user associated with the Twitter handle: {0}.'.format(twitter_handle)
        })
    except MultipleResultsFound:
        users = User.find(Q('social.twitter', 'iexact', twitter_handle))
        # Handle and full names are escaped: they end up in rendered HTML.
        message_long = 'There are multiple OSF accounts associated with the ' \
                       'Twitter handle: <strong>{0}</strong>. <br /> Please ' \
                       'select from the accounts below. <br /><ul>'.format(markupsafe.escape(twitter_handle))
        for user in users:
            message_long += '<li><a href="{0}">{1}</a></li>'.format(user.url, markupsafe.escape(user.fullname))
        message_long += '</ul>'
        raise HTTPError(http.MULTIPLE_CHOICES, data={
            'message_short': 'Multiple Users Found',
            'message_long': message_long
        })
    return redirect(user.url)
| |
u'''
Created on Oct 5, 2010
Refactored from ModelObject on Jun 11, 2011
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from __future__ import with_statement
import os, io, logging
from arelle import XmlUtil, XbrlConst, ModelValue
from arelle.ModelObject import ModelObject
from arelle.PluginManager import pluginClassMethods
class ModelTestcaseVariation(ModelObject):
    def init(self, modelDocument):
        """Initialize per-variation state on top of ModelObject.init."""
        super(ModelTestcaseVariation, self).init(modelDocument)
        self.status = u""      # outcome text, filled in later by validation
        self.actual = []       # actual results accumulated by validation
        self.assertions = None # assertion counts, set when applicable
    @property
    def id(self):
        """XML id attribute when present, else the object id (never None)."""
        # if there is a real ID, use it
        id = super(ModelTestcaseVariation, self).id
        if id is not None:
            return id
        # no ID, use the object ID so it isn't None
        return self.objectId()
    @property
    def name(self):
        """Variation name, cached in ``self._name``.

        Prefers the ``name`` attribute; otherwise the text of a descendant
        <name> element (<number> for "testcase" elements); None if neither.
        """
        try:
            return self._name
        except AttributeError:
            if self.get(u"name"):
                self._name = self.get(u"name")
            else:
                nameElement = XmlUtil.descendant(self, None, u"name" if self.localName != u"testcase" else u"number")
                if nameElement is not None:
                    self._name = XmlUtil.innerText(nameElement)
                else:
                    self._name = None
            return self._name
    @property
    def description(self):
        """Inner text of a <description> or <documentation> descendant, or None."""
        nameElement = XmlUtil.descendant(self, None, (u"description", u"documentation"))
        if nameElement is not None:
            return XmlUtil.innerText(nameElement)
        return None
@property
def reference(self):
efmNameElts = XmlUtil.children(self.getparent(), None, u"name")
for efmNameElt in efmNameElts:
if efmNameElt is not None and efmNameElt.text.startswith(u"EDGAR"):
return efmNameElt.text
referenceElement = XmlUtil.descendant(self, None, u"reference")
if referenceElement is not None: # formula test suite
return u"{0}#{1}".format(referenceElement.get(u"specification"), referenceElement.get(u"id"))
referenceElement = XmlUtil.descendant(self, None, u"documentationReference")
if referenceElement is not None: # w3c test suite
return referenceElement.get(u"{http://www.w3.org/1999/xlink}href")
descriptionElement = XmlUtil.descendant(self, None, u"description")
if descriptionElement is not None and descriptionElement.get(u"reference"):
return descriptionElement.get(u"reference") # xdt test suite
if self.getparent().get(u"description"):
return self.getparent().get(u"description") # base spec 2.1 test suite
functRegistryRefElt = XmlUtil.descendant(self.getparent(), None, u"reference")
if functRegistryRefElt is not None: # function registry
return functRegistryRefElt.get(u"{http://www.w3.org/1999/xlink}href")
return None
    @property
    def readMeFirstUris(self):
        """List of entry-point URIs for this variation, cached.

        Resolution order: plugin hook, then w3c testGroup / xpath test-case
        layouts, then generic readMeFirst="true" elements; falls back to a
        dummy empty instance document when nothing was found. Entries are
        plain URIs or (qname-or-dts-string, uri) tuples.
        """
        try:
            return self._readMeFirstUris
        except AttributeError:
            self._readMeFirstUris = []
            # first look if any plugin method to get readme first URIs
            if not any(pluginXbrlMethod(self)
                       for pluginXbrlMethod in pluginClassMethods(u"ModelTestcaseVariation.ReadMeFirstUris")):
                if self.localName == u"testGroup":  # w3c testcase
                    instanceTestElement = XmlUtil.descendant(self, None, u"instanceTest")
                    if instanceTestElement is not None:  # take instance first
                        self._readMeFirstUris.append(XmlUtil.descendantAttr(instanceTestElement, None,
                                                                            u"instanceDocument",
                                                                            u"{http://www.w3.org/1999/xlink}href"))
                    else:
                        schemaTestElement = XmlUtil.descendant(self, None, u"schemaTest")
                        if schemaTestElement is not None:
                            self._readMeFirstUris.append(XmlUtil.descendantAttr(schemaTestElement, None,
                                                                                u"schemaDocument",
                                                                                u"{http://www.w3.org/1999/xlink}href"))
                elif self.localName == u"test-case":  # xpath testcase
                    inputFileElement = XmlUtil.descendant(self, None, u"input-file")
                    if inputFileElement is not None:  # take instance first
                        self._readMeFirstUris.append(u"TestSources/" + inputFileElement.text + u".xml")
                else:
                    # default built-in method for readme first uris
                    for anElement in self.iterdescendants():
                        if isinstance(anElement,ModelObject) and anElement.get(u"readMeFirst") == u"true":
                            # href attribute wins over element inner text
                            if anElement.get(u"{http://www.w3.org/1999/xlink}href"):
                                uri = anElement.get(u"{http://www.w3.org/1999/xlink}href")
                            else:
                                uri = XmlUtil.innerText(anElement)
                            if anElement.get(u"name"):
                                self._readMeFirstUris.append( (ModelValue.qname(anElement, anElement.get(u"name")), uri) )
                            elif anElement.get(u"dts"):
                                self._readMeFirstUris.append( (anElement.get(u"dts"), uri) )
                            else:
                                self._readMeFirstUris.append(uri)
            if not self._readMeFirstUris:  # provide a dummy empty instance document
                self._readMeFirstUris.append(os.path.join(self.modelXbrl.modelManager.cntlr.configDir, u"empty-instance.xml"))
            return self._readMeFirstUris
    @property
    def parameters(self):
        """Dict of parameter qname -> (datatype qname, value string), cached."""
        try:
            return self._parameters
        except AttributeError:
            self._parameters = dict([
                (ModelValue.qname(paramElt, paramElt.get(u"name")),  # prefix-less parameter names take default namespace of element
                 (ModelValue.qname(paramElt, paramElt.get(u"datatype")),paramElt.get(u"value")))
                for paramElt in XmlUtil.descendants(self, self.namespaceURI, u"parameter")])
            return self._parameters
    @property
    def resultIsVersioningReport(self):
        """True when <result> has a <versioningReport> descendant."""
        # NOTE(review): assumes XmlUtil.descendant tolerates a None first
        # argument when there is no <result> element — confirm in XmlUtil.
        return XmlUtil.descendant(XmlUtil.descendant(self, None, u"result"), None, u"versioningReport") is not None
    @property
    def versioningReportUri(self):
        """Text content of the <versioningReport> descendant."""
        return XmlUtil.text(XmlUtil.descendant(self, None, u"versioningReport"))
    @property
    def resultIsXbrlInstance(self):
        """True when <result> has an <instance> descendant."""
        # NOTE(review): assumes XmlUtil.descendant tolerates a None first
        # argument when there is no <result> element — confirm in XmlUtil.
        return XmlUtil.descendant(XmlUtil.descendant(self, None, u"result"), None, u"instance") is not None
@property
def resultXbrlInstanceUri(self):
resultInstance = XmlUtil.descendant(XmlUtil.descendant(self, None, u"result"), None, u"instance")
if resultInstance is not None:
return XmlUtil.text(resultInstance)
return None
    @property
    def resultIsInfoset(self):
        """True when an output path is configured and <result> names an infoset.

        An infoset is indicated either by a <file> child or by the result
        text ending in ".xml".
        """
        if self.modelDocument.outpath:
            result = XmlUtil.descendant(self, None, u"result")
            if result is not None:
                return XmlUtil.child(result, None, u"file") is not None or XmlUtil.text(result).endswith(u".xml")
        return False
@property
def resultInfosetUri(self):
result = XmlUtil.descendant(self, None, u"result")
if result is not None:
child = XmlUtil.child(result, None, u"file")
return os.path.join(self.modelDocument.outpath, XmlUtil.text(child if child is not None else result))
return None
@property
def resultIsTable(self):
result = XmlUtil.descendant(self, None, u"result")
if result is not None :
child = XmlUtil.child(result, None, u"table")
return child is not None and XmlUtil.text(child).endswith(u".xml")
return False
@property
def resultTableUri(self):
result = XmlUtil.descendant(self, None, u"result")
if result is not None:
child = XmlUtil.child(result, None, u"table")
return os.path.join(self.modelDocument.outpath, XmlUtil.text(child if child is not None else result))
return None
    @property
    def cfcnCall(self):
        """Custom-function call expression as (expression, holding element), cached.

        For xpath test-cases the XQuery file is read from disk; for cfcn
        testcases the first <call> element is used; for the rendering
        transforms registry the call is synthesized from name/input attributes.
        Returns None when no call applies.
        """
        # tuple of (expression, element holding the expression)
        try:
            return self._cfcnCall
        except AttributeError:
            self._cfcnCall = None
            if self.localName == u"test-case":  # xpath testcase
                queryElement = XmlUtil.descendant(self, None, u"query")
                if queryElement is not None:
                    filepath = (self.modelDocument.filepathdir + u"/" + u"Queries/XQuery/" +
                                self.get(u"FilePath") + queryElement.get(u"name") + u'.xq')
                    # normalize path separators for the local platform
                    if os.sep != u"/": filepath = filepath.replace(u"/", os.sep)
                    with io.open(filepath, u'rt', encoding=u'utf-8') as f:
                        self._cfcnCall = (f.read(), self)
            else:
                for callElement in XmlUtil.descendants(self, XbrlConst.cfcn, u"call"):
                    # only the first <call> is used
                    self._cfcnCall = (XmlUtil.innerText(callElement), callElement)
                    break
            if self._cfcnCall is None and self.namespaceURI == u"http://xbrl.org/2011/conformance-rendering/transforms":
                name = self.getparent().get(u"name")
                input = self.get(u"input")
                if name and input:
                    # synthesize name('input'), doubling embedded quotes
                    self._cfcnCall = (u"{0}('{1}')".format(name, input.replace(u"'",u"''")), self)
            return self._cfcnCall
    @property
    def cfcnTest(self):
        """Result-test expression as (expression, holding element), cached.

        For xpath test-cases with Text comparison the expected file is read
        from disk; for cfcn testcases the <test> element is used; for the
        rendering transforms registry the test is synthesized from the
        output attribute. Returns None when no test applies.
        """
        # tuple of (expression, element holding the expression)
        try:
            return self._cfcnTest
        except AttributeError:
            self._cfcnTest = None
            if self.localName == u"test-case":  # xpath testcase
                outputFileElement = XmlUtil.descendant(self, None, u"output-file")
                if outputFileElement is not None and outputFileElement.get(u"compare") == u"Text":
                    filepath = (self.modelDocument.filepathdir + u"/" + u"ExpectedTestResults/" +
                                self.get(u"FilePath") + outputFileElement.text)
                    # normalize path separators for the local platform
                    if os.sep != u"/": filepath = filepath.replace(u"/", os.sep)
                    with io.open(filepath, u'rt', encoding=u'utf-8') as f:
                        self._cfcnTest = (u"xs:string($result) eq '{0}'".format(f.read()), self)
            else:
                testElement = XmlUtil.descendant(self, XbrlConst.cfcn, u"test")
                if testElement is not None:
                    self._cfcnTest = (XmlUtil.innerText(testElement), testElement)
                elif self.namespaceURI == u"http://xbrl.org/2011/conformance-rendering/transforms":
                    output = self.get(u"output")
                    if output:
                        # synthesize equality test, doubling embedded quotes
                        self._cfcnTest = (u"$result eq '{0}'".format(output.replace(u"'",u"''")), self)
            return self._cfcnTest
    @property
    def expected(self):
        # Expected outcome of this testcase variation.  Plug-ins get first
        # say; otherwise the answer depends on the testcase dialect.  May be
        # a str, a QName (expected error), or a dict of assertion counts;
        # None when nothing is declared.
        for pluginXbrlMethod in pluginClassMethods(u"ModelTestcaseVariation.ExpectedResult"):
            expected = pluginXbrlMethod(self)
            if expected:
                return expected
        # default behavior without plugins
        if self.localName == u"testcase":
            return self.document.basename[:4] #starts with PASS or FAIL
        elif self.localName == u"testGroup": #w3c testcase
            instanceTestElement = XmlUtil.descendant(self, None, u"instanceTest")
            if instanceTestElement is not None: # take instance first
                return XmlUtil.descendantAttr(instanceTestElement, None, u"expected", u"validity")
            else:
                schemaTestElement = XmlUtil.descendant(self, None, u"schemaTest")
                if schemaTestElement is not None:
                    return XmlUtil.descendantAttr(schemaTestElement, None, u"expected", u"validity")
        # Generic testcase: an <error> element names the expected error QName.
        errorElement = XmlUtil.descendant(self, None, u"error")
        if errorElement is not None:
            return ModelValue.qname(errorElement, XmlUtil.text(errorElement))
        resultElement = XmlUtil.descendant(self, None, u"result")
        if resultElement is not None:
            expected = resultElement.get(u"expected")
            if expected:
                return expected
            for assertElement in XmlUtil.children(resultElement, None, u"assert"):
                num = assertElement.get(u"num")
                if len(num) == 5:
                    # 5-digit @num encodes an EFM rule, e.g. "12345" -> "EFM.1.23.45"
                    return u"EFM.{0}.{1}.{2}".format(num[0],num[1:3],num[3:6])
            # Collect (countSatisfied, countNotSatisfied) per assertion ID;
            # non-numeric counts are skipped.
            asserTests = {}
            for atElt in XmlUtil.children(resultElement, None, u"assertionTests"):
                try:
                    asserTests[atElt.get(u"assertionID")] = (_INT(atElt.get(u"countSatisfied")),_INT(atElt.get(u"countNotSatisfied")))
                except ValueError:
                    pass
            if asserTests:
                return asserTests
        elif self.get(u"result"):
            return self.get(u"result")
        return None
@property
def severityLevel(self):
for pluginXbrlMethod in pluginClassMethods(u"ModelTestcaseVariation.ExpectedSeverity"):
severityLevelName = pluginXbrlMethod(self)
if severityLevelName: # ignore plug in if not a plug-in-recognized test case
return logging._checkLevel(severityLevelName)
# default behavior without plugins
# SEC error cases have <assert severity={err|wrn}>...
if XmlUtil.descendant(self, None, u"assert", attrName=u"severity", attrValue=u"wrn") is not None:
return logging._checkLevel(u"WARNING")
return logging._checkLevel(u"INCONSISTENCY")
@property
def expectedVersioningReport(self):
XmlUtil.text(XmlUtil.text(XmlUtil.descendant(XmlUtil.descendant(self, None, u"result"), None, u"versioningReport")))
    @property
    def propertyView(self):
        # Name/value pairs for the properties-pane display of this variation.
        assertions = []
        for assertionElement in XmlUtil.descendants(self, None, u"assertionTests"):
            assertions.append((u"assertion",assertionElement.get(u"assertionID")))
            assertions.append((u" satisfied", assertionElement.get(u"countSatisfied")))
            assertions.append((u" not sat.", assertionElement.get(u"countNotSatisfied")))
        # The following triple-quoted string is inert commented-out code kept
        # as a bare literal; it has no runtime effect.
        u'''
        for assertionElement in XmlUtil.descendants(self, None, "assert"):
            efmNum = assertionElement.get("num")
            assertions.append(("assertion",
                               "EFM.{0}.{1}.{2}".format(efmNum[0], efmNum[1:2], efmNum[3:4])))
            assertions.append((" not sat.", "1"))
        '''
        readMeFirsts = [(u"readFirst", readMeFirstUri) for readMeFirstUri in self.readMeFirstUris]
        parameters = []
        if len(self.parameters) > 0: parameters.append((u"parameters", None))
        for pName, pTypeValue in self.parameters.items():
            parameters.append((pName,pTypeValue[1]))
        # Empty tuples () act as placeholders for absent optional rows.
        return [(u"id", self.id),
                (u"name", self.name),
                (u"description", self.description)] + \
                readMeFirsts + \
                parameters + \
               [(u"status", self.status),
                (u"call", self.cfcnCall[0]) if self.cfcnCall else (),
                (u"test", self.cfcnTest[0]) if self.cfcnTest else (),
                (u"infoset", self.resultInfosetUri) if self.resultIsInfoset else (),
                (u"expected", self.expected) if self.expected else (),
                (u"actual", u" ".join(unicode(i) for i in self.actual) if len(self.actual) > 0 else ())] + \
                assertions
def __repr__(self):
return (u"modelTestcaseVariation[{0}]{1})".format(self.objectId(),self.propertyView))
| |
"""Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import shutil
import json
import logging
import socket
import requests
import datetime
from celery import task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from slumber.exceptions import HttpClientError
from builds.constants import LATEST
from builds.models import Build, Version
from core.utils import send_email, run_on_app_servers
from doc_builder.loader import get_builder_class
from doc_builder.base import restoring_chdir
from doc_builder.environments import DockerEnvironment
from projects.exceptions import ProjectImportError
from projects.models import ImportedFile, Project
from projects.utils import run, make_api_version, make_api_project
from projects.constants import LOG_TEMPLATE
from builds.constants import STABLE
from projects import symlinks
from privacy.loader import Syncer
from search.parse_json import process_all_json_files
from search.utils import process_mkdocs_json
from restapi.utils import index_search_request
from vcs_support import utils as vcs_support_utils
from api.client import api as api_v1
from restapi.client import api as api_v2
try:
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
except:
from projects.signals import before_vcs, after_vcs, before_build, after_build
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
@task(default_retry_delay=7 * 60, max_retries=5)
@restoring_chdir
def update_docs(pk, version_pk=None, build_pk=None, record=True, docker=False,
                search=True, force=False, intersphinx=True, localmedia=True,
                basic=False, **kwargs):
    """
    The main entry point for updating documentation.
    It handles all of the logic around whether a project is imported or we
    created it. Then it will build the html docs and other requested parts.
    `pk`
        Primary key of the project to update
    `record`
        Whether or not to keep a record of the update in the database. Useful
        for preventing changes visible to the end-user when running commands
        from the shell, for example.
    """
    start_time = datetime.datetime.utcnow()
    try:
        project_data = api_v1.project(pk).get()
    except HttpClientError:
        # NOTE(review): execution falls through after logging, leaving
        # project_data unbound so the next line raises NameError -- confirm
        # whether this handler should re-raise or return instead.
        log.exception(LOG_TEMPLATE.format(project=pk, version='', msg='Failed to get project data on build. Erroring.'))
    project = make_api_project(project_data)
    # Don't build skipped projects
    if project.skip:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Skipping'))
        return
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Building'))
    version = ensure_version(project, version_pk)
    build = create_build(build_pk)
    results = {}
    # Build Servery stuff
    try:
        record_build(build=build, record=record, results=results, state='cloning')
        vcs_results = setup_vcs(version, build)
        if vcs_results:
            results.update(vcs_results)
        if project.documentation_type == 'auto':
            update_documentation_type(version)
        if docker or settings.DOCKER_ENABLE:
            record_build(build=build, record=record, results=results, state='building')
            # NOTE: the `docker` flag parameter is rebound here to the
            # DockerEnvironment instance for the rest of this branch.
            docker = DockerEnvironment(version)
            build_results = docker.build()
            results.update(build_results)
        else:
            record_build(build=build, record=record, results=results, state='installing')
            setup_results = setup_environment(version)
            results.update(setup_results)
            record_build(build=build, record=record, results=results, state='building')
            build_results = build_docs(version, force, search, localmedia)
            results.update(build_results)
    except vcs_support_utils.LockTimeout, e:
        # Exit code 423 (Locked) marks lock-retry builds; finish_build skips
        # notifications for these.
        results['checkout'] = (423, "", "Version locked, retrying in 5 minutes.")
        log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                     version=version.slug, msg="Unable to lock, will retry"))
        # http://celery.readthedocs.org/en/3.0/userguide/tasks.html#retrying
        # Should completely retry the task for us until max_retries is exceeded
        update_docs.retry(exc=e, throw=False)
    except ProjectImportError, e:
        results['checkout'] = (404, "", 'Failed to import project; skipping build.\n\nError\n-----\n\n%s' % e.message)
        # Close out build in finally with error.
        pass
    except Exception, e:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Top-level Build Failure"), exc_info=True)
        results['checkout'] = (404, "", 'Top-level Build Failure: %s' % e.message)
    finally:
        # Always persist the final build state, even on failure.
        record_build(build=build, record=record, results=results, state='finished', start_time=start_time)
        record_pdf(record=record, results=results, state='finished', version=version)
        log.info(LOG_TEMPLATE.format(project=version.project.slug, version='', msg='Build finished'))
    build_id = build.get('id')
    # Web Server Tasks
    if build_id:
        # Step succeeded iff its exit code is 0; missing steps count as failed.
        finish_build.delay(
            version_pk=version.pk,
            build_pk=build_id,
            hostname=socket.gethostname(),
            html=results.get('html', [404])[0] == 0,
            localmedia=results.get('localmedia', [404])[0] == 0,
            search=results.get('search', [404])[0] == 0,
            pdf=version.project.enable_pdf_build,
            epub=version.project.enable_epub_build,
        )
def ensure_version(project, version_pk):
    """Resolve the Version to build: by pk when given, else the project's LATEST."""
    if version_pk:
        data = api_v1.version(version_pk).get()
    else:
        data = api_v1.version(project.slug).get(slug=LATEST)['objects'][0]
    return make_api_version(data)
def update_documentation_type(version):
    """
    Automatically determine the doc type for a user.

    Counts markdown vs reST sources in the checkout; a markdown majority
    selects 'mkdocs', otherwise 'sphinx'.  The result is written back to
    the API and onto the in-memory project.
    """
    checkout_path = version.project.checkout_path(version.slug)
    os.chdir(checkout_path)
    filenames = run('find .')[1].split('\n')
    md_count = sum(
        1 for name in filenames
        if fnmatch.fnmatch(name, '*.md') or fnmatch.fnmatch(name, '*.markdown'))
    rst_count = sum(1 for name in filenames if fnmatch.fnmatch(name, '*.rst'))
    doc_type = 'mkdocs' if md_count > rst_count else 'sphinx'
    project_data = api_v2.project(version.project.pk).get()
    project_data['documentation_type'] = doc_type
    api_v2.project(version.project.pk).put(project_data)
    version.project.documentation_type = doc_type
def docker_build(version, search=True, force=False, intersphinx=True,
                 localmedia=True):
    """
    The code that executes inside of docker.

    Sets up the build environment, builds the docs, and returns the merged
    result dicts (environment results take precedence on key clashes).
    """
    env_results = setup_environment(version)
    combined = dict(build_docs(version=version, force=force, search=search,
                               localmedia=localmedia))
    combined.update(env_results)
    return combined
def setup_vcs(version, build):
    """
    Update the checkout of the repo to make sure it's the latest.
    This also syncs versions in the DB.

    Returns the result dict from update_imported_docs.  Records the
    checked-out commit on the build dict when the VCS reports one, and
    re-raises ProjectImportError after logging so the caller fails the build.
    """
    log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                 version=version.slug, msg='Updating docs from VCS'))
    try:
        update_output = update_imported_docs(version.pk)
        commit = version.project.vcs_repo(version.slug).commit
        if commit:
            build['commit'] = commit
    except ProjectImportError:
        log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                      msg='Failed to import project; skipping build'), exc_info=True)
        raise
    return update_output
@task()
def update_imported_docs(version_pk):
    """
    Check out or update the given project's repository.

    Returns a dict whose 'checkout' entry holds the VCS command result.
    Also posts the repo's tags/branches to the API so the version list
    stays in sync.  Raises ProjectImportError for unknown repo types.
    """
    version_data = api_v1.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    ret_dict = {}
    # Make Dirs
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)
    if not project.vcs_repo():
        raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
    # Serialize VCS operations per version with a non-blocking lock.
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        before_vcs.send(sender=version)
        # Get the actual code on disk
        if version:
            log.info(
                LOG_TEMPLATE.format(
                    project=project.slug,
                    version=version.slug,
                    msg='Checking out version {slug}: {identifier}'.format(
                        slug=version.slug,
                        identifier=version.identifier
                    )
                )
            )
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.checkout(
                version.identifier,
            )
        else:
            # Does this ever get called?
            log.info(LOG_TEMPLATE.format(
                project=project.slug, version=version.slug, msg='Updating to latest revision'))
            version_slug = LATEST
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.update()
        after_vcs.send(sender=version)
        # Update tags/version
        version_post_data = {'repo': version_repo.repo_url}
        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]
        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]
        try:
            api_v2.project(project.pk).sync_versions.post(version_post_data)
        except Exception, e:
            # Best-effort sync: a failure here must not fail the checkout.
            print "Sync Versions Exception: %s" % e.message
    return ret_dict
def setup_environment(version):
    """
    Build the virtualenv and install the project into it.
    Always build projects with a virtualenv.

    Returns a dict of step name -> (exit_code, stdout, stderr) tuples for
    the 'venv', 'doc_builder', optional 'requirements', and 'install' steps.
    """
    ret_dict = {}
    project = version.project
    # Start from a clean virtualenv build directory.
    build_dir = os.path.join(project.venv_path(version=version.slug), 'build')
    if os.path.exists(build_dir):
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Removing existing build dir'))
        shutil.rmtree(build_dir)
    if project.use_system_packages:
        site_packages = '--system-site-packages'
    else:
        site_packages = '--no-site-packages'
    # Here the command has been modified to support different
    # interpreters.
    ret_dict['venv'] = run(
        '{cmd} {site_packages} {path}'.format(
            cmd='{interpreter} -m virtualenv'.format(
                interpreter=project.python_interpreter),
            site_packages=site_packages,
            path=project.venv_path(version=version.slug)
        )
    )
    # Other code expects sphinx-build to be installed inside the
    # virtualenv.  Using the -I option makes sure it gets installed
    # even if it is already installed system-wide (and
    # --system-site-packages is used)
    if project.use_system_packages:
        ignore_option = '-I'
    else:
        ignore_option = ''
    # Pinned doc-building tool-chain installed into every project venv.
    requirements = ' '.join([
        'sphinx==1.3.1',
        'Pygments==2.0.2',
        'virtualenv==13.1.0',
        'setuptools==18.0.1',
        'docutils==0.11',
        'mkdocs==0.14.0',
        'mock==1.0.1',
        'pillow==2.6.1',
        'readthedocs-sphinx-ext==0.5.4',
        'sphinx-rtd-theme==0.1.8',
        'alabaster>=0.7,<0.8,!=0.7.5',
        'recommonmark==0.1.1',
    ])
    # Install from local wheels where possible to avoid network flakiness.
    wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
    ret_dict['doc_builder'] = run(
        (
            '{cmd} install --use-wheel --find-links={wheeldir} -U '
            '{ignore_option} {requirements}'
        ).format(
            cmd=project.venv_bin(version=version.slug, bin='pip'),
            ignore_option=ignore_option,
            wheeldir=wheeldir,
            requirements=requirements,
        )
    )
    # Handle requirements
    requirements_file_path = project.requirements_file
    checkout_path = project.checkout_path(version.slug)
    if not requirements_file_path:
        # No explicit requirements file configured: probe conventional
        # locations (docs dir first, then checkout root).
        builder_class = get_builder_class(project.documentation_type)
        docs_dir = builder_class(version).docs_dir()
        for path in [docs_dir, '']:
            for req_file in ['pip_requirements.txt', 'requirements.txt']:
                test_path = os.path.join(checkout_path, path, req_file)
                print('Testing %s' % test_path)
                if os.path.exists(test_path):
                    requirements_file_path = test_path
                    break
    if requirements_file_path:
        os.chdir(checkout_path)
        ret_dict['requirements'] = run(
            '{cmd} install --exists-action=w -r {requirements}'.format(
                cmd=project.venv_bin(version=version.slug, bin='pip'),
                requirements=requirements_file_path))
    # Handle setup.py
    os.chdir(project.checkout_path(version.slug))
    if os.path.isfile("setup.py"):
        if getattr(settings, 'USE_PIP_INSTALL', False):
            ret_dict['install'] = run(
                '{cmd} install --ignore-installed .'.format(
                    cmd=project.venv_bin(version=version.slug, bin='pip')))
        else:
            ret_dict['install'] = run(
                '{cmd} setup.py install --force'.format(
                    cmd=project.venv_bin(version=version.slug,
                                         bin='python')))
    else:
        # Synthetic non-zero exit code marks the skipped step.
        ret_dict['install'] = (999, "", "No setup.py, skipping install")
    return ret_dict
@task()
def build_docs(version, force, search, localmedia):
    """
    This handles the actual building of the documentation

    Builds HTML (always), then search JSON, local media, PDF, and ePub as
    enabled.  Returns a dict of step name -> (exit_code, stdout, stderr).
    """
    project = version.project
    results = {}
    before_build.send(sender=version)
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        html_builder = get_builder_class(project.documentation_type)(version)
        if force:
            html_builder.force()
        html_builder.append_conf()
        results['html'] = html_builder.build()
        if results['html'][0] == 0:
            html_builder.move()
        # Gracefully attempt to move files via task on web workers.
        try:
            move_files.delay(
                version_pk=version.pk,
                html=True,
                hostname=socket.gethostname(),
            )
        except socket.error:
            pass
        # Placeholder result for steps disabled on this project.
        fake_results = (999, "Project Skipped, Didn't build",
                        "Project Skipped, Didn't build")
        if 'mkdocs' in project.documentation_type:
            if search:
                try:
                    search_builder = get_builder_class('mkdocs_json')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        search_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
        if 'sphinx' in project.documentation_type:
            # Search builder. Creates JSON from docs and sends it to the
            # server.
            if search:
                try:
                    search_builder = get_builder_class('sphinx_search')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        # Copy json for safe keeping
                        search_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
            # Local media builder for singlepage HTML download archive
            if localmedia:
                try:
                    localmedia_builder = get_builder_class('sphinx_singlehtmllocalmedia')(version)
                    results['localmedia'] = localmedia_builder.build()
                    if results['localmedia'][0] == 0:
                        localmedia_builder.move()
                except:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="Local Media HTML Build Error"), exc_info=True)
            # Optional build steps
            if version.project.slug not in HTML_ONLY and not project.skip:
                if project.enable_pdf_build:
                    pdf_builder = get_builder_class('sphinx_pdf')(version)
                    results['pdf'] = pdf_builder.build()
                    # Always move pdf results even when there's an error.
                    # if pdf_results[0] == 0:
                    pdf_builder.move()
                else:
                    results['pdf'] = fake_results
                if project.enable_epub_build:
                    epub_builder = get_builder_class('sphinx_epub')(version)
                    results['epub'] = epub_builder.build()
                    if results['epub'][0] == 0:
                        epub_builder.move()
                else:
                    results['epub'] = fake_results
    after_build.send(sender=version)
    return results
def create_build(build_pk):
    """
    Old placeholder for build creation. Now it just gets it from the database.

    Strips API-only keys from the fetched build dict so it can be PUT back
    later; returns an empty dict when no pk is given.
    """
    if not build_pk:
        return {}
    build = api_v1.build(build_pk).get()
    for key in ('project', 'version', 'resource_uri', 'absolute_uri'):
        build.pop(key, None)
    return build
def record_build(record, build, results, state, start_time=None):
    """
    Record a build by hitting the API.
    Returns nothing

    :param record: when falsy, skip entirely and return None.
    :param build: mutable build dict, updated in place then PUT to the API.
    :param results: dict of step name -> (exit_code, stdout, stderr) tuples.
    :param state: build state string (e.g. 'cloning', 'building', 'finished').
    :param start_time: when given, used to compute the build length.
    """
    if not record:
        return None
    build['builder'] = socket.gethostname()
    setup_steps = ['checkout', 'venv', 'doc_builder', 'requirements', 'install']
    output_steps = ['html']
    all_steps = setup_steps + output_steps
    build['state'] = state
    # Overall success is defined by the html step alone.
    if 'html' in results:
        build['success'] = results['html'][0] == 0
    else:
        build['success'] = False
    # Set global state
    # for step in all_steps:
    #     if results.get(step, False):
    #         if results.get(step)[0] != 0:
    #             results['success'] = False
    # Worst (highest) exit code across all steps wins.
    build['exit_code'] = max([results.get(step, [0])[0] for step in all_steps])
    build['setup'] = build['setup_error'] = ""
    build['output'] = build['error'] = ""
    if start_time:
        build['length'] = (datetime.datetime.utcnow() - start_time).total_seconds()
    # Concatenate stdout/stderr of each completed step under a heading;
    # malformed result tuples are tolerated.
    for step in setup_steps:
        if step in results:
            build['setup'] += "\n\n%s\n-----\n\n" % step
            try:
                build['setup'] += results.get(step)[1]
            except (IndexError, TypeError):
                pass
            build['setup_error'] += "\n\n%s\n-----\n\n" % step
            try:
                build['setup_error'] += results.get(step)[2]
            except (IndexError, TypeError):
                pass
    for step in output_steps:
        if step in results:
            build['output'] += "\n\n%s\n-----\n\n" % step
            try:
                build['output'] += results.get(step)[1]
            except (IndexError, TypeError):
                pass
            build['error'] += "\n\n%s\n-----\n\n" % step
            try:
                build['error'] += results.get(step)[2]
            except (IndexError, TypeError):
                pass
    # Attempt to stop unicode errors on build reporting
    for key, val in build.items():
        if isinstance(val, basestring):
            build[key] = val.decode('utf-8', 'ignore')
    try:
        api_v1.build(build['id']).put(build)
    except Exception:
        log.error("Unable to post a new build", exc_info=True)
def record_pdf(record, results, state, version):
    """Report PDF build status to the API as a separate 'pdf'-type build.

    No-op unless recording is on, the project is sphinx-based, and PDF
    builds are enabled for it.
    """
    if not record or 'sphinx' not in version.project.documentation_type:
        return None
    if not version.project.enable_pdf_build:
        return None
    try:
        if 'pdf' in results:
            pdf_exit = results['pdf'][0]
            pdf_success = pdf_exit == 0
            pdf_output = results['pdf'][1]
            pdf_error = results['pdf'][2]
        else:
            # Synthetic failure when the pdf step never ran.
            pdf_exit = 999
            pdf_success = False
            pdf_output = pdf_error = "PDF Failed"
        pdf_output = pdf_output.decode('utf-8', 'ignore')
        pdf_error = pdf_error.decode('utf-8', 'ignore')
        # Override the exit code when the log shows a PDF was produced
        # (the pdf return code lies -- see move_files).
        if 'Output written on' in pdf_output:
            pdf_success = True
        api_v1.build.post(dict(
            state=state,
            project='/api/v1/project/%s/' % version.project.pk,
            version='/api/v1/version/%s/' % version.pk,
            success=pdf_success,
            type='pdf',
            output=pdf_output,
            error=pdf_error,
            exit_code=pdf_exit,
            builder=socket.gethostname(),
        ))
    except Exception:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Unable to post a new build"), exc_info=True)
###########
# Web tasks
###########
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
                 localmedia=False, search=False, pdf=False, epub=False):
    """
    Build Finished, do house keeping bits

    Marks the version active/built on HTML success, moves artifacts into
    place, refreshes symlinks, and queues metadata/search/notification tasks.
    """
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    if html:
        version.active = True
        version.built = True
        version.save()
    move_files(
        version_pk=version_pk,
        hostname=hostname,
        html=html,
        localmedia=localmedia,
        search=search,
        pdf=pdf,
        epub=epub,
    )
    symlinks.symlink_cnames(version)
    symlinks.symlink_translations(version)
    symlinks.symlink_subprojects(version)
    if version.project.single_version:
        symlinks.symlink_single_version(version)
    else:
        symlinks.remove_symlink_single_version(version)
    # Delayed tasks
    update_static_metadata.delay(version.project.pk)
    fileify.delay(version.pk, commit=build.commit)
    update_search.delay(version.pk, commit=build.commit)
    # Notify on failure, but not for STABLE and not for lock-retry (423) builds.
    if not html and version.slug != STABLE and build.exit_code != 423:
        send_notifications.delay(version.pk, build_pk=build.pk)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """Copy built artifacts from the build server onto a web server.

    Each flag selects one artifact type; all copies go through Syncer so the
    same code path works for local and cross-host deployments.
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project

    def _sync(artifact_type, media_type):
        # Copy one artifact directory to its production media location.
        src = project.artifact_path(version=version.slug, type=artifact_type)
        dst = project.get_production_media_path(
            type=media_type, version_slug=version.slug, include_file=False)
        Syncer.copy(src, dst, host=hostname)

    if html:
        src = project.artifact_path(version=version.slug, type=project.documentation_type)
        Syncer.copy(src, project.rtd_build_path(version.slug), host=hostname)
    if 'sphinx' in project.documentation_type:
        if localmedia:
            _sync('sphinx_localmedia', 'htmlzip')
        if search:
            _sync('sphinx_search', 'json')
        # Always move PDF's because the return code lies.
        if pdf:
            _sync('sphinx_pdf', 'pdf')
        if epub:
            _sync('sphinx_epub', 'epub')
    if 'mkdocs' in project.documentation_type:
        if search:
            _sync('mkdocs_json', 'json')
@task(queue='web')
def update_search(version_pk, commit):
    """Parse this version's built JSON pages and ship them to the search index."""
    version = Version.objects.get(pk=version_pk)
    doc_type = version.project.documentation_type
    if 'sphinx' in doc_type:
        page_list = process_all_json_files(version, build_dir=False)
    elif 'mkdocs' in doc_type:
        page_list = process_mkdocs_json(version, build_dir=False)
    else:
        log.error('Unknown documentation type: %s' % doc_type)
        return
    paths = ' '.join(page['path'] for page in page_list)
    log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, paths))
    index_search_request(
        version=version,
        page_list=page_list,
        commit=commit,
        project_scale=0,
        page_scale=0,
        # Don't index sections to speed up indexing.
        # They aren't currently exposed anywhere.
        section=False,
    )
@task(queue='web')
def fileify(version_pk, commit):
    """
    Create ImportedFile objects for all of a version's files.
    This is a prereq for indexing the docs for search.
    It also causes celery-haystack to kick off an index of the file.
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    if not commit:
        # NOTE(review): this branch only logs -- execution continues and
        # records files with commit=None; confirm whether an early return
        # was intended.
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Imported File not being built because no commit information'))
    path = project.rtd_build_path(version.slug)
    if path:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Creating ImportedFiles'))
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if fnmatch.fnmatch(filename, '*.html'):
                    # Path stored relative to the build root.
                    dirpath = os.path.join(root.replace(path, '').lstrip('/'),
                                           filename.lstrip('/'))
                    obj, created = ImportedFile.objects.get_or_create(
                        project=project,
                        version=version,
                        path=dirpath,
                        name=filename,
                        commit=commit,
                    )
                    # Re-save existing rows so indexing signal handlers fire.
                    if not created:
                        obj.save()
        # Delete ImportedFiles from previous versions
        ImportedFile.objects.filter(project=project, version=version).exclude(commit=commit).delete()
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
@task(queue='web')
def send_notifications(version_pk, build_pk):
    """Fan out build notifications to every webhook and email subscriber."""
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    project = version.project
    for hook in project.webhook_notifications.all():
        webhook_notification(version, build, hook.url)
    emails = project.emailhook_notifications.all().values_list('email', flat=True)
    for email in emails:
        email_notification(version, build, email)
def email_notification(version, build, email):
    """Send a build-failed email for `build` to a single address.

    The template context includes absolute build and unsubscribe URLs built
    from the PRODUCTION_DOMAIN setting.
    """
    log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                  msg='sending email to: %s' % email))
    context = {'version': version,
               'project': version.project,
               'build': build,
               'build_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   build.get_absolute_url()),
               'unsub_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   reverse('projects_notifications', args=[version.project.slug])),
               }
    # Prefer the short commit hash in the subject when one is recorded.
    if build.commit:
        title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
    else:
        title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
    send_email(
        email,
        title,
        template='projects/email/build_failed.txt',
        template_html='projects/email/build_failed.html',
        context=context
    )
def webhook_notification(version, build, hook_url):
    """POST a JSON build-status payload to ``hook_url``.

    Fix: the original referenced an undefined name ``project`` (NameError on
    every call); it is now derived from ``version``.

    :param version: the built Version (provides the project).
    :param build: the Build whose id/success/date are reported.
    :param hook_url: subscriber endpoint to POST to.
    """
    project = version.project
    data = json.dumps({
        'name': project.name,
        'slug': project.slug,
        'build': {
            'id': build.id,
            'success': build.success,
            'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
        }
    })
    log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
    requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
    """Update static metadata JSON file
    Metadata settings include the following project settings:
    version
        The default version for the project, default: `latest`
    language
        The default language for the project, default: `en`
    languages
        List of languages built by linked translation projects.

    :param project_pk: primary key of the Project to describe.
    :param path: destination file; defaults to the project's
        static_metadata_path().
    """
    project = Project.objects.get(pk=project_pk)
    if not path:
        path = project.static_metadata_path()
    log.info(LOG_TEMPLATE.format(
        project=project.slug,
        version='',
        msg='Updating static metadata',
    ))
    translations = [trans.language for trans in project.translations.all()]
    languages = set(translations)
    # Convert to JSON safe types
    metadata = {
        'version': project.default_version,
        'language': project.language,
        'languages': list(languages),
        'single_version': project.single_version,
    }
    try:
        # Fix: use a context manager so the file handle is closed even if
        # json.dump raises (the original open/close leaked on error).
        with open(path, 'w+') as fh:
            json.dump(metadata, fh)
        Syncer.copy(path, path, host=socket.gethostname(), file=True)
    except (AttributeError, IOError) as e:
        log.debug(LOG_TEMPLATE.format(
            project=project.slug,
            version='',
            msg='Cannot write to metadata.json: {0}'.format(e)
        ))
#@periodic_task(run_every=crontab(hour="*", minute="*/5", day_of_week="*"))
def update_docs_pull(record=False, force=False):
    """
    A high-level interface that will update all of the projects.
    This is mainly used from a cronjob or management command.
    """
    for version in Version.objects.filter(built=True):
        try:
            update_docs(pk=version.project.pk,
                        version_pk=version.pk,
                        record=record)
        except Exception:
            # One broken project must not stop the sweep.
            log.error("update_docs_pull failed", exc_info=True)
##############
# Random Tasks
##############
@task()
def remove_dir(path):
    """
    Remove a directory on the build/celery server.
    This is mainly a wrapper around shutil.rmtree so that app servers
    can kill things on the build server.
    """
    log.info("Removing %s", path)
    shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
    """ Remove artifacts from the web servers. """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    # Wipe each production media directory, then the built HTML itself.
    for media_type in ('pdf', 'epub', 'htmlzip'):
        media_path = project.get_production_media_path(
            type=media_type, version_slug=version.slug)
        run_on_app_servers('rm -rf %s' % media_path)
    run_on_app_servers('rm -rf %s' % project.rtd_build_path(version=version.slug))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: ardydedase
# @Date: 2015-08-30 11:19:30
# @Last Modified by: ardydedase
# @Last Modified time: 2015-09-24 19:36:51
import time
import requests
import logging
import sys
from requests.exceptions import ConnectionError
try:
import lxml.etree as etree
except ImportError:
import xml.etree.ElementTree as etree
def configure_logger(log_level=logging.DEBUG):
    """Return the module logger configured with a stdout stream handler.

    Fix: guard against attaching a duplicate handler when called more than
    once (the original added a new StreamHandler on every call, which
    produced repeated log lines).

    :param log_level: level set on the logger, default DEBUG.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(log_level)
    if not logger.handlers:
        try:
            sa = logging.StreamHandler(stream=sys.stdout)
        except TypeError:
            # Older Python: StreamHandler() took no `stream` keyword.
            sa = logging.StreamHandler()
        formatter = logging.Formatter(
            '%(asctime)s - %(filename)s:%(lineno)d - %(levelname)s - %(message)s')
        sa.setFormatter(formatter)
        logger.addHandler(sa)
    return logger
log = configure_logger()
class ExceededRetries(Exception):
    """Raised when the allowed number of polls has been used up but the
    response is still incomplete."""
    pass
class EmptyResponse(Exception):
    """Raised when the API returns a response with no content."""
    pass
class InvalidResponse(ValueError):
    """Raised when the API returns a truncated or otherwise unparseable
    JSON/XML response."""
    pass
class MissingParameter(KeyError):
    """Raised when a required request parameter was not supplied."""
    pass
class InvalidParameter(KeyError):
    """Raised when an unrecognized request parameter is present."""
    pass
# Error-handling modes accepted by APIWrapper.make_request(errors=...):
# fail fast, tolerate soft errors, or swallow communication errors.
STRICT, GRACEFUL, IGNORE = 'strict', 'graceful', 'ignore'
class APIWrapper(object):
    def __init__(self, response_format='json'):
        """Create a wrapper expecting API responses in `response_format`
        ('json' by default); the value drives both the Accept header
        (_headers) and response parsing (_parse_resp)."""
        self.response_format = response_format
def _default_resp_callback(self, resp):
if not resp or not resp.content:
raise EmptyResponse('Response has no content.')
try:
parsed_resp = self._parse_resp(resp, self.response_format)
except (ValueError, SyntaxError):
raise ValueError('Invalid %s in response: %s...' %
(self.response_format.upper(),
resp.content[:100]))
return parsed_resp
def make_request(self, url, method='get', headers=None, data=None,
callback=None, errors=STRICT, verify=False, timeout=None, **params):
"""
Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
values are:
* strict (default) - throw an error as soon as one
occurred
* graceful - ignore certain errors, e.g. EmptyResponse
* ignore - ignore all errors and return a result in
any case.
NOTE that it DOES NOT mean that no
exceptions can be
raised from this method, it mostly ignores
communication
related errors.
* None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request
"""
error_modes = (STRICT, GRACEFUL, IGNORE)
error_mode = errors or GRACEFUL
if error_mode.lower() not in error_modes:
raise ValueError(
'Possible values for errors argument are: %s'
% ','.join(error_modes))
if callback is None:
callback = self._default_resp_callback
request = getattr(requests, method.lower())
log.debug('* Request URL: %s' % url)
log.debug('* Request method: %s' % method)
log.debug('* Request query params: %s' % params)
log.debug('* Request headers: %s' % headers)
log.debug('* Request timeout: %s' % timeout)
r = request(
url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)
log.debug('* r.url: %s' % r.url)
try:
r.raise_for_status()
return callback(r)
except Exception as e:
return self._with_error_handling(r, e,
error_mode, self.response_format)
def _headers(self):
return {'Accept': 'application/%s' % self.response_format}
@staticmethod
def _parse_resp(resp, response_format):
resp.parsed = etree.fromstring(
resp.content) if response_format == 'xml' else resp.json()
return resp
@staticmethod
def _with_error_handling(resp, error, mode, response_format):
"""
Static method for error handling.
:param resp - API response
:param error - Error thrown
:param mode - Error mode
:param response_format - XML or json
"""
def safe_parse(r):
try:
return APIWrapper._parse_resp(r, response_format)
except (ValueError, SyntaxError) as ex:
log.error(ex)
r.parsed = None
return r
if isinstance(error, requests.HTTPError):
if resp.status_code == 400:
# It means that request parameters were rejected by the server,
# so we need to enrich standard error message
# with 'ValidationErrors'
# from the response
resp = safe_parse(resp)
if resp.parsed is not None:
parsed_resp = resp.parsed
messages = []
if response_format == 'xml' and\
parsed_resp.find('./ValidationErrors') is not None:
messages = [e.find('./Message').text
for e in parsed_resp.findall('./ValidationErrors/ValidationErrorDto')]
elif response_format == 'json' and 'ValidationErrors' in parsed_resp:
messages = [e['Message']
for e in parsed_resp['ValidationErrors']]
error = requests.HTTPError(
'%s: %s' % (error, '\n\t'.join(messages)), response=resp)
elif resp.status_code == 429:
error = requests.HTTPError('%sToo many requests in the last minute.' % error,
response=resp)
if STRICT == mode:
raise error
elif GRACEFUL == mode:
if isinstance(error, EmptyResponse):
# Empty response is returned by the API occasionally,
# in this case it makes sense to ignore it and retry.
log.warning(error)
resp.parsed = None
return resp
elif isinstance(error, requests.HTTPError):
# Ignoring 'Too many requests' error,
# since subsequent retries will come after a delay.
if resp.status_code == 429: # Too many requests
log.warning(error)
return safe_parse(resp)
else:
raise error
else:
raise error
else:
# ignore everything, just log it and return whatever response we
# have
log.error(error)
return safe_parse(resp)
def poll(self, url, initial_delay=2, delay=1, tries=20, errors=STRICT, is_complete_callback=None, **params):
"""
Poll the URL
:param url - URL to poll, should be returned by 'create_session' call
:param initial_delay - specifies how many seconds to wait before the first poll
:param delay - specifies how many seconds to wait between the polls
:param tries - number of polls to perform
:param errors - errors handling mode, see corresponding parameter in 'make_request' method
:param params - additional query params for each poll request
"""
time.sleep(initial_delay)
poll_response = None
if is_complete_callback == None:
is_complete_callback = self._default_poll_callback
for n in range(tries):
poll_response = self.make_request(url, headers=self._headers(),
errors=errors, **params)
if is_complete_callback(poll_response):
return poll_response
else:
time.sleep(delay)
if STRICT == errors:
raise ExceededRetries(
"Failed to poll within {0} tries.".format(tries))
else:
return poll_response
def _default_poll_callback(self, poll_resp):
"""
Checks the condition in poll response to determine if it is complete
and no subsequent poll requests should be done.
"""
if poll_resp.parsed is None:
return False
success_list = ['UpdatesComplete', True, 'COMPLETE']
status = None
if self.response_format == 'xml':
status = poll_resp.parsed.find('./Status').text
elif self.response_format == 'json':
status = poll_resp.parsed.get(
'Status', poll_resp.parsed.get('status'))
if status is None:
raise RuntimeError('Unable to get poll response status.')
return status in success_list
| |
# Copyright 2017 Taylor DeHaan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the DirectoryExplorer class.
The DirectoryExplorer class is used to recursively explore files and
directories.
"""
from collections import deque
import os
import re
from .directory_tree import DirectoryTree
class DirectoryExplorer(object):
    """Recursively explores files and directories.
    This class exposes an explore() method which performs a breadth-first search
    through directories and returns the results. The invoker can specify whether
    hidden files and directories are displayed and can set a maximum recursion
    level.
    Args:
        start_dir (str): The path to the directory where the BFS will originate.
        show_hidden (bool): When false, filter out hidden files and directories.
            Hidden files and directories start with a "."
        recursion_limit (int): The number of recursion levels that can should be
            explored.
    Attributes:
        _start_dir (str): The path to the directory where the BFS will
            originate.
        _show_hidden (bool): When false, filter out hidden files and
            directories.
        _recursion_limit (int): The number of recursion levels that will be
            explored.
    """
    def __init__(self, start_dir=".", show_hidden=False, recursion_limit=10):
        # type: (str, bool, int) -> None
        self._start_dir = start_dir
        self._show_hidden = show_hidden
        self._recursion_limit = recursion_limit
    def _sort_and_filter(self, raw_list, root):
        """Sorts and filters a list of files and directories.
        Takes a list of directories and files, parses each entry sorting it
        based on whether it's a file, directory, or symlink. Filters out hidden
        files if show_hidden is False. Returns a tuple of file, directory, and
        symlink lists.
        Args:
            raw_list (list): List containing string directory and file names.
            root (str): Root path to the entries in the list.
        Returns:
            A tuple of lists, the first for files, the second for directories,
            and the last entry for symbolic links. Each symlink entry is a
            (name, resolved_target) tuple; file and directory entries are
            bare names.
        """
        # Initialize lists for files, directories, and symlinks
        files = []
        directories = []
        symlinks = []
        # Lexicographically sort the raw list
        lex_ordered_list = sorted(raw_list, key=str.lower)
        # Iterate through each entry in the file/directory list
        for entry in lex_ordered_list:
            # If show hidden is false and the entry starts with a '.', continue
            # to the next entry
            if not self._show_hidden and re.match(r"\..*", entry):
                continue
            # Determine entry type and append to respective list.
            # islink() is checked first so symlinks to files are classified
            # as symlinks, not files.
            full_path = os.path.join(root, entry)
            if os.path.islink(full_path):
                symlinks.append((entry, os.path.realpath(full_path)))
            elif os.path.isfile(full_path):
                files.append(entry)
            else:
                # Anything that is neither a link nor a regular file
                # (directories, but also specials/unstat-able entries) is
                # treated as a directory.
                directories.append(entry)
        # Return tuple of the file, directory, and symlink lists
        return files, directories, symlinks
    def explore(self):
        """Performs a breadth-first search on directory contents.
        The search starts from from the _start_dir and will continue until
        either there are no more directories to explore or the recursion limit
        has been met.
        Returns:
            A list of tuples. The tuples consist of two lists; the first
            containing directories and the second containing files. Each entry
            in the list represents the contents of a directory.
        """
        # Initialize results and recursion_level variables
        results = []
        recursion_level = 0
        # Initialize the two queues for the current level and the next level
        current_level = deque()
        next_level = deque()
        # Add the start directory to the current level
        current_level.append(self._start_dir)
        # Loop while the current level queue is not empty
        while len(current_level) != 0:
            # Pop the current directory from the top of the queue
            current_dir = current_level.popleft()
            # Use os.listdir to get a list of all files & directories inside of
            # the current_dir
            try:
                listdir_result = os.listdir(current_dir)
            except OSError:
                # We don't have permission to read this directory so move on
                continue
            # Sort and filter the results from listdir
            files, directories, _ = self._sort_and_filter(listdir_result,
                                                          current_dir)
            # Add a tuple of the sorted directories and files to the results
            results.append((directories, files))
            # If the recursion level is at the limit, continue (do not enqueue
            # children; the remaining current-level dirs are still listed)
            if recursion_level == self._recursion_limit:
                continue
            # For each directory inside of current_dir, add the absolute path
            # to the next level queue
            for directory in directories:
                next_level.append(os.path.join(current_dir, directory))
            # If the current level queue is empty and we are still below the
            # recursion limit, set the current level queue equal to the next
            # level queue and increment the recursion level
            if len(current_level) == 0 and \
               recursion_level < self._recursion_limit:
                current_level = next_level
                next_level = deque()
                recursion_level += 1
        return results
    def build_tree(self):
        """Builds a DirectoryTree by BFSing through directory contents.
        This method is very similar to explore() except that this method builds
        and returns a DirectoryTree object instead of alist of tuples.
        Returns:
            A DirectoryTree object.
        """
        # Initialize result and recursion_level variables
        result = DirectoryTree(self._start_dir)
        recursion_level = 0
        # Initialize the two queues for the current level and the next level
        current_level = deque()
        next_level = deque()
        # Add the start directory and root to the current level
        current_level.append((self._start_dir, result.get_root()))
        # Loop while the current level queue is not empty
        while len(current_level) != 0:
            # Pop the current directory and node from the top of the queue
            current_dir, current_node = current_level.popleft()
            # Use os.listdir to get a list of all files & directories inside of
            # the current_dir
            try:
                listdir_result = os.listdir(current_dir)
            except OSError:
                # We don't have permission to read this directory so move on
                continue
            # Sort and filter the results from listdir
            files, directories, symlinks = self._sort_and_filter(listdir_result,
                                                                 current_dir)
            # Add files to node
            current_node.add_files(files)
            # Add symlinks to node
            current_node.add_symlinks(symlinks)
            # For each directory inside of current_dir, add child node
            for directory in directories:
                current_node.add_child(directory)
                # Append dirname + node tuple to next level queue if recursion
                # limit has not been reached
                if recursion_level != self._recursion_limit:
                    next_level.append((os.path.join(current_dir, directory),
                                       current_node.get_last_child()))
            # If the current level queue is empty and we are still below the
            # recursion limit, set the current level queue equal to the next
            # level queue and increment the recursion level
            if len(current_level) == 0 and \
               recursion_level < self._recursion_limit:
                current_level = next_level
                next_level = deque()
                recursion_level += 1
        return result
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import time
from pyspark.sql.functions import lit
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.testing.sqlutils import ReusedSQLTestCase
class StreamingTests(ReusedSQLTestCase):
    """Tests for structured streaming: trigger/option handling, query
    lifecycle (status, progress, termination, exceptions), and the
    foreach / foreachBatch sinks."""

    def test_stream_trigger(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')

        # Should take at least one arg
        try:
            df.writeStream.trigger()
        except ValueError:
            pass

        # Should not take multiple args
        try:
            df.writeStream.trigger(once=True, processingTime='5 seconds')
        except ValueError:
            pass

        # Should not take multiple args
        try:
            df.writeStream.trigger(processingTime='5 seconds', continuous='1 second')
        except ValueError:
            pass

        # Should take only keyword args
        try:
            df.writeStream.trigger('5 seconds')
            self.fail("Should have thrown an exception")
        except TypeError:
            pass

    def test_stream_read_options(self):
        schema = StructType([StructField("data", StringType(), False)])
        df = self.spark.readStream\
            .format('text')\
            .option('path', 'python/test_support/sql/streaming')\
            .schema(schema)\
            .load()
        self.assertTrue(df.isStreaming)
        self.assertEqual(df.schema.simpleString(), "struct<data:string>")

    def test_stream_read_options_overwrite(self):
        bad_schema = StructType([StructField("test", IntegerType(), False)])
        schema = StructType([StructField("data", StringType(), False)])
        # SPARK-32516 disables the overwrite behavior by default.
        with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
            df = self.spark.readStream.format('csv')\
                .option('path', 'python/test_support/sql/fake')\
                .schema(bad_schema)\
                .load(path='python/test_support/sql/streaming', schema=schema, format='text')
        self.assertTrue(df.isStreaming)
        self.assertEqual(df.schema.simpleString(), "struct<data:string>")

    def test_stream_save_options(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
            .withColumn('id', lit(1))
        for q in self.spark._wrapped.streams.active:
            q.stop()
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.assertTrue(df.isStreaming)
        out = os.path.join(tmpPath, 'out')
        chk = os.path.join(tmpPath, 'chk')
        q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
            .format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
        try:
            self.assertEqual(q.name, 'this_query')
            self.assertTrue(q.isActive)
            q.processAllAvailable()
            output_files = []
            for _, _, files in os.walk(out):
                output_files.extend([f for f in files if not f.startswith('.')])
            self.assertTrue(len(output_files) > 0)
            self.assertTrue(len(os.listdir(chk)) > 0)
        finally:
            q.stop()
            shutil.rmtree(tmpPath)

    def test_stream_save_options_overwrite(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        for q in self.spark._wrapped.streams.active:
            q.stop()
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.assertTrue(df.isStreaming)
        out = os.path.join(tmpPath, 'out')
        chk = os.path.join(tmpPath, 'chk')
        fake1 = os.path.join(tmpPath, 'fake1')
        fake2 = os.path.join(tmpPath, 'fake2')
        # SPARK-32516 disables the overwrite behavior by default.
        with self.sql_conf({"spark.sql.legacy.pathOptionBehavior.enabled": True}):
            q = df.writeStream.option('checkpointLocation', fake1)\
                .format('memory').option('path', fake2) \
                .queryName('fake_query').outputMode('append') \
                .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
        try:
            self.assertEqual(q.name, 'this_query')
            self.assertTrue(q.isActive)
            q.processAllAvailable()
            output_files = []
            for _, _, files in os.walk(out):
                output_files.extend([f for f in files if not f.startswith('.')])
            self.assertTrue(len(output_files) > 0)
            self.assertTrue(len(os.listdir(chk)) > 0)
            self.assertFalse(os.path.isdir(fake1))  # should not have been created
            self.assertFalse(os.path.isdir(fake2))  # should not have been created
        finally:
            q.stop()
            shutil.rmtree(tmpPath)

    def test_stream_status_and_progress(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        for q in self.spark._wrapped.streams.active:
            q.stop()
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.assertTrue(df.isStreaming)
        out = os.path.join(tmpPath, 'out')
        chk = os.path.join(tmpPath, 'chk')

        def func(x):
            time.sleep(1)
            return x

        from pyspark.sql.functions import col, udf
        sleep_udf = udf(func)

        # Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
        # were no updates.
        q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
            .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
        try:
            # "lastProgress" will return None in most cases. However, as it may be flaky when
            # Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
            # may throw error with a high chance and make this test flaky, so we should still be
            # able to detect broken codes.
            q.lastProgress

            q.processAllAvailable()
            lastProgress = q.lastProgress
            recentProgress = q.recentProgress
            status = q.status
            self.assertEqual(lastProgress['name'], q.name)
            self.assertEqual(lastProgress['id'], q.id)
            self.assertTrue(any(p == lastProgress for p in recentProgress))
            self.assertTrue(
                "message" in status and
                "isDataAvailable" in status and
                "isTriggerActive" in status)
        finally:
            q.stop()
            shutil.rmtree(tmpPath)

    def test_stream_await_termination(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        for q in self.spark._wrapped.streams.active:
            q.stop()
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.assertTrue(df.isStreaming)
        out = os.path.join(tmpPath, 'out')
        chk = os.path.join(tmpPath, 'chk')
        q = df.writeStream\
            .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
        try:
            self.assertTrue(q.isActive)
            try:
                q.awaitTermination("hello")
                self.fail("Expected a value exception")
            except ValueError:
                pass
            now = time.time()
            # test should take at least 2 seconds
            res = q.awaitTermination(2.6)
            duration = time.time() - now
            self.assertTrue(duration >= 2)
            self.assertFalse(res)
        finally:
            q.processAllAvailable()
            q.stop()
            shutil.rmtree(tmpPath)

    def test_stream_exception(self):
        sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        sq = sdf.writeStream.format('memory').queryName('query_explain').start()
        try:
            sq.processAllAvailable()
            self.assertEqual(sq.exception(), None)
        finally:
            sq.stop()

        from pyspark.sql.functions import col, udf
        from pyspark.sql.utils import StreamingQueryException
        bad_udf = udf(lambda x: 1 / 0)
        sq = sdf.select(bad_udf(col("value")))\
            .writeStream\
            .format('memory')\
            .queryName('this_query')\
            .start()
        try:
            # Process some data to fail the query
            sq.processAllAvailable()
            self.fail("bad udf should fail the query")
        except StreamingQueryException as e:
            # This is expected
            self._assert_exception_tree_contains_msg(e, "ZeroDivisionError")
        finally:
            sq.stop()
        self.assertTrue(type(sq.exception()) is StreamingQueryException)
        self._assert_exception_tree_contains_msg(sq.exception(), "ZeroDivisionError")

    def _assert_exception_tree_contains_msg(self, exception, msg):
        """Walk the exception's cause chain and assert that ``msg`` appears
        in at least one exception description."""
        e = exception
        contains = msg in e.desc
        while e.cause is not None and not contains:
            e = e.cause
            contains = msg in e.desc
        self.assertTrue(contains, "Exception tree doesn't contain the expected message: %s" % msg)

    def test_query_manager_await_termination(self):
        df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
        for q in self.spark._wrapped.streams.active:
            q.stop()
        tmpPath = tempfile.mkdtemp()
        shutil.rmtree(tmpPath)
        self.assertTrue(df.isStreaming)
        out = os.path.join(tmpPath, 'out')
        chk = os.path.join(tmpPath, 'chk')
        q = df.writeStream\
            .start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
        try:
            self.assertTrue(q.isActive)
            try:
                self.spark._wrapped.streams.awaitAnyTermination("hello")
                self.fail("Expected a value exception")
            except ValueError:
                pass
            now = time.time()
            # test should take at least 2 seconds
            res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
            duration = time.time() - now
            self.assertTrue(duration >= 2)
            self.assertFalse(res)
        finally:
            q.processAllAvailable()
            q.stop()
            shutil.rmtree(tmpPath)

    class ForeachWriterTester:
        """Helper that records open/process/close events of a foreach writer
        into temp directories so tests can assert on them afterwards."""

        def __init__(self, spark):
            self.spark = spark

        def write_open_event(self, partitionId, epochId):
            self._write_event(
                self.open_events_dir,
                {'partition': partitionId, 'epoch': epochId})

        def write_process_event(self, row):
            self._write_event(self.process_events_dir, {'value': 'text'})

        def write_close_event(self, error):
            self._write_event(self.close_events_dir, {'error': str(error)})

        def write_input_file(self):
            self._write_event(self.input_dir, "text")

        def open_events(self):
            return self._read_events(self.open_events_dir, 'partition INT, epoch INT')

        def process_events(self):
            return self._read_events(self.process_events_dir, 'value STRING')

        def close_events(self):
            return self._read_events(self.close_events_dir, 'error STRING')

        def run_streaming_query_on_writer(self, writer, num_files):
            self._reset()
            try:
                sdf = self.spark.readStream.format('text').load(self.input_dir)
                sq = sdf.writeStream.foreach(writer).start()
                for i in range(num_files):
                    self.write_input_file()
                    sq.processAllAvailable()
            finally:
                self.stop_all()

        def assert_invalid_writer(self, writer, msg=None):
            self._reset()
            try:
                sdf = self.spark.readStream.format('text').load(self.input_dir)
                sq = sdf.writeStream.foreach(writer).start()
                self.write_input_file()
                sq.processAllAvailable()
                self.fail("invalid writer %s did not fail the query" % str(writer))  # not expected
            except Exception as e:
                if msg:
                    assert msg in str(e), "%s not in %s" % (msg, str(e))
            finally:
                self.stop_all()

        def stop_all(self):
            for q in self.spark._wrapped.streams.active:
                q.stop()

        def _reset(self):
            self.input_dir = tempfile.mkdtemp()
            self.open_events_dir = tempfile.mkdtemp()
            self.process_events_dir = tempfile.mkdtemp()
            self.close_events_dir = tempfile.mkdtemp()

        def _read_events(self, dir, json):
            rows = self.spark.read.schema(json).json(dir).collect()
            dicts = [row.asDict() for row in rows]
            return dicts

        def _write_event(self, dir, event):
            import uuid
            with open(os.path.join(dir, str(uuid.uuid4())), 'w') as f:
                f.write("%s\n" % str(event))

        def __getstate__(self):
            # Only the event directories are pickled to the executors; the
            # SparkSession itself is not serializable.
            return (self.open_events_dir, self.process_events_dir, self.close_events_dir)

        def __setstate__(self, state):
            self.open_events_dir, self.process_events_dir, self.close_events_dir = state

    # Those foreach tests are failed in Python 3.6 and macOS High Sierra by defined rules
    # at http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
    # To work around this, OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES.
    def test_streaming_foreach_with_simple_function(self):
        tester = self.ForeachWriterTester(self.spark)

        def foreach_func(row):
            tester.write_process_event(row)

        tester.run_streaming_query_on_writer(foreach_func, 2)
        self.assertEqual(len(tester.process_events()), 2)

    def test_streaming_foreach_with_basic_open_process_close(self):
        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def open(self, partitionId, epochId):
                tester.write_open_event(partitionId, epochId)
                return True

            def process(self, row):
                tester.write_process_event(row)

            def close(self, error):
                tester.write_close_event(error)

        tester.run_streaming_query_on_writer(ForeachWriter(), 2)

        open_events = tester.open_events()
        self.assertEqual(len(open_events), 2)
        self.assertSetEqual(set([e['epoch'] for e in open_events]), {0, 1})

        self.assertEqual(len(tester.process_events()), 2)

        close_events = tester.close_events()
        self.assertEqual(len(close_events), 2)
        self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})

    def test_streaming_foreach_with_open_returning_false(self):
        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def open(self, partition_id, epoch_id):
                tester.write_open_event(partition_id, epoch_id)
                return False

            def process(self, row):
                tester.write_process_event(row)

            def close(self, error):
                tester.write_close_event(error)

        tester.run_streaming_query_on_writer(ForeachWriter(), 2)

        self.assertEqual(len(tester.open_events()), 2)

        self.assertEqual(len(tester.process_events()), 0)  # no row was processed

        close_events = tester.close_events()
        self.assertEqual(len(close_events), 2)
        self.assertSetEqual(set([e['error'] for e in close_events]), {'None'})

    def test_streaming_foreach_without_open_method(self):
        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def process(self, row):
                tester.write_process_event(row)

            def close(self, error):
                tester.write_close_event(error)

        tester.run_streaming_query_on_writer(ForeachWriter(), 2)
        self.assertEqual(len(tester.open_events()), 0)  # no open events
        self.assertEqual(len(tester.process_events()), 2)
        self.assertEqual(len(tester.close_events()), 2)

    def test_streaming_foreach_without_close_method(self):
        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def open(self, partition_id, epoch_id):
                tester.write_open_event(partition_id, epoch_id)
                return True

            def process(self, row):
                tester.write_process_event(row)

        tester.run_streaming_query_on_writer(ForeachWriter(), 2)
        # open() is defined, so open events are expected (the original
        # "# no open events" comment here was a copy-paste mistake).
        self.assertEqual(len(tester.open_events()), 2)
        self.assertEqual(len(tester.process_events()), 2)
        self.assertEqual(len(tester.close_events()), 0)

    def test_streaming_foreach_without_open_and_close_methods(self):
        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def process(self, row):
                tester.write_process_event(row)

        tester.run_streaming_query_on_writer(ForeachWriter(), 2)
        self.assertEqual(len(tester.open_events()), 0)  # no open events
        self.assertEqual(len(tester.process_events()), 2)
        self.assertEqual(len(tester.close_events()), 0)

    def test_streaming_foreach_with_process_throwing_error(self):
        from pyspark.sql.utils import StreamingQueryException

        tester = self.ForeachWriterTester(self.spark)

        class ForeachWriter:
            def process(self, row):
                raise Exception("test error")

            def close(self, error):
                tester.write_close_event(error)

        try:
            tester.run_streaming_query_on_writer(ForeachWriter(), 1)
            self.fail("bad writer did not fail the query")  # this is not expected
        except StreamingQueryException as e:
            # TODO: Verify whether original error message is inside the exception
            pass

        self.assertEqual(len(tester.process_events()), 0)  # no row was processed
        close_events = tester.close_events()
        self.assertEqual(len(close_events), 1)
        # TODO: Verify whether original error message is inside the exception

    def test_streaming_foreach_with_invalid_writers(self):
        tester = self.ForeachWriterTester(self.spark)

        def func_with_iterator_input(iter):
            for x in iter:
                print(x)

        tester.assert_invalid_writer(func_with_iterator_input)

        class WriterWithoutProcess:
            def open(self, partition):
                pass

        tester.assert_invalid_writer(WriterWithoutProcess(), "does not have a 'process'")

        class WriterWithNonCallableProcess():
            process = True

        tester.assert_invalid_writer(WriterWithNonCallableProcess(),
                                     "'process' in provided object is not callable")

        class WriterWithNoParamProcess():
            def process(self):
                pass

        tester.assert_invalid_writer(WriterWithNoParamProcess())

        # Abstract class for tests below
        class WithProcess():
            def process(self, row):
                pass

        class WriterWithNonCallableOpen(WithProcess):
            open = True

        tester.assert_invalid_writer(WriterWithNonCallableOpen(),
                                     "'open' in provided object is not callable")

        class WriterWithNoParamOpen(WithProcess):
            def open(self):
                pass

        tester.assert_invalid_writer(WriterWithNoParamOpen())

        class WriterWithNonCallableClose(WithProcess):
            close = True

        tester.assert_invalid_writer(WriterWithNonCallableClose(),
                                     "'close' in provided object is not callable")

    def test_streaming_foreachBatch(self):
        q = None
        collected = dict()

        def collectBatch(batch_df, batch_id):
            collected[batch_id] = batch_df.collect()

        try:
            df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
            q = df.writeStream.foreachBatch(collectBatch).start()
            q.processAllAvailable()
            self.assertTrue(0 in collected)
            # BUG FIX: was assertTrue(len(collected[0]), 2), which passes the
            # expected count as the assertion *message* and can never fail for
            # a non-empty batch; assertEqual performs the intended check.
            self.assertEqual(len(collected[0]), 2)
        finally:
            if q:
                q.stop()

    def test_streaming_foreachBatch_propagates_python_errors(self):
        from pyspark.sql.utils import StreamingQueryException

        q = None

        def collectBatch(df, id):
            raise Exception("this should fail the query")

        try:
            df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
            q = df.writeStream.foreachBatch(collectBatch).start()
            q.processAllAvailable()
            self.fail("Expected a failure")
        except StreamingQueryException as e:
            self.assertTrue("this should fail" in str(e))
        finally:
            if q:
                q.stop()
# Allow running this test module directly; use xmlrunner for XML test
# reports when it is installed, otherwise fall back to the default
# unittest runner.
if __name__ == "__main__":
    import unittest
    from pyspark.sql.tests.test_streaming import *  # noqa: F401
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# Simple binary linear classifier with synthetic data.
#
# Eli Bendersky (http://eli.thegreenplace.net)
# This code is in the public domain
from __future__ import print_function
import argparse
import matplotlib.pyplot as plt
import numpy as np
from timer import Timer
import sys
from regression_lib import *
def generate_data(k, num_neg_outliers=0):
    """Generates k data items with correct labels (+1 or -1) for each item.

    k: number of data items to generate (should be even: the positive and
       negative halves are k // 2 each, so for odd k one item is dropped).
    num_neg_outliers: number of outliers for the negative samples.

    Returns X (k, 2) - k data items in 2D, and y (k, 1) - the correct label
    (+1 or -1) for each data item in X.
    """
    # Floor division keeps the counts integral on Python 3 as well;
    # np.full / np.ones require integer shapes ('/' would yield floats).
    kneg, kpos = k // 2, k // 2
    kneg_regular = kneg - num_neg_outliers

    # Generate positive data items and negative data items; for negatives, the
    # "regulars" are generated using different parameters from "outliers".
    positives = (np.full((kpos, 2), 3.0) +
                 np.random.normal(scale=0.9, size=(kpos, 2)))
    outliers = (np.hstack((np.ones((num_neg_outliers, 1)) * 3,
                           np.ones((num_neg_outliers, 1)) * 5)) +
                np.random.normal(scale=0.8, size=(num_neg_outliers, 2)))
    negatives = (np.full((kneg_regular, 2), 1.0) +
                 np.random.normal(scale=0.7, size=(kneg_regular, 2)))

    # Stack all items into the same array. To match y, first come all the
    # positives then all the negatives.
    X = np.vstack((positives, negatives, outliers))

    # Create labels. We have kpos +1s followed by kneg -1s.
    y = np.vstack((np.full((kpos, 1), 1.0), np.full((kneg, 1), -1.0)))

    # Stack X and y together so we can shuffle them together.
    Xy = np.random.permutation(np.hstack((X, y)))
    return Xy[:, 0:2], Xy[:, 2].reshape(-1, 1)
def plot_data_scatterplot(X, y, thetas=None):
    """Plots data as a scatterplot, with contour lines for thetas.

    X: (k, 2) data items.
    y: (k, 1) result (+1 or -1) for each data item in X.
    thetas: list of (theta array, label) pairs to plot contours.

    Plots +1 data points as a green x, -1 as red o. Saves the figure to
    binary.png and shows it interactively.
    """
    # Avoid the mutable-default-argument pitfall; None means "no contours".
    if thetas is None:
        thetas = []
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)
    pos = [(X[k, 0], X[k, 1]) for k in range(X.shape[0]) if y[k, 0] == 1]
    neg = [(X[k, 0], X[k, 1]) for k in range(X.shape[0]) if y[k, 0] == -1]
    ax.scatter(*zip(*pos), c='darkgreen', marker='x')
    ax.scatter(*zip(*neg), c='red', marker='o', linewidths=0)

    colors = iter(('blue', 'purple', 'black'))
    contours = []
    for theta, _ in thetas:
        # Evaluate the decision plane theta . (1, x, y) over a dense grid so
        # the zero level-set can be drawn as a contour line.
        xs = np.linspace(-2, 6, 200)
        ys = np.linspace(-2, 6, 200)
        xsgrid, ysgrid = np.meshgrid(xs, ys)
        plane = np.zeros_like(xsgrid)
        for i in range(xsgrid.shape[0]):
            for j in range(xsgrid.shape[1]):
                plane[i, j] = np.array([1, xsgrid[i, j], ysgrid[i, j]]).dot(
                    theta)
        # Bug fix: iterator.next() was removed in Python 3; use builtin next().
        contours.append(ax.contour(xsgrid, ysgrid, plane,
                                   colors=next(colors), levels=[0]))
    if thetas:
        plt.legend([cs.collections[0] for cs in contours],
                   [label for theta, label in thetas])
    fig.savefig('binary.png', dpi=80)
    plt.show()
def L01_loss(X, y, theta):
    """Computes the L0/1 loss for the data X using theta.

    X: (k, n) k rows of data items, each having n features; augmented.
    y: (k, 1) correct classifications (+1 or -1) for each item.
    theta: (n, 1) regression parameters.

    Returns the total L0/1 loss over the whole data set, i.e. the number of
    items whose predicted label disagrees with y.
    """
    return np.count_nonzero(predict_binary(X, theta) != y)
def search_best_L01_loss(X, y, theta_start=None,
                         npoints_per_t=150, tmargin=0.1):
    """Hacky exhaustive search for the best L0/1 loss for given X and y.

    X: (k, n) data items.
    y: (k, 1) result (+1 or -1) for each data item in X.
    theta_start: (3, 1) theta to start search from.
    npoints_per_t: number of points to search per dimension of theta.
    tmargin: search within [-tmargin, tmargin] of theta_start.

    The search is combinatorial over a 3D grid around theta_start, so it is
    slow and works best when the starting theta is already reasonably good;
    in that case it reliably finds the best theta in the vicinity. A natural
    extension (not implemented) would run it at several "zoom" levels, in
    the spirit of simulated annealing.

    Returns a pair best_theta, best_loss.
    """
    if theta_start is None:
        theta_start = np.array([[1], [1], [1]])
    assert theta_start.shape == (3, 1)

    best_loss = X.shape[0]
    best_theta = theta_start

    # One sampled interval per component of theta, centered on theta_start.
    component_ranges = [np.linspace(theta_start[d, 0] + tmargin,
                                    theta_start[d, 0] - tmargin,
                                    npoints_per_t)
                        for d in range(3)]
    for c0 in component_ranges[0]:
        for c1 in component_ranges[1]:
            for c2 in component_ranges[2]:
                candidate = np.array([[c0], [c1], [c2]])
                candidate_loss = L01_loss(X, y, candidate)
                if candidate_loss < best_loss:
                    best_loss = candidate_loss
                    best_theta = candidate
    return best_theta, best_loss
def run_gradient_descent_search(X, y, lossfunc, max_nsteps, learning_rate,
                                verbose=False):
    """Helper function to run GD search for the given data and loss function.

    For help on arguments, see the gradient_descent function. max_nsteps is
    an upper bound: the loop stops as soon as the loss change between two
    consecutive steps drops below a small threshold, which may happen well
    before max_nsteps iterations.
    """
    gd_iter = gradient_descent(X, y,
                               init_theta=np.random.randn(X.shape[1], 1),
                               lossfunc=lossfunc,
                               nsteps=max_nsteps,
                               learning_rate=learning_rate)
    prev_loss = sys.float_info.max
    converge_step = 0
    for step, (theta, loss) in enumerate(gd_iter):
        if verbose:
            print(step, ':', loss)
        # Convergence threshold for the change in loss; could be made
        # configurable if needed.
        if abs(loss - prev_loss) < 1e-5:
            converge_step = step
            break
        prev_loss = loss
    print('... loss converged at step {0}'.format(converge_step))
    return theta
if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--plot', action='store_true', default=False,
                           help='Produce scatterplot with fit contours')
    argparser.add_argument('--search01', action='store_true', default=False,
                           help='Run combinatorial search for best L0/1 loss')
    argparser.add_argument('--verbose-gd', action='store_true', default=False,
                           help='Verbose output from gradient-descent search')
    argparser.add_argument('--normalize', action='store_true', default=False,
                           help='Normalize data: (x-mu)/sigma.')
    args = argparser.parse_args()

    # For reproducibility
    np.random.seed(42)

    X_train, y_train = generate_data(400, num_neg_outliers=10)
    print('X_train shape:', X_train.shape)
    print('y_train shape:', y_train.shape)

    if args.normalize:
        X_train_normalized, mu, sigma = feature_normalize(X_train)
    else:
        X_train_normalized, mu, sigma = X_train, 0, 1

    # Bug fix: augment the (possibly) normalized data. Previously the raw
    # X_train was augmented here, silently making --normalize a no-op.
    # Without --normalize, X_train_normalized is X_train, so default-case
    # behavior is unchanged.
    X_train_augmented = augment_1s_column(X_train_normalized)
    print('X_train_augmented shape:', X_train_augmented.shape)

    # A pretty good theta determined by a long run of search_best_L01_loss with
    # coarse granularity. Works for the seed set above. For different data,
    # we'll need to find a new theta for good L01 loss.
    theta = np.array([-0.9647, 0.2545, 0.2416]).reshape(-1, 1)
    print('Initial theta:\n', theta)
    print('Initial loss:', L01_loss(X_train_augmented, y_train, theta))

    if args.search01:
        with Timer('searching for best L01 loss'):
            best_theta, best_loss = search_best_L01_loss(X_train_augmented,
                                                         y_train,
                                                         theta)
        print('Best theta:\n', best_theta)
        print('Best loss:', best_loss)
    else:
        best_theta, best_loss = theta, L01_loss(X_train_augmented, y_train,
                                                theta)

    # Run GD with square loss.
    square_nsteps = 5000
    square_learning_rate = 0.01
    print('Running GD with square loss for {0} steps, learning_rate={1}'.format(
        square_nsteps, square_learning_rate))
    theta_square = run_gradient_descent_search(
        X_train_augmented,
        y_train,
        lossfunc=square_loss,
        max_nsteps=square_nsteps,
        learning_rate=square_learning_rate,
        verbose=args.verbose_gd)
    print('Found theta:\n', theta_square)
    print('0/1 loss:', L01_loss(X_train_augmented, y_train, theta_square))

    # Run GD with hinge loss.
    hinge_nsteps = 5000
    hinge_learning_rate = 0.01
    print('Running GD with hinge loss for {0} steps, learning_rate={1}'.format(
        hinge_nsteps, hinge_learning_rate))
    theta_hinge = run_gradient_descent_search(
        X_train_augmented,
        y_train,
        lossfunc=hinge_loss,
        max_nsteps=hinge_nsteps,
        learning_rate=hinge_learning_rate,
        verbose=args.verbose_gd)
    print('Found theta:\n', theta_hinge)
    print('0/1 loss:', L01_loss(X_train_augmented, y_train, theta_hinge))

    if args.plot:
        plot_data_scatterplot(X_train, y_train,
                              [(best_theta, '$L_{01}$'),
                               (theta_square, '$L_2$'),
                               (theta_hinge, '$L_h$')])
| |
# Obtained from http://pypi.python.org/pypi/webcolors/
# To be removed from Weekview package.
"""
A simple library for working with the color names and color codes
defined by the HTML and CSS specifications.
An overview of HTML and CSS colors
----------------------------------
Colors on the Web are specified in `the sRGB color space`_, where each
color is made up of a red component, a green component and a blue
component. This is useful because it maps (fairly) cleanly to the red,
green and blue components of pixels on a computer display, and to the
cone cells of a human eye, which come in three sets roughly
corresponding to the wavelengths of light associated with red, green
and blue.
`The HTML 4 standard`_ defines two ways to specify sRGB colors:
* A hash mark ('#') followed by three pairs of hexadecimal digits,
specifying values for red, green and blue components in that order;
for example, ``#0099cc``. Since each pair of hexadecimal digits can
express 256 different values, this allows up to 256**3 or 16,777,216
unique colors to be specified (though, due to differences in display
technology, not all of these colors may be clearly distinguished on
any given physical display).
* A set of predefined color names which correspond to specific
hexadecimal values; for example, ``white``. HTML 4 defines sixteen
such colors.
`The CSS 2 standard`_ allows any valid HTML 4 color specification, and
adds three new ways to specify sRGB colors:
* A hash mark followed by three hexadecimal digits, which is expanded
into three hexadecimal pairs by repeating each digit; thus ``#09c``
is equivalent to ``#0099cc``.
* The string 'rgb', followed by parentheses, between which are three
numeric values each between 0 and 255, inclusive, which are taken to
be the values of the red, green and blue components in that order;
for example, ``rgb(0, 153, 204)``.
* The same as above, except using percentages instead of numeric
values; for example, ``rgb(0%, 60%, 80%)``.
`The CSS 2.1 revision`_ does not add any new methods of specifying
sRGB colors, but does add one additional named color.
`The CSS 3 color module`_ (currently a W3C Candidate Recommendation)
adds one new way to specify sRGB colors:
* A hue-saturation-lightness triple (HSL), using the construct
``hsl()``.
It also adds support for variable opacity of colors, by allowing the
specification of alpha-channel information, through the ``rgba()`` and
``hsla()`` constructs, which are identical to ``rgb()`` and ``hsl()``
with one exception: a fourth value is supplied, indicating the level
of opacity from ``0.0`` (completely transparent) to ``1.0``
(completely opaque). Though not technically a color, the keyword
``transparent`` is also made available in lieu of a color value, and
corresponds to ``rgba(0,0,0,0)``.
Additionally, CSS3 defines a new set of color names; this set is taken
directly from the named colors defined for SVG (Scalable Vector
Graphics) markup, and is a proper superset of the named colors defined
in CSS 2.1. This set also has significant overlap with traditional X11
color sets as defined by the ``rgb.txt`` file on many Unix and
Unix-like operating systems, though the correspondence is not exact;
the set of X11 colors is not standardized, and the set of CSS3 colors
contains some definitions which diverge significantly from customary
X11 definitions (for example, CSS3's ``green`` is not equivalent to
X11's ``green``; the value which X11 designates ``green`` is
designated ``lime`` in CSS3).
.. _the sRGB color space: http://www.w3.org/Graphics/Color/sRGB
.. _The HTML 4 standard: http://www.w3.org/TR/html401/types.html#h-6.5
.. _The CSS 2 standard: http://www.w3.org/TR/REC-CSS2/syndata.html#value-def-color
.. _The CSS 2.1 revision: http://www.w3.org/TR/CSS21/
.. _The CSS 3 color module: http://www.w3.org/TR/css3-color/
What this module supports
-------------------------
The mappings and functions within this module support the following
methods of specifying sRGB colors, and conversions between them:
* Six-digit hexadecimal.
* Three-digit hexadecimal.
* Integer ``rgb()`` triplet.
* Percentage ``rgb()`` triplet.
* Varying selections of predefined color names (see below).
This module does not support ``hsl()`` triplets, nor does it support
opacity/alpha-channel information via ``rgba()`` or ``hsla()``.
If you need to convert between RGB-specified colors and HSL-specified
colors, or colors specified via other means, consult `the colorsys
module`_ in the Python standard library, which can perform conversions
amongst several common color spaces.
.. _the colorsys module: http://docs.python.org/library/colorsys.html
Normalization
-------------
For colors specified via hexadecimal values, this module will accept
input in the following formats:
* A hash mark (#) followed by three hexadecimal digits, where letters
may be upper- or lower-case.
* A hash mark (#) followed by six hexadecimal digits, where letters
may be upper- or lower-case.
For output which consists of a color specified via hexadecimal values,
and for functions which perform intermediate conversion to hexadecimal
before returning a result in another format, this module always
normalizes such values to the following format:
* A hash mark (#) followed by six hexadecimal digits, with letters
forced to lower-case.
The function ``normalize_hex()`` in this module can be used to perform
this normalization manually if desired; see its documentation for an
explanation of the normalization process.
For colors specified via predefined names, this module will accept
input in the following formats:
* An entirely lower-case name, such as ``aliceblue``.
* A name using initial capitals, such as ``AliceBlue``.
For output which consists of a color specified via a predefined name,
and for functions which perform intermediate conversion to a
predefined name before returning a result in another format, this
module always normalizes such values to be entirely lower-case.
Mappings of color names
-----------------------
For each set of defined color names -- HTML 4, CSS 2, CSS 2.1 and CSS
3 -- this module exports two mappings: one of normalized color names
to normalized hexadecimal values, and one of normalized hexadecimal
values to normalized color names. These eight mappings are as follows:
``html4_names_to_hex``
Mapping of normalized HTML 4 color names to normalized hexadecimal
values.
``html4_hex_to_names``
Mapping of normalized hexadecimal values to normalized HTML 4
color names.
``css2_names_to_hex``
Mapping of normalized CSS 2 color names to normalized hexadecimal
values. Because CSS 2 defines the same set of named colors as HTML
4, this is merely an alias for ``html4_names_to_hex``.
``css2_hex_to_names``
    Mapping of normalized hexadecimal values to normalized CSS 2 color
    names. For the reasons described above, this is merely an alias for
    ``html4_hex_to_names``.
``css21_names_to_hex``
Mapping of normalized CSS 2.1 color names to normalized
hexadecimal values. This is identical to ``html4_names_to_hex``,
except for one addition: ``orange``.
``css21_hex_to_names``
Mapping of normalized hexadecimal values to normalized CSS 2.1
color names. As above, this is identical to ``html4_hex_to_names``
except for the addition of ``orange``.
``css3_names_to_hex``
Mapping of normalized CSS3 color names to normalized hexadecimal
values.
``css3_hex_to_names``
Mapping of normalized hexadecimal values to normalized CSS3 color
names.
"""
import math
import re
def _reversedict(d):
"""
Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped.
"""
return dict(zip(d.values(), d.keys()))
# Matches a '#' followed by exactly three or exactly six hexadecimal
# digits (upper- or lower-case); the digits are captured in group 1.
HEX_COLOR_RE = re.compile(r'^#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$')

# Specification identifiers accepted by the ``spec`` keyword argument of
# the lookup functions below.
SUPPORTED_SPECIFICATIONS = ('html4', 'css2', 'css21', 'css3')
######################################################################
# Mappings of color names to normalized hexadecimal color values.
######################################################################
# The sixteen color names defined by HTML 4 (section 6.5 of the spec).
html4_names_to_hex = {
    'aqua': '#00ffff',
    'black': '#000000',
    'blue': '#0000ff',
    'fuchsia': '#ff00ff',
    # Bug fix: HTML 4 defines 'gray', not the British spelling 'grey';
    # see http://www.w3.org/TR/html401/types.html#h-6.5
    'gray': '#808080',
    'green': '#008000',
    'lime': '#00ff00',
    'maroon': '#800000',
    'navy': '#000080',
    'olive': '#808000',
    'purple': '#800080',
    'red': '#ff0000',
    'silver': '#c0c0c0',
    'teal': '#008080',
    'white': '#ffffff',
    'yellow': '#ffff00'
    }

# CSS 2 defines exactly the same set of named colors as HTML 4.
css2_names_to_hex = html4_names_to_hex

# CSS 2.1 adds a single name, 'orange', to the HTML 4/CSS 2 set.
css21_names_to_hex = dict(html4_names_to_hex, orange='#ffa500')
# The extended color keywords defined by the CSS 3 color module, taken
# from the SVG 1.0 color keyword names.
css3_names_to_hex = {
    'aliceblue': '#f0f8ff',
    'antiquewhite': '#faebd7',
    'aqua': '#00ffff',
    'aquamarine': '#7fffd4',
    'azure': '#f0ffff',
    'beige': '#f5f5dc',
    'bisque': '#ffe4c4',
    'black': '#000000',
    'blanchedalmond': '#ffebcd',
    'blue': '#0000ff',
    'blueviolet': '#8a2be2',
    'brown': '#a52a2a',
    'burlywood': '#deb887',
    'cadetblue': '#5f9ea0',
    'chartreuse': '#7fff00',
    'chocolate': '#d2691e',
    'coral': '#ff7f50',
    'cornflowerblue': '#6495ed',
    'cornsilk': '#fff8dc',
    'crimson': '#dc143c',
    'cyan': '#00ffff',
    'darkblue': '#00008b',
    'darkcyan': '#008b8b',
    'darkgoldenrod': '#b8860b',
    'darkgray': '#a9a9a9',
    'darkgrey': '#a9a9a9',
    'darkgreen': '#006400',
    'darkkhaki': '#bdb76b',
    'darkmagenta': '#8b008b',
    'darkolivegreen': '#556b2f',
    'darkorange': '#ff8c00',
    'darkorchid': '#9932cc',
    'darkred': '#8b0000',
    'darksalmon': '#e9967a',
    'darkseagreen': '#8fbc8f',
    'darkslateblue': '#483d8b',
    'darkslategray': '#2f4f4f',
    'darkslategrey': '#2f4f4f',
    'darkturquoise': '#00ced1',
    'darkviolet': '#9400d3',
    'deeppink': '#ff1493',
    'deepskyblue': '#00bfff',
    'dimgray': '#696969',
    'dimgrey': '#696969',
    'dodgerblue': '#1e90ff',
    'firebrick': '#b22222',
    'floralwhite': '#fffaf0',
    'forestgreen': '#228b22',
    'fuchsia': '#ff00ff',
    'gainsboro': '#dcdcdc',
    'ghostwhite': '#f8f8ff',
    'gold': '#ffd700',
    'goldenrod': '#daa520',
    'gray': '#808080',
    'grey': '#808080',
    'green': '#008000',
    'greenyellow': '#adff2f',
    'honeydew': '#f0fff0',
    'hotpink': '#ff69b4',
    'indianred': '#cd5c5c',
    'indigo': '#4b0082',
    'ivory': '#fffff0',
    'khaki': '#f0e68c',
    'lavender': '#e6e6fa',
    'lavenderblush': '#fff0f5',
    'lawngreen': '#7cfc00',
    'lemonchiffon': '#fffacd',
    'lightblue': '#add8e6',
    'lightcoral': '#f08080',
    'lightcyan': '#e0ffff',
    'lightgoldenrodyellow': '#fafad2',
    'lightgray': '#d3d3d3',
    'lightgrey': '#d3d3d3',
    'lightgreen': '#90ee90',
    'lightpink': '#ffb6c1',
    'lightsalmon': '#ffa07a',
    'lightseagreen': '#20b2aa',
    'lightskyblue': '#87cefa',
    'lightslategray': '#778899',
    'lightslategrey': '#778899',
    'lightsteelblue': '#b0c4de',
    'lightyellow': '#ffffe0',
    'lime': '#00ff00',
    'limegreen': '#32cd32',
    'linen': '#faf0e6',
    'magenta': '#ff00ff',
    'maroon': '#800000',
    'mediumaquamarine': '#66cdaa',
    'mediumblue': '#0000cd',
    'mediumorchid': '#ba55d3',
    # Bug fix: CSS3/SVG defines mediumpurple as #9370db (was #9370d8).
    'mediumpurple': '#9370db',
    'mediumseagreen': '#3cb371',
    'mediumslateblue': '#7b68ee',
    'mediumspringgreen': '#00fa9a',
    'mediumturquoise': '#48d1cc',
    'mediumvioletred': '#c71585',
    'midnightblue': '#191970',
    'mintcream': '#f5fffa',
    'mistyrose': '#ffe4e1',
    'moccasin': '#ffe4b5',
    'navajowhite': '#ffdead',
    'navy': '#000080',
    'oldlace': '#fdf5e6',
    'olive': '#808000',
    'olivedrab': '#6b8e23',
    'orange': '#ffa500',
    'orangered': '#ff4500',
    'orchid': '#da70d6',
    'palegoldenrod': '#eee8aa',
    'palegreen': '#98fb98',
    'paleturquoise': '#afeeee',
    # Bug fix: CSS3/SVG defines palevioletred as #db7093 (was #d87093).
    'palevioletred': '#db7093',
    'papayawhip': '#ffefd5',
    'peachpuff': '#ffdab9',
    'peru': '#cd853f',
    'pink': '#ffc0cb',
    'plum': '#dda0dd',
    'powderblue': '#b0e0e6',
    'purple': '#800080',
    'red': '#ff0000',
    'rosybrown': '#bc8f8f',
    'royalblue': '#4169e1',
    'saddlebrown': '#8b4513',
    'salmon': '#fa8072',
    'sandybrown': '#f4a460',
    'seagreen': '#2e8b57',
    'seashell': '#fff5ee',
    'sienna': '#a0522d',
    'silver': '#c0c0c0',
    'skyblue': '#87ceeb',
    'slateblue': '#6a5acd',
    'slategray': '#708090',
    'slategrey': '#708090',
    'snow': '#fffafa',
    'springgreen': '#00ff7f',
    'steelblue': '#4682b4',
    'tan': '#d2b48c',
    'teal': '#008080',
    'thistle': '#d8bfd8',
    'tomato': '#ff6347',
    'turquoise': '#40e0d0',
    'violet': '#ee82ee',
    'wheat': '#f5deb3',
    'white': '#ffffff',
    'whitesmoke': '#f5f5f5',
    'yellow': '#ffff00',
    'yellowgreen': '#9acd32',
    }
######################################################################
# Mappings of normalized hexadecimal color values to color names.
######################################################################
# Reverse (hex -> name) mappings, derived mechanically from the
# name -> hex tables above.
html4_hex_to_names = _reversedict(html4_names_to_hex)

# CSS 2 shares HTML 4's palette, so its reverse mapping is an alias too.
css2_hex_to_names = html4_hex_to_names

css21_hex_to_names = _reversedict(css21_names_to_hex)

css3_hex_to_names = _reversedict(css3_names_to_hex)
######################################################################
# Normalization routines.
######################################################################
def normalize_hex(hex_value):
    """
    Normalize a hexadecimal color value to the following form and
    return the result::

        #[a-f0-9]{6}

    In other words, the following transformations are applied as
    needed:

    * If the value contains only three hexadecimal digits, it is
      expanded to six.

    * The value is normalized to lower-case.

    If the supplied value cannot be interpreted as a hexadecimal color
    value, ``ValueError`` is raised.

    Examples:

    >>> normalize_hex('#09c')
    '#0099cc'
    >>> normalize_hex('#0099cc')
    '#0099cc'
    >>> normalize_hex('#09C')
    '#0099cc'
    >>> normalize_hex('#0099CC')
    '#0099cc'
    >>> normalize_hex('0099cc')
    Traceback (most recent call last):
        ...
    ValueError: '0099cc' is not a valid hexadecimal color value.
    >>> normalize_hex('#0099QX')
    Traceback (most recent call last):
        ...
    ValueError: '#0099QX' is not a valid hexadecimal color value.
    >>> normalize_hex('foobarbaz')
    Traceback (most recent call last):
        ...
    ValueError: 'foobarbaz' is not a valid hexadecimal color value.
    >>> normalize_hex('#0')
    Traceback (most recent call last):
        ...
    ValueError: '#0' is not a valid hexadecimal color value.
    """
    # '#' followed by exactly 3 or 6 hex digits; the digits are group 1.
    match = re.match(r'^#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$', hex_value)
    if match is None:
        raise ValueError("'%s' is not a valid hexadecimal color value." % hex_value)
    hex_digits = match.group(1)
    if len(hex_digits) == 3:
        # Shorthand form: each digit is doubled to expand to six digits.
        hex_digits = ''.join(2 * digit for digit in hex_digits)
    return '#%s' % hex_digits.lower()
######################################################################
# Conversions from color names to various formats.
######################################################################
def name_to_hex(name, spec='css3'):
    """
    Convert a color name to a normalized hexadecimal color value.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    The name is lower-cased before lookup; if the given specification
    does not define a color of that name, ``ValueError`` is raised.

    Examples:

    >>> name_to_hex('deepskyblue')
    '#00bfff'
    >>> name_to_hex('DeepSkyBlue')
    '#00bfff'
    >>> name_to_hex('white', spec='html4')
    '#ffffff'
    >>> name_to_hex('white', spec='css2')
    '#ffffff'
    >>> name_to_hex('white', spec='css21')
    '#ffffff'
    >>> name_to_hex('white', spec='css3')
    '#ffffff'
    >>> name_to_hex('white', spec='css4')
    Traceback (most recent call last):
        ...
    TypeError: 'css4' is not a supported specification for color name lookups; supported specifications are: html4, css2, css21, css3.
    >>> name_to_hex('deepskyblue', spec='css2')
    Traceback (most recent call last):
        ...
    ValueError: 'deepskyblue' is not defined as a named color in css2.
    """
    if spec not in SUPPORTED_SPECIFICATIONS:
        raise TypeError(
            "'%s' is not a supported specification for color name lookups; "
            "supported specifications are: %s." % (spec, ', '.join(SUPPORTED_SPECIFICATIONS)))
    # Pick the matching <spec>_names_to_hex mapping from module scope.
    mapping = globals()['%s_names_to_hex' % spec]
    normalized = name.lower()
    if normalized not in mapping:
        raise ValueError("'%s' is not defined as a named color in %s." % (name, spec))
    return mapping[normalized]
def name_to_rgb(name, spec='css3'):
    """
    Convert a color name to a 3-tuple of integers suitable for use in
    an ``rgb()`` triplet specifying that color.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    The name is lower-cased before lookup; if the given specification
    does not define a color of that name, ``ValueError`` is raised.

    Examples:

    >>> name_to_rgb('navy')
    (0, 0, 128)
    >>> name_to_rgb('cadetblue')
    (95, 158, 160)
    >>> name_to_rgb('cadetblue', spec='html4')
    Traceback (most recent call last):
        ...
    ValueError: 'cadetblue' is not defined as a named color in html4.
    """
    hex_value = name_to_hex(name, spec=spec)
    return hex_to_rgb(hex_value)
def name_to_rgb_percent(name, spec='css3'):
    """
    Convert a color name to a 3-tuple of percentages suitable for use
    in an ``rgb()`` triplet specifying that color.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    The name is lower-cased before lookup; if the given specification
    does not define a color of that name, ``ValueError`` is raised.

    Examples:

    >>> name_to_rgb_percent('white')
    ('100%', '100%', '100%')
    >>> name_to_rgb_percent('navy')
    ('0%', '0%', '50%')
    >>> name_to_rgb_percent('goldenrod')
    ('85.49%', '64.71%', '12.5%')
    """
    rgb_triplet = name_to_rgb(name, spec=spec)
    return rgb_to_rgb_percent(rgb_triplet)
######################################################################
# Conversions from hexadecimal color values to various formats.
######################################################################
def hex_to_name(hex_value, spec='css3'):
    """
    Convert a hexadecimal color value to its corresponding normalized
    color name, if any such name exists.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    The hexadecimal value is normalized before lookup; if the given
    specification has no name for the value, ``ValueError`` is raised.

    Examples:

    >>> hex_to_name('#000080')
    'navy'
    >>> hex_to_name('#000080', spec='html4')
    'navy'
    >>> hex_to_name('#000080', spec='css2')
    'navy'
    >>> hex_to_name('#000080', spec='css21')
    'navy'
    >>> hex_to_name('#8b4513')
    'saddlebrown'
    >>> hex_to_name('#8b4513', spec='html4')
    Traceback (most recent call last):
        ...
    ValueError: '#8b4513' has no defined color name in html4.
    >>> hex_to_name('#8b4513', spec='css4')
    Traceback (most recent call last):
        ...
    TypeError: 'css4' is not a supported specification for color name lookups; supported specifications are: html4, css2, css21, css3.
    """
    if spec not in SUPPORTED_SPECIFICATIONS:
        raise TypeError(
            "'%s' is not a supported specification for color name lookups; "
            "supported specifications are: %s." % (spec, ', '.join(SUPPORTED_SPECIFICATIONS)))
    # Pick the matching <spec>_hex_to_names mapping from module scope.
    mapping = globals()['%s_hex_to_names' % spec]
    normalized = normalize_hex(hex_value)
    if normalized not in mapping:
        raise ValueError("'%s' has no defined color name in %s." % (hex_value, spec))
    return mapping[normalized]
def hex_to_rgb(hex_value):
    """
    Convert a hexadecimal color value to a 3-tuple of integers
    suitable for use in an ``rgb()`` triplet specifying that color.

    The hexadecimal value is normalized before being converted.

    Examples:

    >>> hex_to_rgb('#000080')
    (0, 0, 128)
    >>> hex_to_rgb('#ffff00')
    (255, 255, 0)
    >>> hex_to_rgb('#f00')
    (255, 0, 0)
    >>> hex_to_rgb('#deb887')
    (222, 184, 135)
    """
    hex_digits = normalize_hex(hex_value)
    # Parse each two-digit component; offsets skip the leading '#'.
    return tuple(int(hex_digits[i:i + 2], 16) for i in (1, 3, 5))
def hex_to_rgb_percent(hex_value):
    """
    Convert a hexadecimal color value to a 3-tuple of percentages
    suitable for use in an ``rgb()`` triplet representing that color.

    The hexadecimal value is normalized before converting.

    Examples:

    >>> hex_to_rgb_percent('#ffffff')
    ('100%', '100%', '100%')
    >>> hex_to_rgb_percent('#000080')
    ('0%', '0%', '50%')
    """
    rgb_triplet = hex_to_rgb(hex_value)
    return rgb_to_rgb_percent(rgb_triplet)
######################################################################
# Conversions from integer rgb() triplets to various formats.
######################################################################
def rgb_to_name(rgb_triplet, spec='css3'):
    """
    Convert a 3-tuple of integers, suitable for use in an ``rgb()``
    color triplet, to its corresponding normalized color name, if any
    such name exists.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    If there is no matching name, ``ValueError`` is raised.

    Examples:

    >>> rgb_to_name((0, 0, 0))
    'black'
    >>> rgb_to_name((0, 0, 128))
    'navy'
    >>> rgb_to_name((95, 158, 160))
    'cadetblue'
    """
    hex_value = rgb_to_hex(rgb_triplet)
    return hex_to_name(hex_value, spec=spec)
def rgb_to_hex(rgb_triplet):
    """
    Convert a 3-tuple of integers, suitable for use in an ``rgb()``
    color triplet, to a normalized hexadecimal value for that color.

    Examples:

    >>> rgb_to_hex((255, 255, 255))
    '#ffffff'
    >>> rgb_to_hex((0, 0, 128))
    '#000080'
    >>> rgb_to_hex((33, 56, 192))
    '#2138c0'
    """
    # Each component is rendered as a zero-padded, lower-case hex pair.
    return '#{0:02x}{1:02x}{2:02x}'.format(*rgb_triplet)
def rgb_to_rgb_percent(rgb_triplet):
    """
    Convert a 3-tuple of integers, suitable for use in an ``rgb()``
    color triplet, to a 3-tuple of percentages suitable for use in
    representing that color.

    There is a precision trade-off here: a handful of common integer
    values are special-cased to exact percentage strings (for example,
    128 always becomes '50%' and 32 always becomes '12.5%'); every
    other value goes through a Python ``float`` rounded to two decimal
    places, which can lose some precision.

    Examples:

    >>> rgb_to_rgb_percent((255, 255, 255))
    ('100%', '100%', '100%')
    >>> rgb_to_rgb_percent((0, 0, 128))
    ('0%', '0%', '50%')
    >>> rgb_to_rgb_percent((33, 56, 192))
    ('12.94%', '21.96%', '75.29%')
    >>> rgb_to_rgb_percent((64, 32, 16))
    ('25%', '12.5%', '6.25%')
    """
    # Exact strings for 0 and 256 / 2**n with n in 0..4, to keep the
    # common values precise.
    specials = {255: '100%', 128: '50%', 64: '25%',
                32: '12.5%', 16: '6.25%', 0: '0%'}

    def _component_to_percent(component):
        return specials.get(component, '%.02f%%' % ((component / 255.0) * 100))

    return tuple(_component_to_percent(component) for component in rgb_triplet)
######################################################################
# Conversions from percentage rgb() triplets to various formats.
######################################################################
def rgb_percent_to_name(rgb_percent_triplet, spec='css3'):
    """
    Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
    color triplet, to its corresponding normalized color name, if any
    such name exists.

    Which specification's set of names is consulted is controlled by
    the optional keyword argument ``spec``; it accepts ``html4``,
    ``css2``, ``css21`` and ``css3`` (the default).

    If there is no matching name, ``ValueError`` is raised.

    Examples:

    >>> rgb_percent_to_name(('0%', '0%', '0%'))
    'black'
    >>> rgb_percent_to_name(('0%', '0%', '50%'))
    'navy'
    >>> rgb_percent_to_name(('85.49%', '64.71%', '12.5%'))
    'goldenrod'
    """
    rgb_triplet = rgb_percent_to_rgb(rgb_percent_triplet)
    return rgb_to_name(rgb_triplet, spec=spec)
def rgb_percent_to_hex(rgb_percent_triplet):
    """
    Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
    color triplet, to a normalized hexadecimal color value for that
    color.

    Examples:

    >>> rgb_percent_to_hex(('100%', '100%', '0%'))
    '#ffff00'
    >>> rgb_percent_to_hex(('0%', '0%', '50%'))
    '#000080'
    >>> rgb_percent_to_hex(('85.49%', '64.71%', '12.5%'))
    '#daa520'
    """
    rgb_triplet = rgb_percent_to_rgb(rgb_percent_triplet)
    return rgb_to_hex(rgb_triplet)
def _percent_to_integer(percent):
"""
Internal helper for converting a percentage value to an integer
between 0 and 255 inclusive.
"""
num = float(percent.split('%')[0]) / 100.0 * 255
e = num - math.floor(num)
return e < 0.5 and int(math.floor(num)) or int(math.ceil(num))
def rgb_percent_to_rgb(rgb_percent_triplet):
    """
    Convert a 3-tuple of percentages, suitable for use in an ``rgb()``
    color triplet, to a 3-tuple of integers suitable for use in
    representing that color.

    Some precision may be lost in this conversion. See the note
    regarding precision for ``rgb_to_rgb_percent()`` for details;
    generally speaking, the following is true for any 3-tuple ``t`` of
    integers in the range 0...255 inclusive::

        t == rgb_percent_to_rgb(rgb_to_rgb_percent(t))

    Examples:

    >>> rgb_percent_to_rgb(('100%', '100%', '100%'))
    (255, 255, 255)
    >>> rgb_percent_to_rgb(('0%', '0%', '50%'))
    (0, 0, 128)
    >>> rgb_percent_to_rgb(('25%', '12.5%', '6.25%'))
    (64, 32, 16)
    >>> rgb_percent_to_rgb(('12.94%', '21.96%', '75.29%'))
    (33, 56, 192)
    """
    return tuple(_percent_to_integer(percent)
                 for percent in rgb_percent_triplet)
# When run as a script, execute the doctests embedded in the docstrings
# above as a lightweight self-test.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| |
import datetime
import logging
from ..fields import (
Base64Field,
BodyContentAttributedValueField,
BooleanField,
CharField,
Choice,
ChoiceField,
DateTimeBackedDateField,
DateTimeField,
EmailAddressAttributedValueField,
EmailAddressesField,
EmailAddressField,
EWSElementField,
EWSElementListField,
IdElementField,
MailboxField,
MailboxListField,
MemberListField,
PersonaPhoneNumberField,
PhoneNumberAttributedValueField,
PhoneNumberField,
PhysicalAddressField,
PostalAddressAttributedValueField,
StringAttributedValueField,
TextField,
TextListField,
URIField,
)
from ..properties import Address, Attribution, CompleteName, EmailAddress, FolderId, IdChangeKeyMixIn, PersonaId
from ..util import TNS
from ..version import EXCHANGE_2010, EXCHANGE_2010_SP2
from .item import Item
log = logging.getLogger(__name__)
class Contact(Item):
    """An Exchange contact item.

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/contact

    Each attribute maps to the EWS SOAP element named by its
    ``field_uri``. Fields with ``supported_from`` require at least that
    Exchange server version.
    """

    ELEMENT_NAME = "Contact"

    file_as = TextField(field_uri="contacts:FileAs")
    # Tells the server how to derive 'file_as' from the name/company fields.
    file_as_mapping = ChoiceField(
        field_uri="contacts:FileAsMapping",
        choices={
            Choice("None"),
            Choice("LastCommaFirst"),
            Choice("FirstSpaceLast"),
            Choice("Company"),
            Choice("LastCommaFirstCompany"),
            Choice("CompanyLastFirst"),
            Choice("LastFirst"),
            Choice("LastFirstCompany"),
            Choice("CompanyLastCommaFirst"),
            Choice("LastFirstSuffix"),
            Choice("LastSpaceFirstCompany"),
            Choice("CompanyLastSpaceFirst"),
            Choice("LastSpaceFirst"),
            Choice("DisplayName"),
            Choice("FirstName"),
            Choice("LastFirstMiddleSuffix"),
            Choice("LastName"),
            Choice("Empty"),
        },
    )
    display_name = TextField(field_uri="contacts:DisplayName", is_required=True)
    given_name = CharField(field_uri="contacts:GivenName")
    initials = TextField(field_uri="contacts:Initials")
    middle_name = CharField(field_uri="contacts:MiddleName")
    nickname = TextField(field_uri="contacts:Nickname")
    complete_name = EWSElementField(field_uri="contacts:CompleteName", value_cls=CompleteName, is_read_only=True)
    company_name = TextField(field_uri="contacts:CompanyName")
    email_addresses = EmailAddressesField(field_uri="contacts:EmailAddress")
    physical_addresses = PhysicalAddressField(field_uri="contacts:PhysicalAddress")
    phone_numbers = PhoneNumberField(field_uri="contacts:PhoneNumber")
    assistant_name = TextField(field_uri="contacts:AssistantName")
    # Date-only value stored as a datetime on the server; 11:59 is the
    # placeholder time used when round-tripping the date.
    birthday = DateTimeBackedDateField(field_uri="contacts:Birthday", default_time=datetime.time(11, 59))
    business_homepage = URIField(field_uri="contacts:BusinessHomePage")
    children = TextListField(field_uri="contacts:Children")
    companies = TextListField(field_uri="contacts:Companies", is_searchable=False)
    contact_source = ChoiceField(
        field_uri="contacts:ContactSource", choices={Choice("Store"), Choice("ActiveDirectory")}, is_read_only=True
    )
    department = TextField(field_uri="contacts:Department")
    generation = TextField(field_uri="contacts:Generation")
    im_addresses = CharField(field_uri="contacts:ImAddresses", is_read_only=True)
    job_title = TextField(field_uri="contacts:JobTitle")
    manager = TextField(field_uri="contacts:Manager")
    mileage = TextField(field_uri="contacts:Mileage")
    office = TextField(field_uri="contacts:OfficeLocation")
    postal_address_index = ChoiceField(
        field_uri="contacts:PostalAddressIndex",
        choices={Choice("Business"), Choice("Home"), Choice("Other"), Choice("None")},
        default="None",
        is_required_after_save=True,
    )
    profession = TextField(field_uri="contacts:Profession")
    spouse_name = TextField(field_uri="contacts:SpouseName")
    surname = CharField(field_uri="contacts:Surname")
    wedding_anniversary = DateTimeBackedDateField(
        field_uri="contacts:WeddingAnniversary", default_time=datetime.time(11, 59)
    )
    has_picture = BooleanField(field_uri="contacts:HasPicture", supported_from=EXCHANGE_2010, is_read_only=True)
    phonetic_full_name = TextField(
        field_uri="contacts:PhoneticFullName", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    phonetic_first_name = TextField(
        field_uri="contacts:PhoneticFirstName", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    phonetic_last_name = TextField(
        field_uri="contacts:PhoneticLastName", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    email_alias = EmailAddressField(field_uri="contacts:Alias", is_read_only=True, supported_from=EXCHANGE_2010_SP2)
    # 'notes' is documented in MSDN but apparently unused. Writing to it raises ErrorInvalidPropertyRequest. OWA
    # puts entries from the 'notes' form field into the 'body' field instead.
    notes = CharField(field_uri="contacts:Notes", supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    # 'photo' is documented in MSDN but apparently unused. Writing to it raises ErrorInvalidPropertyRequest. OWA
    # adds photos as FileAttachments on the contact item (with 'is_contact_photo=True'), which automatically flips
    # the 'has_picture' field.
    photo = Base64Field(field_uri="contacts:Photo", supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    user_smime_certificate = Base64Field(
        field_uri="contacts:UserSMIMECertificate", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    ms_exchange_certificate = Base64Field(
        field_uri="contacts:MSExchangeCertificate", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    directory_id = TextField(field_uri="contacts:DirectoryId", supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    manager_mailbox = MailboxField(
        field_uri="contacts:ManagerMailbox", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
    direct_reports = MailboxListField(
        field_uri="contacts:DirectReports", supported_from=EXCHANGE_2010_SP2, is_read_only=True
    )
class Persona(IdChangeKeyMixIn):
    """An Exchange persona.

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/persona

    Each attribute maps to the EWS SOAP element named by its
    ``field_uri`` in the TNS namespace.
    """

    ELEMENT_NAME = "Persona"
    ID_ELEMENT_CLS = PersonaId

    _id = IdElementField(field_uri="persona:PersonaId", value_cls=ID_ELEMENT_CLS, namespace=TNS)
    persona_type = CharField(field_uri="persona:PersonaType")
    persona_object_type = TextField(field_uri="persona:PersonaObjectStatus")
    creation_time = DateTimeField(field_uri="persona:CreationTime")
    bodies = BodyContentAttributedValueField(field_uri="persona:Bodies")
    display_name_first_last_sort_key = TextField(field_uri="persona:DisplayNameFirstLastSortKey")
    display_name_last_first_sort_key = TextField(field_uri="persona:DisplayNameLastFirstSortKey")
    company_sort_key = TextField(field_uri="persona:CompanyNameSortKey")
    home_sort_key = TextField(field_uri="persona:HomeCitySortKey")
    work_city_sort_key = TextField(field_uri="persona:WorkCitySortKey")
    display_name_first_last_header = CharField(field_uri="persona:DisplayNameFirstLastHeader")
    display_name_last_first_header = CharField(field_uri="persona:DisplayNameLastFirstHeader")
    file_as_header = TextField(field_uri="persona:FileAsHeader")
    display_name = CharField(field_uri="persona:DisplayName")
    display_name_first_last = CharField(field_uri="persona:DisplayNameFirstLast")
    display_name_last_first = CharField(field_uri="persona:DisplayNameLastFirst")
    file_as = CharField(field_uri="persona:FileAs")
    file_as_id = TextField(field_uri="persona:FileAsId")
    display_name_prefix = CharField(field_uri="persona:DisplayNamePrefix")
    given_name = CharField(field_uri="persona:GivenName")
    middle_name = CharField(field_uri="persona:MiddleName")
    surname = CharField(field_uri="persona:Surname")
    generation = CharField(field_uri="persona:Generation")
    nickname = TextField(field_uri="persona:Nickname")
    yomi_company_name = TextField(field_uri="persona:YomiCompanyName")
    yomi_first_name = TextField(field_uri="persona:YomiFirstName")
    yomi_last_name = TextField(field_uri="persona:YomiLastName")
    title = CharField(field_uri="persona:Title")
    department = TextField(field_uri="persona:Department")
    company_name = CharField(field_uri="persona:CompanyName")
    email_address = EWSElementField(field_uri="persona:EmailAddress", value_cls=EmailAddress)
    email_addresses = EWSElementListField(field_uri="persona:EmailAddresses", value_cls=Address)
    # NOTE(review): attribute name breaks the snake_case convention used by
    # every other field here, but renaming it would change the public API.
    PhoneNumber = PersonaPhoneNumberField(field_uri="persona:PhoneNumber")
    im_address = CharField(field_uri="persona:ImAddress")
    home_city = CharField(field_uri="persona:HomeCity")
    work_city = CharField(field_uri="persona:WorkCity")
    relevance_score = CharField(field_uri="persona:RelevanceScore")
    folder_ids = EWSElementListField(field_uri="persona:FolderIds", value_cls=FolderId)
    attributions = EWSElementListField(field_uri="persona:Attributions", value_cls=Attribution)
    display_names = StringAttributedValueField(field_uri="persona:DisplayNames")
    file_ases = StringAttributedValueField(field_uri="persona:FileAses")
    file_as_ids = StringAttributedValueField(field_uri="persona:FileAsIds")
    display_name_prefixes = StringAttributedValueField(field_uri="persona:DisplayNamePrefixes")
    given_names = StringAttributedValueField(field_uri="persona:GivenNames")
    middle_names = StringAttributedValueField(field_uri="persona:MiddleNames")
    surnames = StringAttributedValueField(field_uri="persona:Surnames")
    generations = StringAttributedValueField(field_uri="persona:Generations")
    nicknames = StringAttributedValueField(field_uri="persona:Nicknames")
    initials = StringAttributedValueField(field_uri="persona:Initials")
    yomi_company_names = StringAttributedValueField(field_uri="persona:YomiCompanyNames")
    yomi_first_names = StringAttributedValueField(field_uri="persona:YomiFirstNames")
    yomi_last_names = StringAttributedValueField(field_uri="persona:YomiLastNames")
    business_phone_numbers = PhoneNumberAttributedValueField(field_uri="persona:BusinessPhoneNumbers")
    business_phone_numbers2 = PhoneNumberAttributedValueField(field_uri="persona:BusinessPhoneNumbers2")
    home_phones = PhoneNumberAttributedValueField(field_uri="persona:HomePhones")
    home_phones2 = PhoneNumberAttributedValueField(field_uri="persona:HomePhones2")
    mobile_phones = PhoneNumberAttributedValueField(field_uri="persona:MobilePhones")
    mobile_phones2 = PhoneNumberAttributedValueField(field_uri="persona:MobilePhones2")
    assistant_phone_numbers = PhoneNumberAttributedValueField(field_uri="persona:AssistantPhoneNumbers")
    callback_phones = PhoneNumberAttributedValueField(field_uri="persona:CallbackPhones")
    car_phones = PhoneNumberAttributedValueField(field_uri="persona:CarPhones")
    home_faxes = PhoneNumberAttributedValueField(field_uri="persona:HomeFaxes")
    organization_main_phones = PhoneNumberAttributedValueField(field_uri="persona:OrganizationMainPhones")
    other_faxes = PhoneNumberAttributedValueField(field_uri="persona:OtherFaxes")
    other_telephones = PhoneNumberAttributedValueField(field_uri="persona:OtherTelephones")
    other_phones2 = PhoneNumberAttributedValueField(field_uri="persona:OtherPhones2")
    pagers = PhoneNumberAttributedValueField(field_uri="persona:Pagers")
    radio_phones = PhoneNumberAttributedValueField(field_uri="persona:RadioPhones")
    telex_numbers = PhoneNumberAttributedValueField(field_uri="persona:TelexNumbers")
    tty_tdd_phone_numbers = PhoneNumberAttributedValueField(field_uri="persona:TTYTDDPhoneNumbers")
    work_faxes = PhoneNumberAttributedValueField(field_uri="persona:WorkFaxes")
    emails1 = EmailAddressAttributedValueField(field_uri="persona:Emails1")
    emails2 = EmailAddressAttributedValueField(field_uri="persona:Emails2")
    emails3 = EmailAddressAttributedValueField(field_uri="persona:Emails3")
    business_home_pages = StringAttributedValueField(field_uri="persona:BusinessHomePages")
    personal_home_pages = StringAttributedValueField(field_uri="persona:PersonalHomePages")
    office_locations = StringAttributedValueField(field_uri="persona:OfficeLocations")
    im_addresses = StringAttributedValueField(field_uri="persona:ImAddresses")
    im_addresses2 = StringAttributedValueField(field_uri="persona:ImAddresses2")
    im_addresses3 = StringAttributedValueField(field_uri="persona:ImAddresses3")
    business_addresses = PostalAddressAttributedValueField(field_uri="persona:BusinessAddresses")
    home_addresses = PostalAddressAttributedValueField(field_uri="persona:HomeAddresses")
    other_addresses = PostalAddressAttributedValueField(field_uri="persona:OtherAddresses")
    titles = StringAttributedValueField(field_uri="persona:Titles")
    departments = StringAttributedValueField(field_uri="persona:Departments")
    company_names = StringAttributedValueField(field_uri="persona:CompanyNames")
    managers = StringAttributedValueField(field_uri="persona:Managers")
    assistant_names = StringAttributedValueField(field_uri="persona:AssistantNames")
    professions = StringAttributedValueField(field_uri="persona:Professions")
    spouse_names = StringAttributedValueField(field_uri="persona:SpouseNames")
    children = StringAttributedValueField(field_uri="persona:Children")
    schools = StringAttributedValueField(field_uri="persona:Schools")
    hobbies = StringAttributedValueField(field_uri="persona:Hobbies")
    wedding_anniversaries = StringAttributedValueField(field_uri="persona:WeddingAnniversaries")
    birthdays = StringAttributedValueField(field_uri="persona:Birthdays")
    locations = StringAttributedValueField(field_uri="persona:Locations")
    # This class has an additional field of type "ExtendedPropertyAttributedValueField" and
    # field_uri 'persona:ExtendedProperties'
class DistributionList(Item):
    """An Exchange distribution list item.

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/distributionlist
    """

    ELEMENT_NAME = "DistributionList"

    display_name = CharField(field_uri="contacts:DisplayName", is_required=True)
    # Read-only on this item type; the server maintains the value.
    file_as = CharField(field_uri="contacts:FileAs", is_read_only=True)
    contact_source = ChoiceField(
        field_uri="contacts:ContactSource", choices={Choice("Store"), Choice("ActiveDirectory")}, is_read_only=True
    )
    members = MemberListField(field_uri="distributionlist:Members")
| |
import os
import sys
import tempfile
from pecan.tests import PecanTestCase
from six import b as b_
__here__ = os.path.dirname(__file__)
class TestConf(PecanTestCase):
    """Unit tests for ``pecan.configuration`` dict and file loading."""

    def test_update_config_fail_identifier(self):
        """Keys that are not valid Python identifiers raise ValueError."""
        from pecan import configuration
        bad_dict = {'bad name': 'value'}
        self.assertRaises(ValueError, configuration.Config, bad_dict)

    def test_update_set_config(self):
        """Values loaded from a config file override the defaults."""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/config.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, 'myproject/templates')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '1.1.1.1')
        self.assertEqual(conf.server.port, '8081')

    def test_update_set_default_config(self):
        """Updating from an empty config file keeps the default values."""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/empty.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, '')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '0.0.0.0')
        self.assertEqual(conf.server.port, '8080')

    def test_update_force_dict(self):
        """A section marked with '__force_dict__' stays a plain dict."""
        from pecan import configuration
        conf = configuration.initconf()
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/forcedict.py'
        )))
        self.assertEqual(conf.app.root, None)
        self.assertEqual(conf.app.template_path, '')
        self.assertEqual(conf.app.static_root, 'public')
        self.assertEqual(conf.server.host, '0.0.0.0')
        self.assertEqual(conf.server.port, '8080')
        self.assertTrue(isinstance(conf.beaker, dict))
        self.assertEqual(conf.beaker['session.key'], 'key')
        self.assertEqual(conf.beaker['session.type'], 'cookie')
        self.assertEqual(
            conf.beaker['session.validate_key'],
            '1a971a7df182df3e1dec0af7c6913ec7'
        )
        # The marker key itself must be stripped from the resulting dict.
        self.assertEqual(conf.beaker.get('__force_dict__'), None)

    def test_update_config_with_dict(self):
        """Assigning a plain dict converts it to a nested Config."""
        from pecan import configuration
        conf = configuration.initconf()
        d = {'attr': True}
        conf['attr'] = d
        self.assertTrue(conf.attr.attr)

    def test_config_repr(self):
        """repr() exposes the underlying dict."""
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        self.assertEqual(repr(conf), "Config({'a': 1})")

    def test_config_from_dict(self):
        """'%(confdir)s' expands to the CWD when not loaded from a file."""
        from pecan import configuration
        conf = configuration.conf_from_dict({})
        conf['path'] = '%(confdir)s'
        self.assertTrue(os.path.samefile(conf['path'], os.getcwd()))

    def test_config_from_file(self):
        """A valid config file loads without raising."""
        from pecan import configuration
        path = os.path.join(
            os.path.dirname(__file__), 'config_fixtures', 'config.py'
        )
        configuration.conf_from_file(path)

    def test_config_illegal_ids(self):
        """Illegal names in a config file are not imported into the config."""
        from pecan import configuration
        conf = configuration.Config({})
        conf.update(configuration.conf_from_file(os.path.join(
            __here__,
            'config_fixtures/bad/module_and_underscore.py'
        )))
        self.assertEqual([], list(conf))

    def test_config_missing_file(self):
        """A missing config file raises RuntimeError."""
        from pecan import configuration
        path = ('doesnotexist.py',)
        configuration.Config({})
        self.assertRaises(
            RuntimeError,
            configuration.conf_from_file,
            os.path.join(__here__, 'config_fixtures', *path)
        )

    def test_config_missing_file_on_path(self):
        """A missing config file in a nested path raises RuntimeError."""
        from pecan import configuration
        path = ('bad', 'bad', 'doesnotexist.py',)
        configuration.Config({})
        self.assertRaises(
            RuntimeError,
            configuration.conf_from_file,
            os.path.join(__here__, 'config_fixtures', *path)
        )

    def test_config_with_syntax_error(self):
        """A config file with invalid syntax propagates SyntaxError."""
        from pecan import configuration
        with tempfile.NamedTemporaryFile('wb') as f:
            f.write(b_('\n'.join(['if false', 'var = 3'])))
            f.flush()
            configuration.Config({})
            self.assertRaises(
                SyntaxError,
                configuration.conf_from_file,
                f.name
            )

    def test_config_with_non_package_relative_import(self):
        """A relative import in a standalone config file raises an error."""
        from pecan import configuration
        with tempfile.NamedTemporaryFile('wb', suffix='.py') as f:
            f.write(b_('\n'.join(['from . import variables'])))
            f.flush()
            configuration.Config({})
            try:
                configuration.conf_from_file(f.name)
            except (ValueError, SystemError) as e:
                assert 'relative import' in str(e)
            else:
                raise AssertionError(
                    "A relative import-related error should have been raised"
                )

    def test_config_with_bad_import(self):
        """An ImportError inside a config file propagates to the caller."""
        from pecan import configuration
        path = ('bad', 'importerror.py')
        configuration.Config({})
        self.assertRaises(
            ImportError,
            configuration.conf_from_file,
            os.path.join(
                __here__,
                'config_fixtures',
                *path
            )
        )

    def test_config_dir(self):
        """dir() lists exactly the keys present in the config."""
        from pecan import configuration
        # Legacy guard from the Python 2.5 era; always true on any
        # interpreter this suite still runs on.
        if sys.version_info >= (2, 6):
            conf = configuration.Config({})
            self.assertEqual([], dir(conf))
            conf = configuration.Config({'a': 1})
            self.assertEqual(['a'], dir(conf))

    def test_config_bad_key(self):
        """Attribute access on a missing key raises AttributeError."""
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.a == 1
        self.assertRaises(AttributeError, getattr, conf, 'b')

    def test_config_get_valid_key(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('a') == 1

    def test_config_get_invalid_key(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('b') is None

    def test_config_get_invalid_key_return_default(self):
        from pecan import configuration
        conf = configuration.Config({'a': 1})
        assert conf.get('b', True) is True

    def test_config_to_dict(self):
        """to_dict() converts the Config tree to nested plain dicts."""
        from pecan import configuration
        conf = configuration.initconf()
        assert isinstance(conf, configuration.Config)
        to_dict = conf.to_dict()
        assert isinstance(to_dict, dict)
        assert to_dict['server']['host'] == '0.0.0.0'
        assert to_dict['server']['port'] == '8080'
        assert to_dict['app']['modules'] == []
        assert to_dict['app']['root'] is None
        assert to_dict['app']['static_root'] == 'public'
        assert to_dict['app']['template_path'] == ''

    def test_config_to_dict_nested(self):
        """to_dict() recurses through more than one level of nesting."""
        from pecan import configuration
        conf = configuration.initconf()
        nested = {'one': {'two': 2}}
        conf['nested'] = nested
        to_dict = conf.to_dict()
        assert isinstance(to_dict, dict)
        assert to_dict['server']['host'] == '0.0.0.0'
        assert to_dict['server']['port'] == '8080'
        assert to_dict['app']['modules'] == []
        assert to_dict['app']['root'] is None
        assert to_dict['app']['static_root'] == 'public'
        assert to_dict['app']['template_path'] == ''
        assert to_dict['nested']['one']['two'] == 2

    def test_config_to_dict_prefixed(self):
        """to_dict() can prefix every key in the output."""
        from pecan import configuration
        conf = configuration.initconf()
        assert isinstance(conf, configuration.Config)
        to_dict = conf.to_dict('prefix_')
        assert isinstance(to_dict, dict)
        assert to_dict['prefix_server']['prefix_host'] == '0.0.0.0'
        assert to_dict['prefix_server']['prefix_port'] == '8080'
        assert to_dict['prefix_app']['prefix_modules'] == []
        assert to_dict['prefix_app']['prefix_root'] is None
        assert to_dict['prefix_app']['prefix_static_root'] == 'public'
        assert to_dict['prefix_app']['prefix_template_path'] == ''
class TestGlobalConfig(PecanTestCase):
    """Tests for the process-wide runtime configuration."""

    def tearDown(self):
        from pecan import configuration
        # Reset the global runtime config so state never leaks into
        # other test classes.
        configuration.set_config(
            dict(configuration.initconf()),
            overwrite=True
        )

    def test_paint_from_dict(self):
        from pecan import configuration
        configuration.set_config({'foo': 'bar'})
        # Without overwrite=True the new values are merged on top of the
        # defaults, so the runtime conf holds more than just 'foo'.
        assert dict(configuration._runtime_conf) != {'foo': 'bar'}
        self.assertEqual(configuration._runtime_conf.foo, 'bar')

    def test_overwrite_from_dict(self):
        from pecan import configuration
        configuration.set_config({'foo': 'bar'}, overwrite=True)
        # overwrite=True replaces the whole runtime config.
        assert dict(configuration._runtime_conf) == {'foo': 'bar'}

    def test_paint_from_file(self):
        from pecan import configuration
        configuration.set_config(os.path.join(
            __here__,
            'config_fixtures/foobar.py'
        ))
        assert dict(configuration._runtime_conf) != {'foo': 'bar'}
        assert configuration._runtime_conf.foo == 'bar'

    def test_overwrite_from_file(self):
        from pecan import configuration
        configuration.set_config(
            os.path.join(
                __here__,
                'config_fixtures/foobar.py',
            ),
            overwrite=True
        )
        assert dict(configuration._runtime_conf) == {'foo': 'bar'}

    def test_set_config_none_type(self):
        from pecan import configuration
        # None is neither a dict nor a path; rejected outright.
        self.assertRaises(RuntimeError, configuration.set_config, None)

    def test_set_config_to_dir(self):
        from pecan import configuration
        # A directory path is not a loadable config file.
        self.assertRaises(RuntimeError, configuration.set_config, '/')
class TestConfFromEnv(PecanTestCase):
    """Tests for resolving the config path from the PECAN_CONFIG env var."""
    #
    # Note that there is a good chance of pollution if ``tearDown`` does not
    # reset the configuration like this class does. If implementing new classes
    # for configuration this tearDown **needs to be implemented**
    # (Cleanup here is registered via addCleanup rather than tearDown.)
    #

    def setUp(self):
        super(TestConfFromEnv, self).setUp()
        # Always drop PECAN_CONFIG afterwards so tests cannot pollute
        # each other's environment.
        self.addCleanup(self._remove_config_key)
        from pecan import configuration
        self.get_conf_path_from_env = configuration.get_conf_path_from_env

    def _remove_config_key(self):
        os.environ.pop('PECAN_CONFIG', None)

    def test_invalid_path(self):
        os.environ['PECAN_CONFIG'] = '/'
        msg = "PECAN_CONFIG was set to an invalid path: /"
        # assertRaisesRegexp is the Python 2 spelling; it was renamed to
        # assertRaisesRegex in Python 3.2 and the old name is deprecated.
        self.assertRaisesRegexp(
            RuntimeError,
            msg,
            self.get_conf_path_from_env
        )

    def test_is_not_set(self):
        msg = "PECAN_CONFIG is not set and " \
              "no config file was passed as an argument."
        self.assertRaisesRegexp(
            RuntimeError,
            msg,
            self.get_conf_path_from_env
        )

    def test_return_valid_path(self):
        # Shadows the module-level __here__ on purpose: any existing
        # file path (this test module itself) is accepted.
        __here__ = os.path.abspath(__file__)
        os.environ['PECAN_CONFIG'] = __here__
        assert self.get_conf_path_from_env() == __here__
| |
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hypervisors."""
import collections
from oslo_config import cfg
from oslo_log import log
from stevedore import driver
import ceilometer
from ceilometer.i18n import _
# Configuration options controlling which hypervisor inspector driver
# is loaded (see get_hypervisor_inspector below).
OPTS = [
    cfg.StrOpt('hypervisor_inspector',
               default='libvirt',
               choices=('hyperv', 'libvirt', 'vmware', 'xenapi'),
               help='Inspector to use for inspecting the hypervisor layer.'),
]

cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)


# Named tuple representing instances.
#
# name: the name of the instance
# UUID: the UUID associated with the instance
#
Instance = collections.namedtuple('Instance', ['name', 'UUID'])


# Named tuple representing CPU statistics.
#
# number: number of CPUs
# time: cumulative CPU time
#
CPUStats = collections.namedtuple('CPUStats', ['number', 'time'])


# Named tuple representing CPU Utilization statistics.
#
# util: CPU utilization in percentage
#
CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util'])


# Named tuple representing Memory usage statistics.
#
# usage: Amount of memory used
#
MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage'])


# Named tuple representing Resident Memory usage statistics.
#
# resident: Amount of resident memory
#
MemoryResidentStats = collections.namedtuple('MemoryResidentStats',
                                             ['resident'])


# Named tuple representing vNICs.
#
# name: the name of the vNIC
# mac: the MAC address
# fref: the filter ref
# parameters: miscellaneous parameters
#
Interface = collections.namedtuple('Interface', ['name', 'mac',
                                                 'fref', 'parameters'])


# Named tuple representing vNIC statistics.
#
# rx_bytes: number of received bytes
# rx_packets: number of received packets
# tx_bytes: number of transmitted bytes
# tx_packets: number of transmitted packets
#
InterfaceStats = collections.namedtuple('InterfaceStats',
                                        ['rx_bytes', 'rx_packets',
                                         'tx_bytes', 'tx_packets'])


# Named tuple representing vNIC rate statistics.
#
# rx_bytes_rate: rate of received bytes
# tx_bytes_rate: rate of transmitted bytes
#
InterfaceRateStats = collections.namedtuple('InterfaceRateStats',
                                            ['rx_bytes_rate', 'tx_bytes_rate'])


# Named tuple representing disks.
#
# device: the device name for the disk
#
Disk = collections.namedtuple('Disk', ['device'])


# Named tuple representing disk statistics.
#
# read_bytes: number of bytes read
# read_requests: number of read operations
# write_bytes: number of bytes written
# write_requests: number of write operations
# errors: number of errors
#
DiskStats = collections.namedtuple('DiskStats',
                                   ['read_bytes', 'read_requests',
                                    'write_bytes', 'write_requests',
                                    'errors'])


# Named tuple representing disk rate statistics.
#
# read_bytes_rate: number of bytes read per second
# read_requests_rate: number of read operations per second
# write_bytes_rate: number of bytes written per second
# write_requests_rate: number of write operations per second
#
DiskRateStats = collections.namedtuple('DiskRateStats',
                                       ['read_bytes_rate',
                                        'read_requests_rate',
                                        'write_bytes_rate',
                                        'write_requests_rate'])


# Named tuple representing disk latency statistics.
#
# disk_latency: average disk latency
#
DiskLatencyStats = collections.namedtuple('DiskLatencyStats',
                                          ['disk_latency'])


# Named tuple representing disk iops statistics.
#
# iops_count: number of iops per second
#
DiskIOPSStats = collections.namedtuple('DiskIOPSStats',
                                       ['iops_count'])


# Named tuple representing disk Information.
#
# capacity: capacity of the disk
# allocation: allocation of the disk
# physical: usage of the disk
#
DiskInfo = collections.namedtuple('DiskInfo',
                                  ['capacity',
                                   'allocation',
                                   'physical'])
# Exception types
#
class InspectorException(Exception):
    """Base class for all errors raised by virt inspectors."""

    def __init__(self, message=None):
        super(InspectorException, self).__init__(message)
class InstanceNotFoundException(InspectorException):
    """Raised when an instance cannot be found on the hypervisor."""

    pass
class InstanceShutOffException(InspectorException):
    """Raised when the inspected instance is shut off."""

    pass
class NoDataException(InspectorException):
    """Raised when the inspector has no data to return."""

    pass
# Main virt inspector abstraction layering over the hypervisor API.
#
class Inspector(object):
    """Base class for hypervisor inspectors.

    Every method raises ceilometer.NotImplementedError here; concrete
    drivers (hyperv, libvirt, vmware, xenapi) override the subset of
    inspections they support.
    """

    def inspect_cpus(self, instance):
        """Inspect the CPU statistics for an instance.

        :param instance: the target instance
        :return: the number of CPUs and cumulative CPU time
        """
        raise ceilometer.NotImplementedError

    def inspect_cpu_util(self, instance, duration=None):
        """Inspect the CPU Utilization (%) for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
                         inspected
        :return: the percentage of CPU utilization
        """
        raise ceilometer.NotImplementedError

    def inspect_vnics(self, instance):
        """Inspect the vNIC statistics for an instance.

        :param instance: the target instance
        :return: for each vNIC, the number of bytes & packets
                 received and transmitted
        """
        raise ceilometer.NotImplementedError

    def inspect_vnic_rates(self, instance, duration=None):
        """Inspect the vNIC rate statistics for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
                         inspected
        :return: for each vNIC, the rate of bytes & packets
                 received and transmitted
        """
        raise ceilometer.NotImplementedError

    def inspect_disks(self, instance):
        """Inspect the disk statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the number of bytes & operations
                 read and written, and the error count
        """
        raise ceilometer.NotImplementedError

    def inspect_memory_usage(self, instance, duration=None):
        """Inspect the memory usage statistics for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
                         inspected
        :return: the amount of memory used
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_rates(self, instance, duration=None):
        """Inspect the disk statistics as rates for an instance.

        :param instance: the target instance
        :param duration: the last 'n' seconds, over which the value should be
                         inspected
        :return: for each disk, the number of bytes & operations
                 read and written per second, with the error count
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_latency(self, instance):
        """Inspect the disk latency statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the average disk latency
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_iops(self, instance):
        """Inspect the disk iops statistics for an instance.

        :param instance: the target instance
        :return: for each disk, the number of iops per second
        """
        raise ceilometer.NotImplementedError

    def inspect_disk_info(self, instance):
        """Inspect the disk information for an instance.

        :param instance: the target instance
        :return: for each disk, its capacity, allocation and physical usage
        """
        raise ceilometer.NotImplementedError
def get_hypervisor_inspector():
    """Load the inspector driver selected by 'hypervisor_inspector'.

    Falls back to the no-op base Inspector (which raises
    NotImplementedError on every inspection) when the driver cannot be
    imported.
    """
    try:
        mgr = driver.DriverManager('ceilometer.compute.virt',
                                   cfg.CONF.hypervisor_inspector,
                                   invoke_on_load=True)
        return mgr.driver
    except ImportError as e:
        LOG.error(_("Unable to load the hypervisor inspector: %s") % e)
        return Inspector()
| |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from webob import Request
from swift.common.middleware import recon
from unittest import TestCase
from contextlib import contextmanager
from posix import stat_result, statvfs_result
import os
import swift.common.constraints
class FakeApp(object):
    # Minimal WSGI-style callable used as the wrapped app in these tests;
    # returns a recognizable sentinel body so pass-through is detectable.
    def __call__(self, env, start_response):
        return "FAKE APP"
def start_response(*args):
    """No-op stand-in for a WSGI start_response callable."""
    return None
class OpenAndReadTester(object):
    """Fake file object that records open() and read() calls.

    Iteration and read() replay the lines passed in 'output_iter'.
    """

    def __init__(self, output_iter):
        self.index = 0
        # NOTE(review): out_len is len - 1 and next() stops when index
        # reaches it, so iteration yields all but the LAST element of
        # 'output_iter' -- presumably the fixtures end with a sentinel or
        # empty line; confirm against the tests that use this helper.
        self.out_len = len(output_iter) - 1
        self.data = output_iter
        self.output_iter = iter(output_iter)
        self.read_calls = []
        self.open_calls = []

    def __iter__(self):
        return self

    # Python 2 iterator protocol ('next' rather than '__next__').
    def next(self):
        if self.index == self.out_len:
            raise StopIteration
        else:
            line = self.data[self.index]
            self.index += 1
            return line

    def read(self, *args, **kwargs):
        # Record the call, then return the next canned line, or '' once
        # exhausted (mimicking file.read() at EOF).
        self.read_calls.append((args, kwargs))
        try:
            return self.output_iter.next()
        except StopIteration:
            return ''

    @contextmanager
    def open(self, *args, **kwargs):
        # Usable as 'with obj.open(path) as f:'; yields self as the file.
        self.open_calls.append((args, kwargs))
        yield self
class MockOS(object):
    """Stand-in for the 'os' functions patched in by TestReconSuccess.

    Each fake_* method records its (args, kwargs) and returns the canned
    output supplied at construction time.
    """

    def __init__(self, ls_out=None, pe_out=None, statvfs_out=None,
                 lstat_out=(1, 1, 5, 4, 5, 5, 55, 55, 55, 55)):
        # Canned return values for the fakes below.
        self.ls_output = ls_out
        self.path_exists_output = pe_out
        self.statvfs_output = statvfs_out
        self.lstat_output_tuple = lstat_out
        # Call recorders, one list per faked function.
        self.listdir_calls = []
        self.statvfs_calls = []
        self.path_exists_calls = []
        self.lstat_calls = []

    def fake_listdir(self, *args, **kwargs):
        self.listdir_calls.append((args, kwargs))
        return self.ls_output

    def fake_path_exists(self, *args, **kwargs):
        self.path_exists_calls.append((args, kwargs))
        return self.path_exists_output

    def fake_statvfs(self, *args, **kwargs):
        self.statvfs_calls.append((args, kwargs))
        # Wrap the canned tuple in a real statvfs_result structure.
        return statvfs_result(self.statvfs_output)

    def fake_lstat(self, *args, **kwargs):
        self.lstat_calls.append((args, kwargs))
        # Wrap the canned tuple in a real stat_result structure.
        return stat_result(self.lstat_output_tuple)
class TestReconSuccess(TestCase):
    """Happy-path tests for ReconMiddleware's stat-collection helpers.

    setUp monkey-patches the os module with MockOS so no real
    filesystem access occurs; canned /proc file contents are fed in
    through OpenAndReadTester where needed.
    """
    def setUp(self):
        self.app = recon.ReconMiddleware(FakeApp(), {})
        self.mockos = MockOS()
        # Keep references to the real os functions so tearDown can
        # restore them even if a test fails.
        self.real_listdir = os.listdir
        self.real_path_exists = os.path.exists
        self.real_lstat = os.lstat
        self.real_statvfs = os.statvfs
        os.listdir = self.mockos.fake_listdir
        os.path.exists = self.mockos.fake_path_exists
        os.lstat = self.mockos.fake_lstat
        os.statvfs = self.mockos.fake_statvfs
    def tearDown(self):
        # Undo the monkey-patching from setUp.
        os.listdir = self.real_listdir
        os.path.exists = self.real_path_exists
        os.lstat = self.real_lstat
        os.statvfs = self.real_statvfs
        del self.mockos
    def test_get_mounted(self):
        """get_mounted should parse /proc/mounts into device/path dicts."""
        mounts_content = ['rootfs / rootfs rw 0 0',
                          'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
                          'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
                          'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,mode=755 0 0',
                          'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0',
                          '/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
                          'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
                          'none /sys/kernel/debug debugfs rw,relatime 0 0',
                          'none /sys/kernel/security securityfs rw,relatime 0 0',
                          'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
                          'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
                          'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
                          'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
                          '/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,nobarrier,logbufs=8,noquota 0 0',
                          'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
                          'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
                          'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
                          '']
        mounted_resp = [{'device': 'rootfs', 'path': '/'},
                        {'device': 'none', 'path': '/sys'},
                        {'device': 'none', 'path': '/proc'},
                        {'device': 'none', 'path': '/dev'},
                        {'device': 'none', 'path': '/dev/pts'},
                        {'device': '/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
                        {'device': 'none', 'path': '/sys/fs/fuse/connections'},
                        {'device': 'none', 'path': '/sys/kernel/debug'},
                        {'device': 'none', 'path': '/sys/kernel/security'},
                        {'device': 'none', 'path': '/dev/shm'},
                        {'device': 'none', 'path': '/var/run'},
                        {'device': 'none', 'path': '/var/lock'},
                        {'device': 'none', 'path': '/lib/init/rw'},
                        {'device': '/dev/loop0', 'path': '/mnt/sdb1'},
                        {'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
                        {'device': 'nfsd', 'path': '/proc/fs/nfsd'},
                        {'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
        oart = OpenAndReadTester(mounts_content)
        rv = self.app.get_mounted(openr=oart.open)
        self.assertEquals(oart.open_calls, [(('/proc/mounts', 'r'), {})])
        self.assertEquals(rv, mounted_resp)
    def test_get_load(self):
        """get_load should parse /proc/loadavg into the expected keys."""
        oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
        rv = self.app.get_load(openr=oart.open)
        self.assertEquals(oart.read_calls, [((), {})])
        self.assertEquals(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
        self.assertEquals(rv, {'5m': 0.029999999999999999, '15m': 0.0,
                               'processes': 16306, 'tasks': '1/220',
                               '1m': 0.029999999999999999})
    def test_get_mem(self):
        """get_mem should map every /proc/meminfo line to a dict entry."""
        meminfo_content = ['MemTotal:         505840 kB',
                           'MemFree:           26588 kB',
                           'Buffers:           44948 kB',
                           'Cached:           146376 kB',
                           'SwapCached:        14736 kB',
                           'Active:           194900 kB',
                           'Inactive:         193412 kB',
                           'Active(anon):      94208 kB',
                           'Inactive(anon):   102848 kB',
                           'Active(file):     100692 kB',
                           'Inactive(file):    90564 kB',
                           'Unevictable:           0 kB',
                           'Mlocked:               0 kB',
                           'SwapTotal:        407544 kB',
                           'SwapFree:         313436 kB',
                           'Dirty:               104 kB',
                           'Writeback:             0 kB',
                           'AnonPages:        185268 kB',
                           'Mapped:             9592 kB',
                           'Shmem:                68 kB',
                           'Slab:              61716 kB',
                           'SReclaimable:      46620 kB',
                           'SUnreclaim:        15096 kB',
                           'KernelStack:        1760 kB',
                           'PageTables:         8832 kB',
                           'NFS_Unstable:          0 kB',
                           'Bounce:                0 kB',
                           'WritebackTmp:          0 kB',
                           'CommitLimit:      660464 kB',
                           'Committed_AS:     565608 kB',
                           'VmallocTotal:   34359738367 kB',
                           'VmallocUsed:      266724 kB',
                           'VmallocChunk:   34359467156 kB',
                           'HardwareCorrupted:     0 kB',
                           'HugePages_Total:       0',
                           'HugePages_Free:        0',
                           'HugePages_Rsvd:        0',
                           'HugePages_Surp:        0',
                           'Hugepagesize:       2048 kB',
                           'DirectMap4k:       10240 kB',
                           'DirectMap2M:      514048 kB',
                           '']
        meminfo_resp = {'WritebackTmp': '0 kB',
                        'SwapTotal': '407544 kB',
                        'Active(anon)': '94208 kB',
                        'SwapFree': '313436 kB',
                        'DirectMap4k': '10240 kB',
                        'KernelStack': '1760 kB',
                        'MemFree': '26588 kB',
                        'HugePages_Rsvd': '0',
                        'Committed_AS': '565608 kB',
                        'Active(file)': '100692 kB',
                        'NFS_Unstable': '0 kB',
                        'VmallocChunk': '34359467156 kB',
                        'Writeback': '0 kB',
                        'Inactive(file)': '90564 kB',
                        'MemTotal': '505840 kB',
                        'VmallocUsed': '266724 kB',
                        'HugePages_Free': '0',
                        'AnonPages': '185268 kB',
                        'Active': '194900 kB',
                        'Inactive(anon)': '102848 kB',
                        'CommitLimit': '660464 kB',
                        'Hugepagesize': '2048 kB',
                        'Cached': '146376 kB',
                        'SwapCached': '14736 kB',
                        'VmallocTotal': '34359738367 kB',
                        'Shmem': '68 kB',
                        'Mapped': '9592 kB',
                        'SUnreclaim': '15096 kB',
                        'Unevictable': '0 kB',
                        'SReclaimable': '46620 kB',
                        'Mlocked': '0 kB',
                        'DirectMap2M': '514048 kB',
                        'HugePages_Surp': '0',
                        'Bounce': '0 kB',
                        'Inactive': '193412 kB',
                        'PageTables': '8832 kB',
                        'HardwareCorrupted': '0 kB',
                        'HugePages_Total': '0',
                        'Slab': '61716 kB',
                        'Buffers': '44948 kB',
                        'Dirty': '104 kB'}
        oart = OpenAndReadTester(meminfo_content)
        rv = self.app.get_mem(openr=oart.open)
        self.assertEquals(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
        self.assertEquals(rv, meminfo_resp)
    def test_get_async_info(self):
        """async count should come from the object recon cache file."""
        obj_recon_content = """{"object_replication_time": 200.0, "async_pending": 5}"""
        oart = OpenAndReadTester([obj_recon_content])
        rv = self.app.get_async_info(openr=oart.open)
        self.assertEquals(oart.read_calls, [((), {})])
        self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
        self.assertEquals(rv, {'async_pending': 5})
    def test_get_async_info_empty_file(self):
        """Missing async_pending key should be reported as -1."""
        obj_recon_content = """{"object_replication_time": 200.0}"""
        oart = OpenAndReadTester([obj_recon_content])
        rv = self.app.get_async_info(openr=oart.open)
        self.assertEquals(oart.read_calls, [((), {})])
        self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
        self.assertEquals(rv, {'async_pending': -1})
    def test_get_replication_info(self):
        """Replication time should come from the object recon cache file."""
        obj_recon_content = """{"object_replication_time": 200.0, "async_pending": 5}"""
        oart = OpenAndReadTester([obj_recon_content])
        rv = self.app.get_replication_info(openr=oart.open)
        self.assertEquals(oart.read_calls, [((), {})])
        self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
        self.assertEquals(rv, {'object_replication_time': 200.0})
    def test_get_replication_info_empty_file(self):
        """Missing object_replication_time key should be reported as -1."""
        obj_recon_content = """{"async_pending": 5}"""
        oart = OpenAndReadTester([obj_recon_content])
        rv = self.app.get_replication_info(openr=oart.open)
        self.assertEquals(oart.read_calls, [((), {})])
        self.assertEquals(oart.open_calls, [(('/var/cache/swift/object.recon', 'r'), {})])
        self.assertEquals(rv, {'object_replication_time': -1})
    def test_get_device_info(self):
        """Default devices root should be /srv/node/."""
        rv = self.app.get_device_info()
        self.assertEquals(rv, '/srv/node/')
    def test_get_unmounted(self):
        """Devices whose mount point does not exist are reported unmounted."""
        def fake_checkmount_true(*args):
            return True
        unmounted_resp = [{'device': 'fakeone', 'mounted': False},
                          {'device': 'faketwo', 'mounted': False}]
        self.mockos.ls_output=['fakeone', 'faketwo']
        self.mockos.path_exists_output=False
        # Temporarily replace check_mount; restored right after the call.
        real_checkmount = swift.common.constraints.check_mount
        swift.common.constraints.check_mount = fake_checkmount_true
        rv = self.app.get_unmounted()
        swift.common.constraints.check_mount = real_checkmount
        self.assertEquals(self.mockos.listdir_calls, [(('/srv/node/',), {})])
        self.assertEquals(rv, unmounted_resp)
    def test_get_diskusage(self):
        """Disk usage figures should be derived from statvfs output."""
        #posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
        #                     f_bfree=1113075, f_bavail=1013351, f_files=498736,
        #                     f_ffree=397839, f_favail=397839, f_flag=0,
        #                     f_namemax=255)
        statvfs_content=(4096, 4096, 1963185, 1113075, 1013351, 498736, 397839,
                         397839, 0, 255)
        du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
                    'mounted': True, 'used': 3890520064, 'size': 8041205760}]
        self.mockos.ls_output=['canhazdrive1']
        self.mockos.statvfs_output=statvfs_content
        self.mockos.path_exists_output=True
        rv = self.app.get_diskusage()
        self.assertEquals(self.mockos.statvfs_calls,[(('/srv/node/canhazdrive1',), {})])
        self.assertEquals(rv, du_resp)
    def test_get_diskusage_checkmount_fail(self):
        """Unmounted devices should report empty usage figures."""
        du_resp = [{'device': 'canhazdrive1', 'avail': '',
                    'mounted': False, 'used': '', 'size': ''}]
        self.mockos.ls_output=['canhazdrive1']
        self.mockos.path_exists_output=False
        rv = self.app.get_diskusage()
        self.assertEquals(self.mockos.listdir_calls,[(('/srv/node/',), {})])
        self.assertEquals(self.mockos.path_exists_calls,[(('/srv/node/canhazdrive1',), {})])
        self.assertEquals(rv, du_resp)
    def test_get_quarantine_count(self):
        """Quarantine counts should be derived from lstat link counts."""
        #posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
        #                   st_uid=5, st_gid=6, st_size=7, st_atime=8,
        #                   st_mtime=9, st_ctime=10)
        lstat_content = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        self.mockos.ls_output=['sda']
        self.mockos.path_exists_output=True
        # NOTE(review): MockOS reads lstat_output_tuple, not lstat_output,
        # so this assignment appears to be ignored and the MockOS default
        # tuple drives the result -- confirm whether this is intended.
        self.mockos.lstat_output=lstat_content
        rv = self.app.get_quarantine_count()
        self.assertEquals(rv, {'objects': 2, 'accounts': 2, 'containers': 2})
    def test_get_socket_info(self):
        """get_socket_info should read both sockstat and sockstat6."""
        sockstat_content = ['sockets: used 271',
                            'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
                            'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
                            'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
                            '']
        sockstat6_content = ['TCP6: inuse 1',
                             'UDP6: inuse 3',
                             'UDPLITE6: inuse 0',
                             'RAW6: inuse 0',
                             'FRAG6: inuse 0 memory 0',
                             '']
        oart = OpenAndReadTester(sockstat_content)
        rv = self.app.get_socket_info(openr=oart.open)
        self.assertEquals(oart.open_calls, [(('/proc/net/sockstat', 'r'), {}),
                                            (('/proc/net/sockstat6', 'r'), {})])
        #todo verify parsed result of sockstat6
        #self.assertEquals(rv, {'time_wait': 0, 'tcp_in_use': 30, 'orphan': 0, 'tcp_mem_allocated_bytes': 0})
class FakeRecon(object):
    """Canned replacements for every ReconMiddleware collector.

    Each fake_* method returns a distinctive one-entry dict so routing
    tests can tell exactly which collector was invoked; the raise_*
    helpers exercise the middleware's error handling paths.
    """
    def fake_mem(self):
        return {'memtest': "1"}
    def fake_load(self):
        return {'loadtest': "1"}
    def fake_async(self):
        return {'asynctest': "1"}
    def fake_replication(self):
        return {'replicationtest': "1"}
    def fake_mounted(self):
        return {'mountedtest': "1"}
    def fake_unmounted(self):
        return {'unmountedtest': "1"}
    def fake_diskusage(self):
        return {'diskusagetest': "1"}
    def fake_ringmd5(self):
        return {'ringmd5test': "1"}
    def fake_quarantined(self):
        return {'quarantinedtest': "1"}
    def fake_sockstat(self):
        return {'sockstattest': "1"}
    def raise_IOError(self):
        # Simulates a collector failing to read its source file.
        raise IOError
    def raise_ValueError(self):
        # Simulates a collector returning something json can't dump.
        raise ValueError
class TestHealthCheck(unittest.TestCase):
    """Routing tests for ReconMiddleware.

    Every /recon/* path must dispatch to the matching collector
    method; collector errors must yield the internal-server-error
    body, and non-recon paths must pass through to the wrapped app.
    """
    def setUp(self):
        """Replace all collectors on the middleware with FakeRecon stubs."""
        self.frecon = FakeRecon()
        self.app = recon.ReconMiddleware(FakeApp(), {})
        self.app.get_mem = self.frecon.fake_mem
        self.app.get_load = self.frecon.fake_load
        self.app.get_async_info = self.frecon.fake_async
        self.app.get_replication_info = self.frecon.fake_replication
        self.app.get_mounted = self.frecon.fake_mounted
        self.app.get_unmounted = self.frecon.fake_unmounted
        self.app.get_diskusage = self.frecon.fake_diskusage
        self.app.get_ring_md5 = self.frecon.fake_ringmd5
        self.app.get_quarantine_count = self.frecon.fake_quarantined
        self.app.get_socket_info = self.frecon.fake_sockstat
    def test_recon_get_mem(self):
        get_mem_resp = ['{"memtest": "1"}']
        req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_mem_resp)
    def test_recon_get_load(self):
        get_load_resp = ['{"loadtest": "1"}']
        req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_load_resp)
    def test_recon_get_async(self):
        get_async_resp = ['{"asynctest": "1"}']
        req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_async_resp)
    def test_recon_get_async_ioerror(self):
        orig = self.app.get_async_info
        self.app.get_async_info = self.frecon.raise_IOError
        req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.app.get_async_info = orig
        self.assertEquals(resp, ['Internal server error.'])
    def test_recon_get_replication(self):
        get_replication_resp = ['{"replicationtest": "1"}']
        req = Request.blank('/recon/replication', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_replication_resp)
    def test_recon_get_replication_ioerror(self):
        orig = self.app.get_replication_info
        self.app.get_replication_info = self.frecon.raise_IOError
        req = Request.blank('/recon/replication', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        # BUG FIX: restore get_replication_info (the attribute we
        # replaced above); previously this wrote to get_async_info,
        # leaving the raising stub installed for later tests.
        self.app.get_replication_info = orig
        self.assertEquals(resp, ['Internal server error.'])
    def test_recon_get_mounted(self):
        get_mounted_resp = ['{"mountedtest": "1"}']
        req = Request.blank('/recon/mounted', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_mounted_resp)
    def test_recon_get_unmounted(self):
        get_unmounted_resp = ['{"unmountedtest": "1"}']
        req = Request.blank('/recon/unmounted', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_unmounted_resp)
    def test_recon_get_diskusage(self):
        get_diskusage_resp = ['{"diskusagetest": "1"}']
        req = Request.blank('/recon/diskusage', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_diskusage_resp)
    def test_recon_get_ringmd5(self):
        get_ringmd5_resp = ['{"ringmd5test": "1"}']
        req = Request.blank('/recon/ringmd5', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_ringmd5_resp)
    def test_recon_get_quarantined(self):
        get_quarantined_resp = ['{"quarantinedtest": "1"}']
        req = Request.blank('/recon/quarantined', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_quarantined_resp)
    def test_recon_get_sockstat(self):
        get_sockstat_resp = ['{"sockstattest": "1"}']
        req = Request.blank('/recon/sockstat', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, get_sockstat_resp)
    def test_recon_invalid_path(self):
        req = Request.blank('/recon/invalid', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, ['Invalid path: /recon/invalid'])
    def test_recon_failed_json_dumps(self):
        orig = self.app.get_replication_info
        self.app.get_replication_info = self.frecon.raise_ValueError
        req = Request.blank('/recon/replication', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        # BUG FIX: restore the attribute that was actually replaced
        # (was get_async_info, corrupting state for other tests).
        self.app.get_replication_info = orig
        self.assertEquals(resp, ['Internal server error.'])
    def test_recon_pass(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')
# Run the recon middleware test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| |
import math
import string
import sys
import struct
import numpy as np
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import glob
import os
import gzip
import tarfile
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import astropy
import astropy.cosmology
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import WMAP7,z_at_value
from astropy.coordinates import SkyCoord
import copy
import medianstats_bootstrap as msbs
import h5py
import requests
from multiprocessing import Process, Queue, current_process
import time
import gfs_sublink_utils as gsu
import illustris_sunrise_utils as isu
# Illustris "little h" (H0/100), used to convert code units to physical.
ilh = 0.704
# Cosmology matching the Illustris simulation parameters.
illcos = astropy.cosmology.FlatLambdaCDM(H0=70.4,Om0=0.2726,Ob0=0.0456)
# Wall-clock start time; used for elapsed-minutes progress printouts.
start_time = time.time()
# Default particle fields requested from the Illustris cutout API,
# keyed by particle family.
defaultparams={'stars':'ParticleIDs,Coordinates,Velocities,GFM_StellarFormationTime,GFM_Metallicity,GFM_InitialMass,Masses','gas':'ParticleIDs,Coordinates,Density,ElectronAbundance,Masses,Velocities,Volume,SubfindDensity,Potential,InternalEnergy,StarFormationRate,GFM_Metallicity,GFM_AGNRadiation,GFM_WindDMVelDisp,GFM_CoolingRate,NeutralHydrogenAbundance,SmoothingLength,SubfindHsml,SubfindVelDisp,NumTracers','dm':'Coordinates,Velocities,Potential,ParticleIDs','bhs':'all'}
baseUrl = 'http://www.illustris-project.org/api/'
# SECURITY NOTE(review): hard-coded API credential committed to source;
# should be rotated and loaded from the environment instead.
headers = {"api-key":"117782db3bf216d7ce7a04d0c9034601"}
class subhalo_observation:
    """Lightweight record tying one simulation subhalo to a lightcone
    entry.

    Stores the simulation name, snapshot number, Subfind ID, the
    entry's index within the lightcone catalog, and its field label.
    """
    def __init__(self, sim, sn, sfid, i, label):
        self.sim = sim
        self.sn = sn
        self.sfid = sfid
        self.i = i
        self.label = label
def get(path, params=None, savepath=None):
    """Issue an authenticated HTTP GET against the Illustris API.

    Returns parsed JSON for JSON responses, the written filename for
    file downloads (rooted under savepath when given), or the raw
    response object for anything else.
    """
    response = requests.get(path, params=params, headers=headers)
    # Any non-2xx status becomes an HTTPError here.
    response.raise_for_status()
    if response.headers['content-type'] == 'application/json':
        # JSON payloads are decoded and returned directly.
        return response.json()
    if 'content-disposition' in response.headers:
        # File download: honour the server-suggested filename.
        suggested = response.headers['content-disposition'].split("filename=")[1]
        if savepath is not None:
            target = os.path.join(savepath, suggested)
        else:
            target = suggested
        with open(target, 'wb') as outfile:
            outfile.write(response.content)
        return target
    return response
def get_subhalo(sim,snap,sfid,params=defaultparams,savepath=None,verbose=True,clobber=False,getparent=False):
    """Download (or reuse a cached copy of) an Illustris subhalo cutout.

    Returns a tuple (hdf5_filename, subhalo_metadata_dict, downloaded)
    where downloaded indicates whether a fresh download occurred.
    NOTE(review): params defaults to the mutable module-level
    defaultparams dict, so all callers share one object.
    """
    relative_path = sim+"/snapshots/" + str(snap) + "/subhalos/" + str(sfid)
    url = "http://www.illustris-project.org/api/"+relative_path
    #sub = get(url)
    sim_url="http://www.illustris-project.org/api/"+sim
    snap_url=sim_url+"/snapshots/"+str(snap)
    if savepath is not None:
        # Mirror the API path layout underneath savepath.
        savepath = os.path.join(savepath,relative_path)
        if not os.path.lexists(savepath):
            os.makedirs(savepath)
        checkf = os.path.join(savepath,'cutout_'+str(sfid)+'.hdf5')
        npyf = os.path.join(savepath,'sub_'+str(sfid)+'.npy')
    else:
        checkf = 'cutout_'+str(sfid)+'.hdf5'
        npyf = 'sub_'+str(sfid)+'.npy'
    if os.path.lexists(checkf) and os.path.lexists(npyf) and clobber is False:
        # Cache hit: reuse the saved cutout and metadata, no download.
        if verbose:
            print("Subhalo cutout exists, skipping: ", checkf)
        download = False
        subobj = np.load(npyf)
        # np.load on a saved dict yields a 0-d object array; .all()
        # unwraps it back into the dict (hence the original '?').
        sub = subobj.all() #?
        return checkf, sub, download
    else:
        if verbose:
            print("Getting subhalo cutout: ", checkf)
    #subhalo metadata
    try:
        sub = get(url)
        particledata=sub
        if getparent is True:
            # NOTE(review): url is rebound to the parent halo here, so
            # the cutout request below downloads the PARENT halo's
            # cutout -- confirm this is intentional.
            url=sub['related']['parent_halo']
            halo=get(url)
        sim_obj=get(sim_url)
        snap_obj=get(snap_url)
        np.save(npyf,sub)
        file = get(url+"/cutout.hdf5",params,savepath)
        download = True
        #add attributes to header that Sunrise needs for GFM setting.. time, Omega, npart?
        with h5py.File(file,'a') as fo:
            header=fo['Header']
            header.attrs['Time']=1.0/(1.0 + snap_obj['redshift'])#actually scale factor
            header.attrs['HubbleParam']=sim_obj['hubble']
            header.attrs['Omega0']=sim_obj['omega_0']
            header.attrs['OmegaLambda']=sim_obj['omega_L']
            #npart = [particledata['len_gas'],particledata['len_dm'],0,0,particledata['len_stars'],particledata['len_bhs']]
            npart=[0,0,0,0,0,0]
            #header.attrs['NumPart_ThisFile']=np.asarray(npart)
            mtable=[0,sim_obj['mass_dm'],0,0,0,0]
            header.attrs['MassTable']=np.asarray(mtable)
            header.attrs['Redshift']=snap_obj['redshift']
            #fix issue with ParticleIDs.. ?
            for pt,i in zip(['PartType0','PartType1','PartType4','PartType5'],[0,1,4,5]):
                #quantities exist?
                nquant=len(fo[pt].keys())
                if nquant > 0:
                    ids_length=fo[pt]['ParticleIDs'].value.shape[0]
                    if ids_length > 0:
                        # Renumber particle IDs 0..N-1 within the cutout.
                        fo[pt]['ParticleIDs'][:]=np.arange(ids_length,dtype=np.uint64)
                else:
                    ids_length=0
                npart[i]=ids_length
            header.attrs['NumPart_ThisFile']=np.asarray(npart)
    except:
        # Reset outputs so a partial download is not mistaken for
        # success, then re-raise the original exception.
        file = None
        sub = None
        download = False
        raise
    return file, sub, download
def process_subhalo(sim,snap,sfid,i,label,camera_obj,params=defaultparams,savepath=None,verbose=True,clobber=False,getlabel='StellarMass',resample=False):
    """Fetch one subhalo's cutout and prepare its Sunrise run directory.

    Returns (cutout_filename, substuff); substuff is currently always
    None.  NOTE(review): the params argument is accepted but ignored --
    get_subhalo below is called with defaultparams instead; also
    camera_obj, getlabel and resample are currently unused.  Confirm
    before relying on any of them.
    """
    #defaultparams={'stars':'Coordinates,Velocities,GFM_StellarFormationTime,GFM_Metallicity,GFM_InitialMass,Masses'}
    file, sub, downloaded = get_subhalo(sim,snap,sfid,params=defaultparams,savepath=savepath,verbose=verbose,clobber=clobber)
    substuff = None
    # Periodic progress report with elapsed minutes since module import.
    if i % 100 ==0:
        print("Finished: ", i, snap, sfid, downloaded, (time.time() - start_time)/60.0, file)
        sys.stdout.flush()
    if file is None:
        print("Subhalo Failed: ", i, snap, sfid, downloaded, file)
        sys.stdout.flush()
        return (file, substuff)
    #check if resampled cutout exists, if so don't bother loading
    dirname = os.path.dirname(file)
    resampfile = os.path.join(dirname,'resampled_'+str(sfid)+'.hdf5')
    # NOTE(review): resamp_exists is computed but never used below.
    resamp_exists = os.path.lexists(resampfile)
    #if not resamp_exists:
    #load HDF5 file relevant fields
    #    with h5py.File(file) as h5f:
    #        masses = np.asarray(h5f['PartType4']['Masses'])*(1.0e10)/ilh  #convert to solar masses
    #        formfactors = np.asarray(h5f['PartType4']['GFM_StellarFormationTime']) #in scale factor
    #        xpos = np.asarray(h5f['PartType4']['Coordinates'][:,0])-pos[0]  #in ckpc/h
    #        ypos = np.asarray(h5f['PartType4']['Coordinates'][:,1])-pos[1]
    #        zpos = np.asarray(h5f['PartType4']['Coordinates'][:,2])-pos[2]
    #print masses.shape, ypos.shape, np.mean(ypos)
    #resample star particles
    #project into an image to return... or output Sunrise input files as needed
    #return sunrise directory... relative to savepath?
    # Ensure the per-label Sunrise output directory exists.
    sundir = os.path.join(dirname,label)
    if not os.path.lexists(sundir):
        os.mkdir(sundir)
    #sfrhist -- translate to center of subhalo
    #mcrx -- move cameras to negative subhalo position, set FOV smartly
    #camera position = -camdir*distance; distance from lightcone catalog Z and tz ?  or camdir dot XYZ ?
    #broadband -- always redshift?
    #ideas for options:  F435W, F606W, F850LP, F105W, F125W, F160W, NIRCAM, Mstar, Mgas, SFR, Simonsized H-alpha lines?
    return (file, substuff)
def worker(input, output, **kwargs):
    """Multiprocessing worker loop.

    Pulls (func, args) tasks off the *input* queue until the 'STOP'
    sentinel arrives, computing each via calculate() and pushing the
    result onto the *output* queue.  Extra keyword options are
    forwarded to every call.
    """
    for task_func, task_args in iter(input.get, 'STOP'):
        result = calculate(task_func, task_args, **kwargs)
        output.put(result)
def calculate(func, args, **kwargs):
    """Apply *func* to the positional *args* tuple plus any keyword
    options, returning its result unchanged."""
    return func(*args, **kwargs)
def get_lightcone_images_threaded(lcfile,geofile,sim='Illustris-2',clobber=False,savepath='/astro/snyder_lab2/Illustris',Np=2,maxq=10000,lim=None,label='FIELD'):
    """Download lightcone subhalo cutouts in parallel worker processes.

    NOTE(review): 'ilc' is not imported anywhere in this file's visible
    import block -- as written this raises NameError at the
    process_lightcone_catalog call unless ilc is injected elsewhere.
    """
    data = ascii.read(lcfile)
    snapnums = np.asarray(data['col1'],dtype='str')
    sfids = np.asarray(data['col2'],dtype='str')
    x_cmpc = np.asarray(data['col13'])
    y_cmpc = np.asarray(data['col14'])
    z_cmpc = np.asarray(data['col15'])
    coords_ra = np.asarray(data['col3']) #degrees
    coords_dec = np.asarray(data['col4']) #degrees
    catalog = ilc.process_lightcone_catalog(lightcone=geofile,basedir='.')
    print(catalog.delb_arcmin)
    NUMBER_OF_PROCESSES=Np
    task_queue = Queue()
    done_queue = Queue()
    TASKS = []
    TASKS_DONE = []
    TASKS_LEFT = []
    N_objects = snapnums.shape[0]
    #figure out how to drain the queue before it fills the pipe
    if lim is None:
        lim=np.int64(N_objects)
    print("Items to process: ", lim)
    # Seed at most maxq tasks into the queue up front; the remainder are
    # fed in one-by-one as results drain (keeps the pipe from filling).
    # NOTE(review): 'i <= maxq' enqueues maxq+1 initial tasks while the
    # final drain loop below reads only min(maxq, lim) results -- looks
    # like one result may be left uncollected; verify.
    for i,sn in enumerate(snapnums[0:lim]):
        this_sfid = sfids[i]
        this_x = x_cmpc[i]
        this_y = y_cmpc[i]
        this_z = z_cmpc[i]
        this_ra = coords_ra[i]
        this_dec = coords_dec[i]
        task = (process_subhalo,(sim,sn,this_sfid,i,label,None))
        if i <= maxq:
            task_queue.put(task)
            TASKS.append(task)
        else:
            TASKS_LEFT.append(task)
    for p in range(NUMBER_OF_PROCESSES):
        Process(target=worker,args=(task_queue,done_queue),kwargs={'savepath':savepath,'verbose':False,'clobber':clobber}).start()
    cutout_files = []
    # Drain one result for each deferred task, refilling the queue.
    while len(TASKS_LEFT) > 0:
        cutout_files.append(done_queue.get())
        newtask = TASKS_LEFT.pop()
        task_queue.put(newtask)
    # Collect the remaining in-flight results.
    for i in range(min(maxq,lim)):
        cutout_files.append(done_queue.get())
    #build up images/analysis here!
    print(cutout_files[0:5])
    print(cutout_files[-5:])
    print(len(cutout_files))
    # Tell every worker to exit its task loop.
    for p in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
    return None
def get_lightcone_images(lcfile,geofile,sim='Illustris-2',clobber=False,savepath='/astro/snyder_lab2/Illustris'):
    """Serial variant: download the cutout for every lightcone entry.

    NOTE(review): geofile is accepted but unused here, and the 3-tuple
    returned by get_subhalo is bound to f and then discarded.
    """
    data = ascii.read(lcfile)
    snapnums = np.asarray(data['col1'],dtype='str')
    sfids = np.asarray(data['col2'],dtype='str')
    for i,sn in enumerate(snapnums):
        this_sfid = sfids[i]
        #get file.  will skip download if it already exists
        f = get_subhalo(sim,sn,this_sfid,savepath=savepath,verbose=True,clobber=clobber)
        #obtain fields, place at desired position, project, compute densities and luminosities
        #for projection, how?  use lightcone direction?  if so, must save somewhere!
    return
def prep_lightcone_data(lim=-1,clobber=False,verbose=True,
                        lcfile='$HOME/PythonCode/mock-surveys/Lightcones/Illustris-1_RADEC_wfirst_75Mpc_11_10_xyz.txt',
                        geofile='$HOME/PythonCode/mock-surveys/Lightcones/hudf_75Mpc_11_10_136snaps_fixedh_xyz_NEW.txt',
                        sim='Illustris-1',
                        savepath='$HOME/oasis_project_hsc102/IllustrisData/',
                        label='FIELDA_11_10',
                        rad_fact=20.0,
                        run_type='images'):
    """Download cutouts and write Sunrise submit scripts plus an image
    catalog for every entry of a lightcone.

    lim=-1 processes all entries.  Writes <label>_<run_type>.txt (one
    catalog row per subhalo) and submit_all_<label>.sh under
    savepath/label.
    """
    lcfile=os.path.expandvars(lcfile)
    geofile=os.path.expandvars(geofile)
    savepath=os.path.expandvars(savepath)
    data = ascii.read(lcfile)
    snapnums = np.asarray(data['col1'],dtype='str')
    sfids = np.asarray(data['col2'],dtype='str')
    x_mpc=np.float64(np.asarray(data['col28'],dtype='str'))  #physical mpc
    y_mpc=np.float64(np.asarray(data['col29'],dtype='str'))
    z_mpc=np.float64(np.asarray(data['col30'],dtype='str'))
    redshift = np.float64(np.asarray(data['col10'],dtype='str'))
    ra_deg=np.float64(np.asarray(data['col3'],dtype='str'))
    dec_deg=np.float64(np.asarray(data['col4'],dtype='str'))
    submitcount=0
    #save image geometry -- npix, fov somewhere for image reconstruction?
    lightcone_dir=os.path.join(savepath,label)
    if not os.path.lexists(lightcone_dir):
        os.mkdir(lightcone_dir)
    # Keep copies of the input catalogs alongside the outputs.
    shutil.copy(lcfile,lightcone_dir)
    shutil.copy(geofile,lightcone_dir)
    image_catalog_file=os.path.join(lightcone_dir,label+'_'+run_type+'.txt')
    pixsize_arcsec=0.032
    # Parse geometry values out of the lightcone geometry file.
    lines = open(geofile,'r')
    for l in lines:
        if "Comoving Single Box L" in l:
            L_comoving = np.float32(l.split()[-1])
            L_comovingh = round(L_comoving*gsu.ilh,4)
        if "Delta Unit Vector" in l:
            # NOTE(review): 'ss' shadows the module-level scipy.stats
            # alias within this function body.
            ss = l.split("[")[-1].split("]")[0].split()
            xs = ss[0]
            ys = ss[1]
            zs = ss[2]
        if "Direction Unit Vector" in l:
            ss = l.split("[")[-1].split("]")[0].split()
            xd = ss[0]
            yd = ss[1]
            zd = ss[2]
        if "del B" in l:
            delb_arcmin = np.float32(l.split()[-1])
        if "del A" in l:
            dela_arcmin = np.float32(l.split()[-1])
    lines.close()
    # NOTE(review): if the geometry file lacks one of these lines the
    # names are never bound, so these asserts raise NameError rather
    # than AssertionError.
    assert xs is not None
    assert xd is not None
    assert L_comoving is not None
    # Full field of view and an integer pixel grid at fixed pixel scale.
    full_fov_arcsec=60.0*delb_arcmin
    npix_float = full_fov_arcsec/pixsize_arcsec
    npix_int = np.int64(npix_float)
    final_fov_arcsec = np.float64(npix_int)*pixsize_arcsec
    icfo= open(image_catalog_file,'w')
    icfo.write('sim snap sfid z RA DEC origin_i origin_j pos_i pos_j pixsize_arcsec final_fov_arcsec full_npix this_npix this_fov_kpc halfmassrad_factor nrays run_dir\n')
    subfo=open(os.path.join(lightcone_dir,'submit_all_'+label+'.sh'),'w')
    if lim==-1:
        lim=snapnums.shape[0]
    for i,sn in enumerate(snapnums[0:lim]):
        this_sfid = sfids[i]
        pos_mpc={}
        pos_mpc['x']=x_mpc[i]
        pos_mpc['y']=y_mpc[i]
        pos_mpc['z']=z_mpc[i]
        this_z=redshift[i]
        #get file.  will skip download if it already exists
        f,s,d = get_subhalo(sim,sn,this_sfid,savepath=savepath,verbose=verbose,clobber=clobber)
        # Every 100th subhalo starts a new submit script entry
        # (append=False); the rest append to the current one.
        if i % 100==0:
            submitcount=submitcount+1
            ret_dict=isu.setup_sunrise_lightcone(f,s,label,this_z,geofile,pos_mpc,submitcount,lightcone_dir,append=False,pixsize_arcsec=pixsize_arcsec,rad_fact=rad_fact,run_type=run_type)
            subfo.write('sbatch '+ret_dict['submitfile']+'\n')
            print(' Completed.. ',i)
        else:
            ret_dict= isu.setup_sunrise_lightcone(f,s,label,this_z,geofile,pos_mpc,submitcount,lightcone_dir,append=True,pixsize_arcsec=pixsize_arcsec,rad_fact=rad_fact,run_type=run_type)
        #obtain fields, place at desired position, project, compute densities and luminosities
        this_npix=ret_dict['this_npix']
        this_fov_kpc=ret_dict['fov_kpc']
        nrays=ret_dict['nrays']
        # Convert RA/Dec offsets into pixel coordinates on the full grid.
        obj_pos_i = np.float64( (ra_deg[i]*3600.0 + full_fov_arcsec/2.0)/pixsize_arcsec )
        obj_pos_j = np.float64( (dec_deg[i]*3600.0 + full_fov_arcsec/2.0)/pixsize_arcsec )
        origin_i = np.int64( np.round(obj_pos_i - this_npix/2.0) )  #aligns object to grid
        origin_j = np.int64( np.round(obj_pos_j - this_npix/2.0) )
        pos_i = origin_i + this_npix/2.0  #final true position of subhalo
        pos_j = origin_j + this_npix/2.0
        used_radfact=ret_dict['rad_fact']
        icfo.write('{:12s} {:8d} {:12d} {:12.6f} {:12.6f} {:12.6f} {:10d} {:10d} {:12.6f} {:12.6f} '
                   '{:10.6f} {:16.4f} {:10d} {:10d} {:12.6f} {:12.6f} {:12d} {:100s}\n'.format(sim,np.int64(sn),np.int64(this_sfid),
                                                                                               np.float64(this_z),
                                                                                               ra_deg[i],dec_deg[i],origin_i,origin_j,pos_i,pos_j,
                                                                                               pixsize_arcsec,final_fov_arcsec,
                                                                                               npix_int,this_npix,this_fov_kpc,used_radfact,nrays,ret_dict['run_dir']))
    subfo.close()
    icfo.close()
    return
def do_lightcone_images(savepath=None):
    """Driver: run the threaded lightcone download for a fixed field.

    Requires savepath; otherwise prints a message and exits the
    interpreter (note: exit(), not a raised exception).
    """
    if savepath is None:
        print("Requires savepath specification!")
        exit()
    lcfile = os.path.expandvars('$HOME/oasis_project/Lightcones/Illustris-2_RADEC_hudfwide_75Mpc_7_6_xyz_corners.txt')
    geofile = os.path.expandvars('$HOME/oasis_project/Lightcones/hudfwide_75Mpc_7_6_fixedh_xyz_NEW.txt')
    label = 'FIELDA'
    #note, for Gordon, stage intermediate lightcone data on local scratch?  Do this outside python: FASTER
    #if not os.path.lexists(final_savepath):
    #    os.mkdir(final_savepath)
    #print "Staging files... ", final_savepath, intermediate_savepath
    #staget = time.time()
    #shutil.move(final_savepath,intermediate_savepath)
    #staget = time.time() - staget
    #print "Staging took: ", staget, ' seconds'
    #st1 = time.time()
    #get_lightcone_images(lcfile,geofile,sim='Illustris-2',clobber=False,savepath=savepath)
    #et1 = time.time()
    st = time.time()
    result = get_lightcone_images_threaded(lcfile,geofile,sim='Illustris-2',clobber=False,savepath=savepath,Np=8,lim=None,maxq=10000,label=label)
    et = time.time()
    #return files to temp project space
    #print "UnStaging files... ", intermediate_savepath, final_savepath
    #staget = time.time()
    #shutil.move(intermediate_savepath,final_savepath)
    #staget = time.time() - staget
    #print "Un-Staging took: ", staget, ' seconds'
    print('Threaded calculation took: ', et-st, ' seconds')
    #print 'Serial calculation took: ', et1-st1, ' seconds'
# Script entry point: expects exactly one argument, the directory in
# which intermediate lightcone files will be staged.
if __name__=="__main__":
    if len(sys.argv) != 2:
        print("Usage:  python illustris_api_utils_gordon.py SAVEPATH")
        exit()
    else:
        savepath=sys.argv[1]
        print("Saving intermediate files at: ", savepath)
        do_lightcone_images(savepath=savepath)
| |
#!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
# Copyright 2011-2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.openflow.libopenflow_01 import *
from pox.datapaths.switch import *
def extract_num(buf, start, length):
  """ extracts a number from a raw byte string. Assumes network byteorder """
  # note: purposefully does /not/ use struct.unpack, because that is used by the code we validate
  val = 0
  for i in range(start, start+length):
    val <<= 8
    # Indexing a Python 3 bytes object yields ints, while a Python 2
    # str (and Py3 str) yields 1-character strings -- normalize both.
    byte = buf[i]
    if not isinstance(byte, int):
      byte = ord(byte)
    val += byte
  return val
class ofp_match_test(unittest.TestCase):
  """ Unit tests for the wildcard handling of ofp_match.

  Covers the bit-level wildcard flags, the CIDR-encoded nw_src/nw_dst
  wildcards, and matches_with_wildcards().
  """

  def test_bit_wildcards(self):
    """ some checking of the bit-level wildcard magic in ofp_match"""
    m = ofp_match()
    # all match entries should start out as wildcarded
    for k,v in ofp_match_data.iteritems():
      self.assertEquals(getattr(m, k), None, "Attr %s should be wildcarded and reported as None" % k)
      self.assertEquals(m.wildcards & v[1], v[1])
    # try setting and unsetting specific bit-level match entries
    # each tuple is (attribute name, concrete value, wildcard bit flag)
    for change in [ ("in_port", 1, OFPFW_IN_PORT), ("dl_vlan", 2, OFPFW_DL_VLAN), ("tp_dst", 22, OFPFW_TP_DST) ]:
      setattr(m, change[0], change[1])
      self.assertEquals(getattr(m, change[0]), change[1], "Attr %s should have been set to %s" % change[0:2])
      # setting a concrete value must clear the corresponding wildcard bit
      self.assertEquals(m.wildcards & change[2], 0, "with %s set to %s, wildcard bit %x should get unset" % change)
      # resetting to None must set the wildcard bit again
      setattr(m, change[0], None)
      self.assertEquals(m.wildcards & change[2], change[2], "with %s reset from %s, wildcard bit %x should be set again" % change)

  def test_ip_wildcard_magic(self):
    """ ofp_match: check IP wildcard magic"""
    # do this for both nw_src and nw_dst
    for (attr, bitmask, shift) in ( ("nw_src", OFPFW_NW_SRC_MASK, OFPFW_NW_SRC_SHIFT), ( "nw_dst", OFPFW_NW_DST_MASK, OFPFW_NW_DST_SHIFT) ):
      m = ofp_match()
      self.assertEquals(getattr(m, "get_"+attr)(), (None, 0), "get_%s for unset %s should return (None,0)" % (attr, attr))
      # a wildcard field value >= 32 means "fully wildcarded"
      self.assertTrue( ((m.wildcards & bitmask) >> shift) >= 32)
      # set a bunch of ip addresses with or without networks
      for ipnet in ( "10.0.0.0/8", "172.16.0.0/16", "192.168.24.0/24", "1.2.3.4/30", "212.11.225.3"):
        parts = ipnet.split("/")
        ip = parts[0]
        # no explicit prefix length means a /32 (exact host) match
        bits = int(parts[1]) if len(parts)>1 else 32
        # set the IP address
        setattr(m, attr, ipnet)
        # gets converted to just the ip address during query
        self.assertEqual(getattr(m, attr), ip)
        # the get_#{attr} method gives a tuple of (ip, cidr-bits)
        self.assertEqual( getattr(m, "get_"+attr)(), (ip, bits))
        # the appropriate bits in the wildcard should be set
        # (wildcard field encodes the number of *ignored* low bits: 32-bits)
        self.assertEqual( (m.wildcards & bitmask) >> shift, 32-bits)
      # reset to 0.0.0.0/0 results in full wildcard
      setattr(m, attr, "0.0.0.0/0")
      self.assertEquals(getattr(m, "get_"+attr)(), (None, 0), "get_%s for unset %s should return (None,0)" % (attr, attr))
      self.assertTrue( ((m.wildcards & bitmask) >> shift) >= 32)

  def test_match_with_wildcards(self):
    """ ofp_match: test the matches_with_wildcards method """
    def create(wildcards=(), **kw):
      # Fully-specified reference match; 'wildcards' lists attributes to
      # reset to None, and keyword args override individual fields.
      m = ofp_match(in_port=1, dl_type=0, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), dl_vlan=5, nw_proto=6, nw_src="10.0.0.1", nw_dst="11.0.0.1", tp_src = 12345, tp_dst=80)
      if isinstance(wildcards, str):
        wildcards = [wildcards]
      for w in wildcards:
        setattr(m, w, None)
      for (k,v) in kw.iteritems():
        m.__setattr__(k,v)
      return m
    def assertMatch(ref, other, msg=""):
      self.assertTrue(ref.matches_with_wildcards(other), "%s - %s should match %s " % (msg, ref.show(), other.show()))
    def assertNoMatch(ref, other, msg=""):
      self.assertFalse(ref.matches_with_wildcards(other), "%s - %s should NOT match %s " % (msg, ref.show(), other.show()))
    ref = create()
    #print ref
    # same instances match
    assertMatch(ref, ref)
    # equal instances match
    assertMatch(ref, create())
    # ofp_match with additional wildcard bits set match the ref, but not the other way round
    for wildcards in ( [ "in_port" ], [ "dl_vlan" ], [ "dl_src", "dl_dst" ] ):
      wilder = create(wildcards=wildcards)
      assertMatch(wilder, ref)
      assertNoMatch(ref, wilder)
    # when fields are wildcarded, we can change around the actual values and it will still match
    for changes in ( { "in_port": 15 }, { "dl_src": "12:34:56:78:90:ab", "dl_vlan": 7 }, { "tp_dst" : 22 } ):
      wild = create()
      concrete = create()
      for (k,v) in changes.iteritems():
        setattr(wild, k, None)
        setattr(concrete, k, v)
      assertMatch(wild, concrete)
      assertNoMatch(concrete, wild)
    # play around with nw src addresses: a wider (shorter-prefix) network
    # matches a narrower one contained within it, but not vice versa
    assertMatch(create(nw_src="10.0.0.0/24"), ref)
    assertMatch(create(nw_src="10.0.0.0/24"), create(nw_src="10.0.0.0/25"))
    assertNoMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.0/24"))
    assertMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.127"))
    assertNoMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.128"))
class ofp_command_test(unittest.TestCase):
  """ Pack/unpack round-trip tests for the OpenFlow command messages.

  Each message is packed, its header fields are validated against the raw
  byte array, then it is unpacked again and compared to the original.
  """

  # custom map of POX class to header type, for validation
  # (fix: ofp_set_config appeared twice as a dict key; duplicate literal
  # keys silently overwrite each other, so the redundant entry was removed)
  ofp_type = {
      ofp_features_reply: OFPT_FEATURES_REPLY,
      ofp_set_config: OFPT_SET_CONFIG,
      ofp_flow_mod: OFPT_FLOW_MOD,
      ofp_port_mod: OFPT_PORT_MOD,
      ofp_queue_get_config_request: OFPT_QUEUE_GET_CONFIG_REQUEST,
      ofp_queue_get_config_reply: OFPT_QUEUE_GET_CONFIG_REPLY,
      ofp_stats_request: OFPT_STATS_REQUEST,
      ofp_stats_reply: OFPT_STATS_REPLY,
      ofp_packet_out: OFPT_PACKET_OUT,
      ofp_barrier_reply: OFPT_BARRIER_REPLY,
      ofp_barrier_request: OFPT_BARRIER_REQUEST,
      ofp_packet_in: OFPT_PACKET_IN,
      ofp_flow_removed: OFPT_FLOW_REMOVED,
      ofp_port_status: OFPT_PORT_STATUS,
      ofp_error: OFPT_ERROR,
      ofp_hello: OFPT_HELLO,
      ofp_echo_request: OFPT_ECHO_REQUEST,
      ofp_echo_reply: OFPT_ECHO_REPLY,
      ofp_vendor_generic: OFPT_VENDOR,
      ofp_features_request: OFPT_FEATURES_REQUEST,
      ofp_get_config_request: OFPT_GET_CONFIG_REQUEST,
      ofp_get_config_reply: OFPT_GET_CONFIG_REPLY,
  }

  def assert_packed_header(self, pack, ofp_type, length, xid):
    """ check openflow header fields in packed byte array """
    def assert_num(name, start, length, expected):
      val = extract_num(pack, start, length)
      self.assertEquals(val, expected, "packed header check: %s for ofp type %s should be %d (is %d)" % (name, ofp_type_map[ofp_type], expected, val))
    # ofp_header layout: version(1) | type(1) | length(2) | xid(4)
    assert_num("OpenFlow version", 0, 1, 1)
    assert_num("header_type", 1, 1, ofp_type)
    assert_num("length in header", 2, 2, length)
    assert_num("xid", 4, 4, xid)

  def _test_pack_unpack(self, o, xid, ofp_type=None):
    """ check that packing and unpacking an ofp object works, and that lengths etc. are correct """
    # fix: was `lambda(o): ... else str(show)` -- tuple-parameter lambdas
    # are Python-2-only syntax, and the fallback stringified the lambda
    # itself instead of the object being shown
    show = lambda o: o.show() if hasattr(o, "show") else str(o)
    if not ofp_type:
      ofp_type = self.ofp_type[type(o)]
    self.assertTrue(o._assert(), "pack_unpack for %s -- original object should _assert to true"%show(o))
    # show the object to make sure that works
    o.show()
    # pack object
    pack = o.pack()
    # byte array length should equal calculated length
    self.assertEqual(len(o), len(pack), "pack_unpack for %s -- len(object)=%d != len(packed)=%d" % (type(o), len(o), len(pack)))
    # check header fields in packed byte array
    self.assert_packed_header(pack, ofp_type, len(o), xid)
    # now unpack
    unpacked = type(o)()
    unpacked.unpack(pack)
    self.assertEqual(o, unpacked, "pack_unpacked -- original != unpacked\n===Original:\n%s\n===Repacked:%s\n" % (show(o), show(unpacked)))
    return unpacked

  def test_header_pack_unpack(self):
    for kw in ( { "header_type": OFPT_PACKET_OUT, "xid": 1 },
                { "header_type": OFPT_FLOW_MOD, "xid": 2 }):
      # Can't directly pack a header, since it has no length...
      class H (ofp_header):
        def __len__ (self):
          return 8
      o = H(**kw)
      self._test_pack_unpack(o, kw["xid"], kw["header_type"])

  def test_pack_all_comands_simple(self):
    """ Round-trip every command class with (mostly) default fields. """
    xid_gen = xid_generator()
    # fix: ofp_get_config_reply and ofp_set_config were listed twice
    for cls in ( ofp_features_reply,
                 ofp_set_config,
                 ofp_get_config_reply,
                 ofp_flow_mod,
                 ofp_port_mod,
                 ofp_queue_get_config_request,
                 ofp_queue_get_config_reply,
                 ofp_stats_request,
                 ofp_stats_reply,
                 ofp_packet_out,
                 ofp_barrier_reply,
                 ofp_barrier_request,
                 ofp_packet_in,
                 ofp_flow_removed,
                 ofp_port_status,
                 ofp_error,
                 ofp_hello,
                 ofp_echo_request,
                 ofp_echo_reply,
                 ofp_features_request,
                 ofp_get_config_request ):
      xid = xid_gen()
      args = {}
      # Customize initializer -- stats messages need a valid body
      if cls is ofp_stats_reply:
        args['body'] = ofp_desc_stats(sw_desc="POX")
      elif cls is ofp_stats_request:
        args['body'] = ofp_vendor_stats_generic(vendor=0xcafe)
      o = cls(xid=xid, **args)
      self._test_pack_unpack(o, xid)

  # shared action lists used by the packet_out / flow_mod tests below
  out = ofp_action_output
  dl_addr = ofp_action_dl_addr
  some_actions = ([], [out(port=2)], [out(port=2), out(port=3)], [ out(port=OFPP_FLOOD) ], [ dl_addr.set_dst(EthAddr("00:"*5 + "01")), out(port=1) ])

  def test_pack_custom_packet_out(self):
    xid_gen = xid_generator()
    packet = ethernet(src=EthAddr("00:00:00:00:00:01"), dst=EthAddr("00:00:00:00:00:02"),
                      payload=ipv4(srcip=IPAddr("1.2.3.4"), dstip=IPAddr("1.2.3.5"),
                                   payload=udp(srcport=1234, dstport=53, payload="haha"))).pack()
    for actions in self.some_actions:
      # a packet_out carries either raw packet data or a buffer id
      for attrs in ( { 'data': packet }, { 'buffer_id': 5 } ):
        xid = xid_gen()
        o = ofp_packet_out(xid=xid, actions=actions, **attrs)
        self._test_pack_unpack(o, xid, OFPT_PACKET_OUT)

  def test_pack_flow_mod_openflow_dl_type_wildcards(self):
    """ Openflow 1.1 spec clarifies that wildcards should not be set when the protocol in
        question is not matched i.e., dl_type != 0x800 -> no wildcards for IP.
        Test this here """
    def show_wildcards(w):
      """ Render a wildcard bitmask as a human-readable string. """
      parts = [ k.lower()[len("OFPFW_"):] for (k,v) in ofp_flow_wildcards_rev_map.iteritems() if v & w == v ]
      # fix: this statement was accidentally duplicated
      nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
      if(nw_src_bits > 0): parts.append("nw_src(/%d)" % (32 - nw_src_bits))
      nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
      if(nw_dst_bits > 0): parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
      return "|".join(parts)
    def test_wildcards(match, expected):
      (packed,) = struct.unpack_from("!L", match.pack(flow_mod=True))
      self.assertEquals(packed, expected, "packed: %s <> expected: %s" % (show_wildcards(packed), show_wildcards(expected)))
    # no dl type specified -> wildcards for nw/dl are cleared
    test_wildcards(ofp_match(), OFPFW_ALL & ~ (OFPFW_NW_TOS | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_TP_SRC | OFPFW_TP_DST))
    all_normalized = (OFPFW_ALL & ~ (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK)) | \
        OFPFW_NW_SRC_ALL | OFPFW_NW_DST_ALL
    # dl type = ARP -> certain wildcards live
    test_wildcards(ofp_match(dl_type=0x806), all_normalized & ~ (OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST | OFPFW_DL_TYPE))
    # dl type = IP -> more wildcards live
    test_wildcards(ofp_match(dl_type=0x800), all_normalized & ~ (OFPFW_TP_SRC | OFPFW_TP_DST | OFPFW_DL_TYPE))
    # dl type = IP, nw_proto=UDP -> alll wildcards live
    test_wildcards(ofp_match(dl_type=0x800,nw_proto=6), all_normalized & ~(OFPFW_DL_TYPE | OFPFW_NW_PROTO))

  def test_pack_custom_flow_mod(self):
    out = ofp_action_output
    xid_gen = xid_generator()
    for match in ( ofp_match(),
                   ofp_match(in_port=1, dl_type=0x88cc, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02")),
                   ofp_match(in_port=1, dl_type=0x0806, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), nw_src="10.0.0.1", nw_dst="11.0.0.1"),
                   ofp_match(in_port=1, dl_type=0x0800, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), dl_vlan=5, nw_proto=6, nw_src="10.0.0.1", nw_dst="11.0.0.1", tp_src = 12345, tp_dst=80)):
      for actions in self.some_actions:
        # fix: OFPFC_MODIFY_STRICT was listed twice; OFPFC_MODIFY was
        # clearly intended, so each flow_mod command is now exercised once
        for command in ( OFPFC_ADD, OFPFC_DELETE, OFPFC_DELETE_STRICT, OFPFC_MODIFY, OFPFC_MODIFY_STRICT ):
          for attrs in ( {}, { 'buffer_id' : 123 }, { 'idle_timeout': 5, 'hard_timeout': 10 } ):
            xid = xid_gen()
            o = ofp_flow_mod(xid=xid, command=command, match = match, actions=actions, **attrs)
            unpacked = self._test_pack_unpack(o, xid, OFPT_FLOW_MOD)
            # the unpacked message must reproduce every customized field
            self.assertEqual(unpacked.match, match)
            self.assertEqual(unpacked.command, command)
            self.assertEqual(unpacked.actions, actions)
            for (check_attr,val) in attrs.iteritems():
              self.assertEqual(getattr(unpacked, check_attr), val)
class ofp_action_test(unittest.TestCase):
  """ Pack/unpack round-trip tests for the OpenFlow action structures. """

  def assert_packed_action(self, cls, packed, a_type, length):
    # every ofp_action_* begins with a type(2 bytes) / length(2 bytes) header
    self.assertEqual(extract_num(packed, 0,2), a_type, "Action %s: expected type %d (but is %d)" % (cls, a_type, extract_num(packed, 0,2)))
    self.assertEqual(extract_num(packed, 2,2), length, "Action %s: expected length %d (but is %d)" % (cls, length, extract_num(packed, 2,2)))

  def test_pack_all_actions_simple(self):
    def c(cls, a_type, kw, length):
      # Construct the action with the given kwargs, pack it, validate the
      # packed header, round-trip it, and check every kwarg survived.
      # Returns the packed bytes so callers can inspect the payload.
      action = cls(**kw)
      packed = action.pack()
      self.assertEqual(len(action), len(packed))
      self.assert_packed_action(cls, packed, a_type, length)
      unpacked = cls()
      unpacked.unpack(packed)
      self.assertEqual(action, unpacked)
      for (k, v) in kw.iteritems():
        self.assertEqual(getattr(unpacked, k), v)
      return packed
    c(ofp_action_output, OFPAT_OUTPUT, { 'port': 23 }, 8 )
    c(ofp_action_enqueue, OFPAT_ENQUEUE, { 'port': 23, 'queue_id': 1 }, 16 )
    c(ofp_action_vlan_vid, OFPAT_SET_VLAN_VID, { 'vlan_vid' : 123}, 8 )
    c(ofp_action_vlan_pcp, OFPAT_SET_VLAN_PCP, { 'vlan_pcp' : 123}, 8 )
    # for address-rewriting actions, also verify the packed payload bytes
    p = c(ofp_action_dl_addr.set_dst, OFPAT_SET_DL_DST, { 'dl_addr' : EthAddr("01:02:03:04:05:06").toRaw() }, 16 )
    self.assertEquals(extract_num(p, 4,6), 0x010203040506)
    p = c(ofp_action_dl_addr.set_src, OFPAT_SET_DL_SRC, { 'dl_addr' : EthAddr("ff:ee:dd:cc:bb:aa").toRaw() }, 16 )
    self.assertEquals(extract_num(p, 4,6), 0xffeeddccbbaa, "Ethernet in packed is %x, but should be ff:ee:dd:cc:bb:aa" % extract_num(p, 4, 6))
    p = c(ofp_action_nw_addr.set_dst, OFPAT_SET_NW_DST, { 'nw_addr' : IPAddr("1.2.3.4") }, 8 )
    self.assertEquals(extract_num(p, 4,4), 0x01020304)
    p = c(ofp_action_nw_addr.set_src, OFPAT_SET_NW_SRC, { 'nw_addr' : IPAddr("127.0.0.1") }, 8 )
    self.assertEquals(extract_num(p, 4,4), 0x7f000001)
    c(ofp_action_nw_tos, OFPAT_SET_NW_TOS, { 'nw_tos' : 4 }, 8)
    p = c(ofp_action_tp_port.set_dst, OFPAT_SET_TP_DST, { 'tp_port' : 80 }, 8)
    self.assertEquals(extract_num(p, 4,2), 80)
    p = c(ofp_action_tp_port.set_src, OFPAT_SET_TP_SRC, { 'tp_port' : 22987 }, 8)
    self.assertEquals(extract_num(p, 4,2), 22987)
    # MPLS actions are not implemented/tested yet:
    # c(ofp_action_push_mpls, OFPAT_PUSH_MPLS, {'ethertype':0x8847}, 8)
    # c(ofp_action_pop_mpls, OFPAT_POP_MPLS, {'ethertype':0x0800}, 8)
    # c(ofp_action_mpls_dec_ttl, OFPAT_DEC_MPLS_TTL, {}, 8)
    # c(ofp_action_mpls_label, OFPAT_SET_MPLS_LABEL, {'mpls_label': 0xa1f}, 8)
    # c(ofp_action_mpls_tc, OFPAT_SET_MPLS_TC, {'mpls_tc': 0xac}, 8)
    # c(ofp_action_mpls_ttl, OFPAT_SET_MPLS_TTL, {'mpls_ttl': 0xaf}, 8)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# this script exports json-files from a canmatrix-object
# json-files are the can-matrix-definitions of the CANard-project
# (https://github.com/ericevenchick/CANard)
from __future__ import absolute_import, division, print_function
import json
import sys
import typing
from builtins import *
import canmatrix
def _get_start_bit(signal, motorola_bit_format):
    # type: (canmatrix.Signal, str) -> int
    """Return *signal*'s start bit in the requested bit numbering.

    Little-endian signals always use LSB numbering.  Big-endian (Motorola)
    signals honour *motorola_bit_format*: "msb", "msbreverse", or the
    default "lsb".  (Extracted helper: this logic was duplicated three
    times in dump().)
    """
    if not signal.is_little_endian:
        if motorola_bit_format == "msb":
            return signal.get_startbit(bit_numbering=1)
        if motorola_bit_format == "msbreverse":
            return signal.get_startbit()
    # little-endian signals, and big-endian with the default "lsb" format
    return signal.get_startbit(bit_numbering=1, start_little=True)


def dump(db, f, **options):
    # type: (canmatrix.CanMatrix, typing.BinaryIO, **str) -> None
    """Write *db* to *f* as JSON.

    Options:
      jsonCanard            -- emit the compact CANard-project layout
      jsonMotorolaBitFormat -- start-bit numbering for big-endian signals
                               ("lsb" (default), "msb" or "msbreverse")
      jsonAll               -- also export attributes, values, comments, ...
      jsonNativeTypes       -- emit numbers as floats instead of strings
      additionalFrameAttributes -- comma-separated frame attributes to add
    """
    export_canard = options.get('jsonCanard', False)
    motorola_bit_format = options.get('jsonMotorolaBitFormat', "lsb")
    export_all = options.get('jsonAll', False)
    native_types = options.get('jsonNativeTypes', False)
    # stringify numbers unless native JSON types were requested
    number_converter = float if native_types else str
    additional_frame_columns = [x for x in options.get("additionalFrameAttributes", "").split(",") if x]

    export_array = []  # type: typing.List[typing.Union[str, float, list, dict]]
    if export_canard:
        # compact layout used by the CANard project: signals keyed by start bit
        for frame in db.frames:
            signals = {}
            for signal in frame.signals:
                signals[
                    signal.get_startbit(
                        bit_numbering=1,
                        start_little=True)] = {
                    "name": signal.name,
                    "bit_length": signal.size,
                    "factor": signal.factor,
                    "offset": signal.offset}
            export_array.append(
                {"name": frame.name, "id": hex(frame.arbitration_id.id), "signals": signals})
    elif export_all is False:
        # default layout: one dict per frame with the essential signal fields
        for frame in db.frames:
            symbolic_signals = []
            for signal in frame.signals:
                start_bit = _get_start_bit(signal, motorola_bit_format)
                symbolic_signals.append({
                    "name": signal.name,
                    "start_bit": start_bit,
                    "bit_length": signal.size,
                    "factor": number_converter(signal.factor),
                    "offset": number_converter(signal.offset),
                    "is_big_endian": signal.is_little_endian is False,
                    "is_signed": signal.is_signed,
                    "is_float": signal.is_float,
                })
            symbolic_frame = {"name": frame.name,
                              "id": int(frame.arbitration_id.id),
                              "is_extended_frame": frame.arbitration_id.extended,
                              "signals": symbolic_signals}
            frame_attributes = {
                attr: frame.attribute(attr)
                for attr in additional_frame_columns
                if frame.attribute(attr) is not None  # don't export None parameters
            }
            if frame_attributes:  # only add attributes if there are any
                symbolic_frame["attributes"] = frame_attributes
            export_array.append(symbolic_frame)
    else:  # export_all: include attributes, value tables, comments, ...
        for frame in db.frames:
            frame_attributes = {attribute: frame.attribute(attribute, db=db) for attribute in db.frame_defines}
            symbolic_signals = []
            for signal in frame.signals:
                attributes = {attribute: signal.attribute(attribute, db=db) for attribute in db.signal_defines}
                values = {key: signal.values[key] for key in signal.values}
                start_bit = _get_start_bit(signal, motorola_bit_format)
                symbolic_signal = {
                    "name": signal.name,
                    "start_bit": start_bit,
                    "bit_length": signal.size,
                    "factor": number_converter(signal.factor),
                    "offset": number_converter(signal.offset),
                    "min": number_converter(signal.min),
                    "max": number_converter(signal.max),
                    "is_big_endian": signal.is_little_endian is False,
                    "is_signed": signal.is_signed,
                    "is_float": signal.is_float,
                    "comment": signal.comment,
                    "attributes": attributes,
                    "values": values,
                    "is_multiplexer": signal.is_multiplexer,
                    "mux_value": signal.mux_val
                }
                if signal.multiplex is not None:
                    symbolic_signal["multiplex"] = signal.multiplex
                if signal.unit:
                    symbolic_signal["unit"] = signal.unit
                symbolic_signals.append(symbolic_signal)
            export_array.append(
                {"name": frame.name,
                 "id": int(frame.arbitration_id.id),
                 "is_extended_frame": frame.arbitration_id.extended,
                 "signals": symbolic_signals,
                 "attributes": frame_attributes,
                 "comment": frame.comment,
                 "length": frame.size})

    # f is a binary stream; on Python 3 wrap it so json can write text
    if sys.version_info > (3, 0):
        import io
        temp = io.TextIOWrapper(f, encoding='UTF-8')
    else:
        temp = f
    try:
        json.dump({"messages": export_array}, temp, sort_keys=True,
                  indent=4, separators=(',', ': '))
    finally:
        if sys.version_info > (3, 0):
            # When TextIOWrapper is garbage collected, it closes the raw stream
            # unless the raw stream is detached first
            temp.detach()
def load(f, **_options):
    # type: (typing.BinaryIO, **str) -> canmatrix.CanMatrix
    """Read a JSON CAN-matrix description from the binary stream *f*.

    Understands the "messages" layout written by dump().  Missing optional
    signal fields fall back to sensible defaults.  Note: *f* is closed
    before returning.

    :return: the reconstructed canmatrix.CanMatrix
    """
    db = canmatrix.CanMatrix()

    # f is a binary stream; on Python 3 wrap it so json can read text
    if sys.version_info > (3, 0):
        import io
        json_data = json.load(io.TextIOWrapper(f, encoding='UTF-8'))
    else:
        json_data = json.load(f)

    if "messages" in json_data:
        for frame in json_data["messages"]:
            new_frame = canmatrix.Frame(frame["name"], arbitration_id=frame["id"], size=8)
            if "length" in frame:
                new_frame.size = frame["length"]
            new_frame.arbitration_id.extended = frame.get("is_extended_frame", False)
            for signal in frame["signals"]:
                is_little_endian = not signal.get("is_big_endian", False)
                is_float = signal.get("is_float", False)
                is_signed = signal.get("is_signed", False)
                new_signal = canmatrix.Signal(
                    signal["name"],
                    start_bit=signal["start_bit"],
                    size=signal["bit_length"],
                    is_little_endian=is_little_endian,
                    is_signed=is_signed,
                    is_float=is_float,
                    factor=signal["factor"],
                    offset=signal["offset"]
                )
                # fix: check `is not None` (not truthiness) for min/max and
                # multiplex, so legitimate zero values are not silently
                # dropped (min already used `is not None`; max/multiplex
                # previously used truthy get(..., False))
                if signal.get("min") is not None:
                    new_signal.min = new_signal.float_factory(signal["min"])
                if signal.get("max") is not None:
                    new_signal.max = new_signal.float_factory(signal["max"])
                if signal.get("unit"):
                    new_signal.unit = signal["unit"]
                if signal.get("multiplex") is not None:
                    new_signal.multiplex = signal["multiplex"]
                if signal.get("values"):
                    for key in signal["values"]:
                        new_signal.add_values(key, signal["values"][key])
                if new_signal.is_little_endian is False:
                    # big-endian signals store start bits in LSB numbering
                    new_signal.set_startbit(
                        new_signal.start_bit, bitNumbering=1, startLittle=True)
                new_frame.add_signal(new_signal)
            db.add_frame(new_frame)
    f.close()
    return db
| |
#
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from requests import utils as requests_utils
from gerritclient.v1 import base
class ChangeClient(base.BaseV1Client):
    """Client for the Gerrit REST /changes/ endpoint."""

    api_path = "/changes/"

    def _change_path(self, change_id, suffix=""):
        """Build a request path for *change_id* plus an optional sub-resource.

        The identifier is URL-quoted (with no safe characters) so values
        containing '/', '~', etc. are transmitted correctly.  Extracted
        helper: this format/quote pattern was duplicated in every method.
        """
        return "{api_path}{change_id}{suffix}".format(
            api_path=self.api_path,
            change_id=requests_utils.quote(change_id, safe=''),
            suffix=suffix)

    def get_all(self, query, options=None, limit=None, skip=None):
        """Query changes.

        :param query: Queries as a list of string
        :param options: List of options to fetch additional data about changes
        :param limit: Int value that allows to limit the number of changes
                      to be included in the output results
        :param skip: Int value that allows to skip the given number of
                     changes from the beginning of the list
        :return A list of ChangeInfo entries
        """
        # only forward parameters that were actually supplied
        params = {k: v for k, v in (('o', options),
                                    ('n', limit),
                                    ('S', skip)) if v is not None}
        request_path = "{api_path}{query}".format(
            api_path=self.api_path,
            query="?q={query}".format(query='&q='.join(query)))
        return self.connection.get_request(request_path, params=params)

    def get_by_id(self, change_id, detailed=False, options=None):
        """Retrieve a change.

        :param change_id: Identifier that uniquely identifies one change.
        :param detailed: boolean value, if True then retrieve a change with
                         labels, detailed labels, detailed accounts,
                         reviewer updates, and messages.
        :param options: List of options to fetch additional data about a change
        :return: ChangeInfo entity is returned that describes the change.
        """
        params = {'o': options}
        request_path = self._change_path(change_id,
                                         "/detail" if detailed else "/")
        return self.connection.get_request(request_path, params=params)

    def create(self, data):
        """Create a new change."""
        return self.connection.post_request(self.api_path, json_data=data)

    def delete(self, change_id):
        """Delete a change."""
        return self.connection.delete_request(self._change_path(change_id),
                                              data={})

    def abandon(self, change_id):
        """Abandon a change."""
        request_path = self._change_path(change_id, "/abandon")
        return self.connection.post_request(request_path, json_data={})

    def restore(self, change_id):
        """Restore a change."""
        request_path = self._change_path(change_id, "/restore")
        return self.connection.post_request(request_path, json_data={})

    def revert(self, change_id, message=None):
        """Revert a change."""
        data = {k: v for k, v in (('message', message),) if v is not None}
        request_path = self._change_path(change_id, "/revert")
        return self.connection.post_request(request_path, json_data=data)

    def rebase(self, change_id, parent=None):
        """Rebase a change."""
        data = {k: v for k, v in (('base', parent),) if v is not None}
        request_path = self._change_path(change_id, "/rebase")
        return self.connection.post_request(request_path, json_data=data)

    def move(self, change_id, branch, message=None):
        """Move a change."""
        data = {k: v for k, v in (('destination_branch', branch),
                                  ('message', message)) if v is not None}
        request_path = self._change_path(change_id, "/move")
        return self.connection.post_request(request_path, json_data=data)

    def submit(self, change_id, on_behalf_of=None, notify=None):
        """Submit a change."""
        # TODO(vkulanov): add 'notify_details' field (parameter) support
        data = {k: v for k, v in (('on_behalf_of', on_behalf_of),
                                  ('notify', notify)) if v is not None}
        request_path = self._change_path(change_id, "/submit")
        return self.connection.post_request(request_path, json_data=data)

    def get_topic(self, change_id):
        """Retrieve the topic of a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/topic"))

    def set_topic(self, change_id, topic):
        """Set the topic of a change."""
        request_path = self._change_path(change_id, "/topic")
        return self.connection.put_request(request_path,
                                           json_data={'topic': topic})

    def delete_topic(self, change_id):
        """Delete the topic of a change."""
        request_path = self._change_path(change_id, "/topic")
        return self.connection.delete_request(request_path, data={})

    def get_assignee(self, change_id):
        """Retrieve the account of the user assigned to a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/assignee"))

    def get_assignees(self, change_id):
        """Retrieve a list of every user ever assigned to a change."""
        return self.connection.get_request(
            self._change_path(change_id, "/past_assignees"))

    def set_assignee(self, change_id, account_id):
        """Set the assignee of a change."""
        request_path = self._change_path(change_id, "/assignee")
        return self.connection.put_request(request_path,
                                           json_data={'assignee': account_id})

    def delete_assignee(self, change_id):
        """Delete the assignee of a change."""
        request_path = self._change_path(change_id, "/assignee")
        return self.connection.delete_request(request_path, data={})

    def publish_draft(self, change_id):
        """Publish a draft change."""
        request_path = self._change_path(change_id, "/publish")
        return self.connection.post_request(request_path, json_data={})

    def get_included(self, change_id):
        """Retrieve the branches and tags in which a change is included."""
        return self.connection.get_request(self._change_path(change_id, "/in"))

    def index(self, change_id):
        """Add or update the change in the secondary index."""
        request_path = self._change_path(change_id, "/index")
        return self.connection.post_request(request_path, json_data={})

    def get_comments(self, change_id, comment_type=None):
        """List the published comments of all revisions of the change.

        :param change_id: Identifier that uniquely identifies one change.
        :param comment_type: Type of comments (None|'drafts'|'robotcomments')
                             None - published comments,
                             'drafts' - draft comments,
                             'robotcomments' - robotcomments.
        :return A list of CommentInfo entries.
        """
        suffix = "/" + ('comments' if not comment_type else comment_type)
        return self.connection.get_request(
            self._change_path(change_id, suffix))

    def check_consistency(self, change_id):
        """Perform consistency checks on the change."""
        return self.connection.get_request(
            self._change_path(change_id, "/check"))

    def fix_consistency(self, change_id, is_delete=False,
                        expect_merged_as=False):
        """Perform consistency checks on the change and fixes any problems.

        :param change_id: Identifier that uniquely identifies one change.
        :param is_delete: If True, delete patch sets from the database
                          if they refer to missing commit options.
        :param expect_merged_as: If True, check that the change is merged into
                                 the destination branch as this exact SHA-1.
                                 If not, insert a new patch set referring to
                                 this commit.
        :return Returns a ChangeInfo entity with the problems field values
                that reflect any fixes.
        """
        data = {'delete_patch_set_if_commit_missing': is_delete,
                'expect_merged_as': expect_merged_as}
        request_path = self._change_path(change_id, "/check")
        return self.connection.post_request(request_path, json_data=data)
def get_client(connection):
    """Factory: return a ChangeClient bound to the given API connection."""
    client = ChangeClient(connection)
    return client
| |
# Copyright 2013 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from jacket.api.compute.openstack.compute import pci
from jacket.api.compute.openstack import wsgi
from jacket import context
from jacket.compute import exception
from jacket.objects import compute
from jacket.objects.compute import fields
from jacket.objects.compute import pci_device_pool
from jacket.compute import test
from jacket.tests.compute.unit.api.openstack import fakes
from jacket.tests.compute.unit.objects import test_pci_device
# Canned statistics for a single PCI device pool; reused by the
# hypervisor controller tests below.
pci_stats = [{"count": 3,
              "vendor_id": "8086",
              "product_id": "1520",
              "numa_node": 1}]
# Compute node whose pci_device_pools is built from the stats above.
fake_compute_node = compute.ComputeNode(
    pci_device_pools=pci_device_pool.from_pci_stats(pci_stats))
class FakeResponse(wsgi.ResponseObject):
    """Bare ResponseObject subclass used as a mutable response container."""
    pass
class PciServerControllerTestV21(test.NoDBTestCase):
    """Tests for PciServerController's show/detail response decoration.

    Verifies that server 'show'/'detail' response bodies are annotated
    with an 'os-pci:pci_devices' list for the instance's PCI devices.
    """

    def setUp(self):
        super(PciServerControllerTestV21, self).setUp()
        self.controller = pci.PciServerController()
        # minimal fake 'show' response body that the controller decorates
        self.fake_obj = {'server': {'addresses': {},
                                    'id': 'fb08',
                                    'name': 'a3',
                                    'status': 'ACTIVE',
                                    'tenant_id': '9a3af784c',
                                    'user_id': 'e992080ac0',
                                    }}
        # minimal fake 'detail' response body (list of servers)
        self.fake_list = {'servers': [{'addresses': {},
                                       'id': 'fb08',
                                       'name': 'a3',
                                       'status': 'ACTIVE',
                                       'tenant_id': '9a3af784c',
                                       'user_id': 'e992080ac',
                                       }]}
        # attach a claimed+allocated PCI device to the fake instance so the
        # controller has something to report
        self._create_fake_instance()
        self._create_fake_pci_device()
        self.pci_device.claim(self.inst)
        self.pci_device.allocate(self.inst)

    def _create_fake_instance(self):
        # bare Instance with an empty PCI device list
        self.inst = compute.Instance()
        self.inst.uuid = 'fake-inst-uuid'
        self.inst.pci_devices = compute.PciDeviceList()

    def _create_fake_pci_device(self):
        # stub the DB lookup so PciDevice.get_by_dev_addr returns the
        # canned fake device row
        def fake_pci_device_get_by_addr(ctxt, id, addr):
            return test_pci_device.fake_db_dev
        ctxt = context.get_admin_context()
        self.stub_out('compute.db.pci_device_get_by_addr',
                      fake_pci_device_get_by_addr)
        self.pci_device = compute.PciDevice.get_by_dev_addr(ctxt, 1, 'a')

    def test_show(self):
        def fake_get_db_instance(id):
            return self.inst
        resp = FakeResponse(self.fake_obj, '')
        req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
        self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
        self.controller.show(req, resp, '1')
        # the allocated device (id 1) must appear in the decorated response
        self.assertEqual([{'id': 1}],
                         resp.obj['server']['os-pci:pci_devices'])

    def test_detail(self):
        def fake_get_db_instance(id):
            return self.inst
        resp = FakeResponse(self.fake_list, '')
        req = fakes.HTTPRequest.blank('/os-pci/detail',
                                      use_admin_context=True)
        self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
        self.controller.detail(req, resp)
        # every server entry in the detail list gets the device annotation
        self.assertEqual([{'id': 1}],
                         resp.obj['servers'][0]['os-pci:pci_devices'])
class PciHypervisorControllerTestV21(test.NoDBTestCase):
    """Tests for the hypervisor extension that reports PCI pool stats."""

    def setUp(self):
        super(PciHypervisorControllerTestV21, self).setUp()
        self.controller = pci.PciHypervisorController()
        # Canned API payloads the controller decorates with PCI stats.
        self.fake_objs = dict(hypervisors=[
            dict(id=1,
                 service=dict(id=1, host="compute1"),
                 hypervisor_type="xen",
                 hypervisor_version=3,
                 hypervisor_hostname="hyper1")])
        self.fake_obj = dict(hypervisor=dict(
            id=1,
            service=dict(id=1, host="compute1"),
            hypervisor_type="xen",
            hypervisor_version=3,
            hypervisor_hostname="hyper1"))

    def test_show(self):
        """show() should copy the compute node's PCI pools into the payload."""
        def fake_get_db_compute_node(id):
            return fake_compute_node
        req = fakes.HTTPRequest.blank('/os-hypervisors/1',
                                      use_admin_context=True)
        resp = FakeResponse(self.fake_obj, '')
        self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
        self.controller.show(req, resp, '1')
        self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
        self.assertEqual(pci_stats[0],
                         resp.obj['hypervisor']['os-pci:pci_stats'][0])

    def test_detail(self):
        """detail() should decorate every listed hypervisor with PCI stats."""
        def fake_get_db_compute_node(id):
            return fake_compute_node
        req = fakes.HTTPRequest.blank('/os-hypervisors/detail',
                                      use_admin_context=True)
        resp = FakeResponse(self.fake_objs, '')
        self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
        self.controller.detail(req, resp)
        self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
        self.assertEqual(pci_stats[0],
                         resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
class PciControlletestV21(test.NoDBTestCase):
    """Tests for the standalone os-pci controller.

    NOTE(review): the class name looks like a typo for PciControllerTestV21;
    left unchanged in case anything references it by name.
    """

    def setUp(self):
        super(PciControlletestV21, self).setUp()
        self.controller = pci.PciController()

    def test_show(self):
        """show() should render the canned fake device as a dict."""
        def fake_pci_device_get_by_id(context, id):
            return test_pci_device.fake_db_dev
        self.stub_out('compute.db.pci_device_get_by_id',
                      fake_pci_device_get_by_id)
        req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
        result = self.controller.show(req, '1')
        dist = {'pci_device': {'address': 'a',
                               'compute_node_id': 1,
                               'dev_id': 'i',
                               'extra_info': {},
                               'dev_type': fields.PciDeviceType.STANDARD,
                               'id': 1,
                               'server_uuid': None,
                               'label': 'l',
                               'product_id': 'p',
                               'status': 'available',
                               'vendor_id': 'v'}}
        self.assertEqual(dist, result)

    def test_show_error_id(self):
        """Unknown device ids should surface as HTTP 404."""
        def fake_pci_device_get_by_id(context, id):
            raise exception.PciDeviceNotFoundById(id=id)
        self.stub_out('compute.db.pci_device_get_by_id',
                      fake_pci_device_get_by_id)
        req = fakes.HTTPRequest.blank('/os-pci/0', use_admin_context=True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')

    def _fake_compute_node_get_all(self, context):
        # Single fake node so index/detail fan out to exactly one host.
        return [compute.ComputeNode(id=1,
                                    service_id=1,
                                    host='fake',
                                    cpu_info='cpu_info',
                                    disk_available_least=100)]

    def _fake_pci_device_get_all_by_node(self, context, node):
        return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]

    def test_index(self):
        """index() should list summary fields for each device on each node."""
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       self._fake_compute_node_get_all)
        self.stub_out('compute.db.pci_device_get_all_by_node',
                      self._fake_pci_device_get_all_by_node)
        req = fakes.HTTPRequest.blank('/os-pci', use_admin_context=True)
        result = self.controller.index(req)
        dist = {'pci_devices': [test_pci_device.fake_db_dev,
                                test_pci_device.fake_db_dev_1]}
        for i in range(len(result['pci_devices'])):
            self.assertEqual(dist['pci_devices'][i]['vendor_id'],
                             result['pci_devices'][i]['vendor_id'])
            self.assertEqual(dist['pci_devices'][i]['id'],
                             result['pci_devices'][i]['id'])
            self.assertEqual(dist['pci_devices'][i]['status'],
                             result['pci_devices'][i]['status'])
            self.assertEqual(dist['pci_devices'][i]['address'],
                             result['pci_devices'][i]['address'])

    def test_detail(self):
        """detail() should include the extended fields (label, dev_id)."""
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       self._fake_compute_node_get_all)
        self.stub_out('compute.db.pci_device_get_all_by_node',
                      self._fake_pci_device_get_all_by_node)
        req = fakes.HTTPRequest.blank('/os-pci/detail',
                                      use_admin_context=True)
        result = self.controller.detail(req)
        dist = {'pci_devices': [test_pci_device.fake_db_dev,
                                test_pci_device.fake_db_dev_1]}
        for i in range(len(result['pci_devices'])):
            self.assertEqual(dist['pci_devices'][i]['vendor_id'],
                             result['pci_devices'][i]['vendor_id'])
            self.assertEqual(dist['pci_devices'][i]['id'],
                             result['pci_devices'][i]['id'])
            self.assertEqual(dist['pci_devices'][i]['label'],
                             result['pci_devices'][i]['label'])
            self.assertEqual(dist['pci_devices'][i]['dev_id'],
                             result['pci_devices'][i]['dev_id'])
class PciControllerPolicyEnforcementV21(test.NoDBTestCase):
    """Verify each PciController action is rejected by a non-matching policy."""

    def setUp(self):
        super(PciControllerPolicyEnforcementV21, self).setUp()
        self.controller = pci.PciController()
        self.req = fakes.HTTPRequest.blank('')

    def _test_policy_failed(self, action, *args):
        # Install a rule the request's project cannot satisfy, then assert the
        # action raises PolicyNotAuthorized with the expected message.
        rule_name = "os_compute_api:os-pci:%s" % action
        rule = {rule_name: "project:non_fake"}
        self.policy.set_rules(rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, getattr(self.controller, action),
            self.req, *args)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        self._test_policy_failed('index')

    def test_detail_policy_failed(self):
        self._test_policy_failed('detail')

    def test_show_policy_failed(self):
        self._test_policy_failed('show', 1)
| |
"""Old stat file parser, kept as legacy code and unmaintained due to the files no longer being generated."""
import re
from datetime import datetime
from typing import Text
from flask import current_app
from werkzeug import LocalProxy
from app import models
# Resolve the Flask app's logger and DB lazily, so this module can be
# imported outside of an application context.
logger = LocalProxy(lambda: current_app.logger)
db = LocalProxy(lambda: current_app.db)
def parse(text, filename):
    """Parse the raw text of a stat file into DB rows.

    The file name is required both for duplicate detection and for deriving
    the match date.  Returns True on success, False on any failure
    (duplicate file, unparseable filename, DB error, or line-parse error).
    """
    duplicate = (db.session.query(models.Match.parsed_file)
                 .filter(models.Match.parsed_file == filename)
                 .first())
    line = None  # defined up front so the outer error handler can log it
    try:
        if duplicate:
            logger.warning(" ~ ~ Duplicate parse entry detected.)\n ~ ~ Request filename: %s" +
                           "\n ~ ~ Stored filename: %s",
                           filename, duplicate.parsed_file)
            return False
        logger.debug('Starting parse of %r' % filename)
        match = models.Match()
        match.parsed_file = filename
        # Filename layout is statistics_yyyy.dd.mm[...].txt (day before month).
        search_str = r'^statistics_((?:19|20)\d{2})[\. .](0[1-9]|[12][0-9]|3[01])[\. .](0[1-9]|1[012])(?:.*)\.txt$'
        file_date = re.search(search_str, filename)
        if file_date is None or len(file_date.groups()) != 3:
            logger.warning('Invalid filename for timestamp: %r' % filename)
            return False
        # BUG FIX: the original called datetime.date(y, m, d), which invokes
        # the *instance* method unbound and raises TypeError.  Build a
        # datetime and take its date() instead (group 3 = month, group 2 = day).
        match.date = datetime(int(file_date.group(1)), int(file_date.group(3)),
                              int(file_date.group(2))).date()
        db.session.add(match)
        try:
            db.session.flush()
        except Exception as err:
            # BUG FIX: was Exception.message (a class attribute lookup, not
            # the caught error) plus a stray print("PANIC").
            logger.error('Error flushing DB session: {0}'.format(err))
            return False
        for line in text.splitlines():
            try:
                parse_line(line, match)
            except Exception as err:
                logger.error('Error parsing line: {0}\n{1}'.format(line, err))
                db.session.rollback()
                return False
        db.session.flush()
        db.session.commit()
    except Exception as err:
        logger.error('Error parsing line: {0}\n{1}'.format(line, err))
        return False
    return True
# Stat-file timestamps are YYYY.MM.DD[.HH[.MM]].SS -- hour/minute optional.
def format_timestamp(timestamp: Text) -> datetime:
    """Convert a stat-file timestamp string into a datetime.

    Missing optional hour/minute components default to zero (the original
    crashed on int(None) for those).  Raises ValueError when the string does
    not match the expected layout (the original raised AttributeError).
    """
    expected_timestamp_format = r'^(\d{4})\.(0?[1-9]|1[012])\.(0?[1-9]|[12][0-9]|3[01])\.(?:(?:([01]?\d|2[0-3])\.)?([0-5]?\d)\.)?([0-5]?\d)$'  # noqa: E501
    searched = re.search(expected_timestamp_format, timestamp)
    if searched is None:
        raise ValueError('Unrecognised timestamp: %r' % timestamp)
    year, month, day, hour, minute, second = (
        int(group) if group is not None else 0 for group in searched.groups())
    return datetime(year, month, day, hour, minute, second)
def parse_line(line: Text, match: models.Match) -> bool:
    """Dispatch a single stat-file line to its registered handler.

    Unknown line types are logged, except the WRITE_COMPLETE sentinel.
    """
    fields = nullparse(line.split('|'))
    key = fields[0]
    if key in lineParseFunctions:
        lineParseFunctions[key](fields, match)
    elif key != 'WRITE_COMPLETE':
        # BUG FIX: the original used ``not in`` against the string, which is a
        # substring test and silently skipped the warning for any key that
        # happens to be a substring of 'WRITE_COMPLETE' (e.g. 'WRITE').
        logger.warning('Unhandled line during parsing: %r\n Full line:\n%r',
                       str(key), line)
    return True
# The original looped ``for sstring in s: sstring = None`` which only rebinds
# the loop variable -- the function was a complete no-op.
def nullparse(s):
    """Convert 'null'/empty entries in a statfile field (or field list) to None.

    Accepts a single string (returns the converted value, or None), None
    (passed through), or a list of fields (returns a new list with every
    string entry converted).
    """
    if s is None:
        return None
    if isinstance(s, str):
        return None if s == '' or s.lower() == 'null' else s
    return [nullparse(entry) if isinstance(entry, str) or entry is None else entry
            for entry in s]
def truefalse(s: Text) -> bool:
    """Return True for the string '1', False for anything else."""
    return s == '1'
# Registry mapping a line's leading token to its handler function.
lineParseFunctions = {}
def lineparse_function(string: Text):
    """Decorator: register the function as the handler for *string* lines."""
    def inner(func):
        lineParseFunctions[string] = func
        # BUG FIX: return the function so the decorated name stays bound to
        # it -- previously every decorated name was rebound to None.
        return func
    return inner
@lineparse_function('STATLOG_START')
def lineparse_statlog_start(line: Text, match: models.Match):
    """Record data version, map name and round start/end times from the header."""
    match.data_version = line[1]
    match.mapname = line[2]
    started = format_timestamp(line[3])
    ended = format_timestamp(line[4])
    match.start_datetime = started
    match.end_datetime = ended
    match.round_length = (ended - started).total_seconds()
@lineparse_function('MASTERMODE')
def lineparse_mastermode(line: Text, match: models.Match):
    """Record the round's master mode (field 1)."""
    match.mastermode = line[1]
@lineparse_function('GAMEMODE')
def lineparse_gamemode(line: Text, match: models.Match):
    """Join every game mode on the line into a single '|'-separated string.

    Fixed to slice instead of ``del line[0]`` so the caller's field list is
    not mutated as a side effect.
    """
    match.modes_string = '|'.join(line[1:])
@lineparse_function('TECH_TOTAL')
def lineparse_techtotal(line: Text, match: models.Match):
    """Record the round's tech total (field 1)."""
    match.tech_total = line[1]
@lineparse_function('BLOOD_SPILLED')
def lineparse_bloodspilled(line: Text, match: models.Match):
    """Record the blood-spilled total (field 1)."""
    match.blood_spilled = line[1]
@lineparse_function('CRATES_ORDERED')
def lineparse_crates_ordered(line: Text, match: models.Match):
    """Record the crates-ordered count (field 1)."""
    match.crates_ordered = line[1]
@lineparse_function('ARTIFACTS_DISCOVERED')
def lineparse_artifacts_discovered(line: Text, match: models.Match):
    """Record the artifacts-discovered count (field 1)."""
    match.artifacts_discovered = line[1]
@lineparse_function('CREWSCORE')
def lineparse_crewscore(line: Text, match: models.Match):
    """Record the crew score (field 1)."""
    match.crewscore = line[1]
@lineparse_function('NUKED')
def lineparse_nuked(line: Text, match: models.Match):
    """Record the nuked flag (field 1, '1'/'0' converted via truefalse)."""
    match.nuked = truefalse(line[1])
@lineparse_function('ESCAPEES')
def lineparse_escapees(line: Text, match: models.Match):
    """Record the escapee count (field 1)."""
    match.escapees = line[1]
@lineparse_function('MOB_DEATH')
def lineparse_mobdeath(line: Text, match: models.Match):
    """Store one mob death record: who died, where, when and to whom."""
    death = models.Death(match_id=match.id)
    death.typepath = line[1]
    death.special_role = nullparse(line[2])
    death.timeofdeath = line[3]
    death.last_assailant = line[4]
    death.death_x = line[5]
    death.death_y = line[6]
    death.death_z = line[7]
    death.mindkey = nullparse(line[8])
    death.mindname = nullparse(line[9])
    db.session.add(death)
@lineparse_function('ANTAG_OBJ')
def lineparse_antagobj(line: Text, match: models.Match):
    """Store an antagonist objective (targeted and untargeted forms)."""
    a = models.AntagObjective(match_id=match.id)
    a.mindname = nullparse(line[1])
    a.mindkey = nullparse(line[2])
    a.special_role = nullparse(line[3])
    a.objective_type = line[4]
    a.objective_desc = line[6]
    # Check if this is a targeted objective or not.
    if line[5].isdigit():
        a.objective_succeeded = int(line[5])
    else:
        # Targeted form: the success flag shifts to field 8.
        a.objective_succeeded = int(line[8])
        a.target_name = line[7]
        # NOTE(review): target_role reuses field 6, which was stored above as
        # objective_desc -- verify against the stat-file format.
        a.target_role = line[6]
    if a.objective_succeeded >= 2:  # Mutiny gives 2 as an additional success value.
        a.objective_succeeded = 1
    db.session.add(a)
@lineparse_function('EXPLOSION')
def lineparse_explosion(line: Text, match: models.Match):
    """Store one explosion record: epicenter coordinates and blast radii."""
    explosion = models.Explosion(match_id=match.id)
    attrs = ('epicenter_x', 'epicenter_y', 'epicenter_z', 'devestation_range',
             'heavy_impact_range', 'light_impact_range', 'max_range')
    for offset, attr in enumerate(attrs, start=1):
        setattr(explosion, attr, line[offset])
    db.session.add(explosion)
@lineparse_function('UPLINK_ITEM')
def lineparse_uplinkitem(line: Text, match: models.Match):
    """Store a single uplink purchase."""
    purchase = models.UplinkBuy(match_id=match.id)
    purchase.mindkey = line[1]
    purchase.mindname = line[2]
    purchase.traitor_buyer = truefalse(line[3])
    purchase.bundle_path = line[4]
    purchase.item_path = line[5]
    db.session.add(purchase)
@lineparse_function('BADASS_BUNDLE')
def lineparse_badassbundle(line: Text, match: models.Match):
    """Store a badass bundle purchase plus one row per item in the bundle."""
    bb = models.BadassBundleBuy(match_id=match.id)
    bb.mindname = line[2]
    bb.mindkey = line[1]
    bb.traitor_buyer = truefalse(line[3])
    db.session.add(bb)
    # NOTE(review): bb.id is unset until the session is flushed, so the child
    # rows below may be created with a null badass_bundle_id -- verify.
    items = line[4]
    # NOTE(review): line[4] is a single field; iterating it walks characters,
    # not item paths -- confirm the expected stat-file layout.
    for item in items:
        i = models.BadassBundleItem(badass_bundle_id=bb.id)
        i.item_path = item
        db.session.add(i)
@lineparse_function('CULTSTATS')
def lineparse_cultstats(line: Text, match: models.Match):
    """Store the cult round summary fields."""
    for offset, attr in enumerate(
            ('cult_runes_written', 'cult_runes_fumbled', 'cult_runes_nulled',
             'cult_converted', 'cult_tomes_created'), start=1):
        setattr(match, attr, line[offset])
    match.cult_narsie_summoned = truefalse(line[6])
    for offset, attr in enumerate(
            ('cult_narsie_corpses_fed', 'cult_surviving_cultists',
             'cult_deconverted'), start=7):
        setattr(match, attr, line[offset])
@lineparse_function('XENOSTATS')
def lineparse_xenostats(line: Text, match: models.Match):
    """Store the xenomorph round statistics (fields 1-3)."""
    for offset, attr in enumerate(
            ('xeno_eggs_laid', 'xeno_faces_hugged', 'xeno_faces_protected'),
            start=1):
        setattr(match, attr, line[offset])
@lineparse_function('BLOBSTATS')
def lineparse_blobstats(line: Text, match: models.Match):
    """Store the blob round statistics."""
    match.blob_wins = line[1]
    match.blob_spawned_blob_players = line[2]
    match.spores_spawned = line[3]
    # NOTE(review): res_generated reuses field 3 (same as spores_spawned);
    # this looks like it should read line[4] -- verify against the file format.
    match.res_generated = line[3]
@lineparse_function('MALFSTATS')
def lineparse_malfstats(line: Text, match: models.Match):
    """Store the malf-AI round outcome (fields 1-3)."""
    match.malf_won = line[1]
    match.malf_shunted = line[2]
    match.borgs_at_roundend = line[3]
@lineparse_function('MALFMODULES')
def lineparse_malfmodules(line: Text, match: models.Match):
    """Join every malf AI module on the line into a single '|' string.

    Fixed to slice instead of ``del line[0]`` so the caller's field list is
    not mutated as a side effect.
    """
    match.malf_modules = '|'.join(line[1:])
@lineparse_function('REVSQUADSTATS')
def lineparse_revsquadstats(line: Text, match: models.Match):
    """Store the revolution-squad outcome: win flag and remaining heads."""
    match.revsquad_won = line[1]
    match.remaining_heads = line[2]
@lineparse_function('POPCOUNT')
def lineparse_popcount(line: Text, match: models.Match):
    """Store one population snapshot: player count at a timestamp."""
    snapshot = models.PopulationSnapshot(match_id=match.id)
    snapshot.popcount = line[2]
    # Timestamp field is 'yyyy-mm-dd hh:mm:ss'.
    found = re.search(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})',
                      line[1])
    snapshot.time = datetime(*(int(part) for part in found.groups()))
    db.session.add(snapshot)
| |
# -!- encoding: utf-8 -!-
import os
from utils import Toggler, infinity
def gen_player_links(players, team_id):
    """Return comma-separated anchor links for every player on team_id."""
    links = ['<a href="#%s">%s</a>' % (p.slug_nick, p.nick)
             for p in players
             if hasattr(p, 'team_id') and p.team_id == team_id]
    return ", ".join(links)
def pluralize(number, unit, emph_bound=0):
    """Return '<n> <unit>(s)', bolded when n reaches a positive emph_bound."""
    if number == 1:
        return "%d %s" % (number, unit)
    emphasize = emph_bound > 0 and number >= emph_bound
    template = "<strong>%d %ss</strong>" if emphasize else "%d %ss"
    return template % (number, unit)
def general_game_info(game, levelshots):
    """Render the game-summary box: levelshot, info table and CTF score row."""
    html = '<div class="game_stats">\n'
    html += '<img src="%s/%s.jpg" />' % (levelshots, game.mapname)
    html += '<table class="game_info">\n'
    html += "<tr><td>Map</td><td>%s</td></tr>\n" % (game.mapname)
    html += "<tr><td>Date</td><td>%s</td></tr>\n" % (game.datetime)
    html += "<tr><td>Game type</td><td>%s</td></tr>\n" % (game.gametype)
    html += '<tr><td>Total Frags</td><td><a href="#kill_matrix">%d</a></td></tr>\n' % (game.frag_count)
    html += "</table>\n"
    # No team 1 means this wasn't a CTF game; skip the score row entirely.
    if not 1 in game.teams:
        print "Not a CTF game!"  # TODO
        return html
    html += '<table>\n<tr class="game_result">\n'
    html += '<td class="team_red">%s</td>' % game.teams[1].capture_count
    html += "<td>vs</td>"
    html += '<td class="team_blue">%s</td>' % game.teams[2].capture_count
    html += "</tr><tr>"
    players = game.sortedPlayers()
    html += '<td>%s</td><td> </td><td>%s</td>' % (gen_player_links(players, 1), gen_player_links(players, 2))
    html += "</tr>\n</table>\n"
    html += "</div>\n\n"
    return html
def _award_html(award):
    """Render an award as a span whose tooltip carries the description."""
    return '<span class="award" title="{0}">{1}</span>'.format(
        award.description, award.name)
def _player_html(player):
    """Render a player's name as a team-colored anchor to their stat box."""
    return '<a class="team_{0}" href="#{1}">{2}</a>'.format(
        player.team_color, player.slug_nick, player.nick)
def emph_percentage(hitrate, lower_bound, text=""):
    """Format hitrate as a percentage, bolded at/above lower_bound.

    An infinite rate (no data) renders as '-'.  The redundant
    ``hitrate != infinity`` re-check was removed: the first branch already
    returns for infinity.
    """
    if hitrate == infinity:
        return "-"
    if hitrate >= lower_bound:
        return "<strong>%.1f%%%s</strong>" % (hitrate*100, text)
    return "%.1f%%%s" % (hitrate*100, text)
def emph_int(value, lower_bound, text=""):
    """Format value as an integer, bolded when it reaches lower_bound."""
    template = "<strong>%d%s</strong>" if value >= lower_bound else "%d%s"
    return template % (value, text)
_WEAPONS = [
    # internal key, descriptive name, hitrate emphasize bound
    ('gauntlet', 'Gauntlet', 1),
    ('machinegun', "Machine gun", 30),
    ('shotgun', 'Shotgun', 20),
    ('rocketlauncher', 'Rocket launcher', 30),
    ('plasmagun', 'Plasma gun', 20),
    ('grenadelauncher', 'Grenade launcher', 10),
    ('lightninggun', 'Lightning gun', 30),
    ('railgun', 'Railgun', 40),
    ('bfg', 'Big F***ing Gun', 1),
    ('teleport', 'Teleport', 100.0),
    ('environment', 'Environment', 1),
    ]
# Key -> display-name lookup, plus None for "no weapon".
_WEAPON_NAMES = dict((key, name) for key, name, _ in _WEAPONS)
_WEAPON_NAMES[None] = 'None'
# Attributes dumped verbatim into the per-player debug box.
_BORING_STATS = ("chat_length elo team_damage_given team_kills flag_returns "
                 "score suicides dmg_kill_ratio health armor").split()
def player_info(player, weapon_maxima, attr_maxima):
    """Render one player's stat box: a summary table plus a per-weapon table.

    weapon_maxima/attr_maxima hold the per-game maxima used to decide which
    numbers get <strong> emphasis.
    """
    def attr_max(key):
        # Renamed from ``max`` so it no longer shadows the builtin.
        return attr_maxima.get(key, infinity)
    html = '<div class="player_stats" id="%s">\n' % player.slug_nick
    html += '<table class="player_info">\n'
    odd = Toggler("even", "odd")
    html += '<tr><td colspan="2" class="name team_%s"><strong>%s</strong> (<span class="elo" title="ELO Rating by frags">%d</span>)</td></tr>\n' % (player.team_color, player.nick, player.elo*1000)
    html += '<tr class="%s"><th>Weapons</th><td><span title="Most shots (normalized by reload times)">%s</span> / <span title="Most kills">%s</span></td></tr>\n' % (
        odd, _WEAPON_NAMES[player.weapon_most_shots], _WEAPON_NAMES[player.weapon_most_kills])
    html += '<tr class="%s"><th>Player mostly</th><td>fragged by %s / fragging %s </td></tr>\n' % (odd, player.worst_enemy.nick, player.easiest_prey.nick)
    html += '<tr class="%s"><th>Frag rate</th><td>%s <span class="aside">(%s / %s)</span></td></tr>\n' % (
        odd,
        emph_percentage(player.fragrate, attr_max('fragrate')),
        emph_int(player.kill_count, attr_max('kill_count')),
        emph_int(player.death_count, attr_max('death_count')))
    html += '<tr class="%s"><th>Damage rate</th><td>%s <span class="aside">(%s / %s)</span></td></tr>\n' % (
        odd,
        emph_percentage(player.damage_rate, attr_max('damage_rate')),
        emph_int(player.damage_given, attr_max('damage_given')),
        emph_int(player.damage_received, attr_max('damage_received')))
    html += '<tr class="%s"><th>Cap rate</th><td>%s <span class="aside">(%s / %s)</span></td></tr>\n' % (
        odd,
        emph_percentage(player.caprate, attr_max('caprate')),
        emph_int(player.flag_caps, attr_max('flag_caps')),
        emph_int(player.flag_touches, attr_max('flag_touches')))
    html += '<tr class="%s"><th>Streaks</th><td>%s %s %s</td></tr>\n' % (
        odd,
        pluralize(player.kill_streak, "frag", attr_max('kill_streak')),
        pluralize(player.death_streak, "death", attr_max('death_streak')),
        pluralize(player.cap_streak, "cap", attr_max('cap_streak')))
    html += '<tr><td colspan="2">'
    html += '<div class="debug_stats">\n'
    for attr in _BORING_STATS:
        html += '<span>%s=%s</span>; \n' % (attr, getattr(player, attr))
    html += "</div>\n"
    # BUG FIX: the closing tags were emitted as '</tr></td>' (wrong nesting).
    html += '</td></tr>'
    html += "</table>\n"
    html += '<table class="weapon_info">\n'
    html += "<tr><th>Weapon</th><th>Hitrate</th><th>Fragrate</th></tr>\n"
    odd = Toggler("odd", "even")
    for weapon_key, weapon_name, emph_rate in _WEAPONS:
        stats = getattr(player, weapon_key, None)
        if not stats:
            continue
        if int(stats['shots']) < 1:
            continue
        def weapon_max(key):
            # Per-weapon emphasis bound; renamed from ``max`` (builtin shadow).
            return weapon_maxima.get(weapon_key, dict()).get(key, infinity)
        html += '<tr class="%s"><td>%s</td>' % (odd, weapon_name)
        html += '<td class="rate">%s / %s = %s</td>' % (
            emph_int(stats['hits'], weapon_max('hits')),
            emph_int(stats['shots'], weapon_max('shots')),
            emph_percentage(stats['hitrate'], weapon_max('hitrate')))
        html += '<td class="rate">%s / %s = %s</td>' % (
            emph_int(stats['kills'], weapon_max('kills')),
            emph_int(stats['deaths'], weapon_max('deaths')),
            emph_percentage(stats['killrate'], weapon_max('killrate')))
        html += "</tr>\n"
    html += "</table>\n"
    html += "</div>\n"
    return html
def kill_matrix(game):
    """Render the NxN frag-difference table between all scoring players."""
    html = "<table>\n"
    # Sort by team then kill count; include only players that actually have
    # per-opponent kill data.  (Python 2 ``cmp``-style comparison.)
    def compare(p1, p2):
        return cmp(p1.team_id, p2.team_id) or cmp(p1.kill_count, p2.kill_count)
    def filter(p):
        return hasattr(p, 'player_kill_count')
    ps = game.sortedPlayers(compare=compare, include=filter)
    html += "<tr><th>Frag diff</th>"
    for p in ps:
        html += '<th class="team_%s">%s</th>' % (p.team_color, p.nick)
    html += "<th>Total</th></tr>\n"
    odd = Toggler("even", "odd")
    for p in ps:
        html += '<tr class="%s"><th class="team_%s">%s</th>' % (odd, p.team_color, p.nick)
        diff_count = 0
        for p2 in ps:
            kill_count = p.player_kill_count.get(p2, 0)
            death_count = p.player_death_count.get(p2, 0)
            diff = kill_count - death_count
            diff_count += diff
            teamkill = ""
            # Kills against one's own team get a highlighting CSS class.
            if p.team_id == p2.team_id and kill_count > 0:
                teamkill = ' teamkill'
            html += '<td class="kill_count %s" title="%d kills - %d deaths">%d</td>' % (teamkill, kill_count, death_count, diff)
        html += '<td class="kill_count">%s</td></tr>\n' % (diff_count)
    html += "</table>"
    return html
def award_table(players):
    """Render all player awards, merging identical awards onto one row."""
    html = ""
    awards = list()
    for p in players:
        if not hasattr(p, 'awards'):
            continue
        for a in p.awards:
            awards.append((p,a))
    # Sort by award name so identical awards become adjacent.
    # (Python 2 tuple-parameter lambda; this module targets Python 2.)
    awards.sort(cmp=lambda (p,a), (p2,a2): cmp(a.name,a2.name))
    # merge same awards
    awards_copy = awards[:]
    awards = []
    for p, a in awards_copy:
        if awards and a.name == awards[-1][1].name:
            awards[-1][0].append(p)
        else:
            awards.append(([p], a))
    # output
    for ps, a in awards:
        winners = " ".join(_player_html(p) for p in ps)
        award = _award_html(a)
        img = a.img_url or "media/award.png"
        img_base = os.path.basename(img)
        html += u'<div class="award"><div class="symbol"><img src="%s" alt="%s" /></div><div class="name">%s</div>\
<div class="winner">%s</div></div>\n' % (img, img_base, award, winners)
    return html.encode('utf-8')
# Page skeleton for a single game report; filled with (mapname, body_html).
_HTML = """\
<html>
<head>
<title>%s - ArenaStats Game Report</title>
<link rel="stylesheet" type="text/css" href="media/style.css" />
</head>
<body>
<h1>ArenaStats Game Report</h1>
%s
<div id="footer">
Also view the <a href="players.html">Player Overview</a>!
</div>
</body></html>
"""
def html_report(game, levelshots):
    """Assemble the full HTML report page for one game.

    Renders the summary box, awards, kill matrix and per-player stat boxes
    for both teams, then wraps everything in the _HTML page skeleton.
    """
    players = game.sortedPlayers()
    html = ""
    html += general_game_info(game, levelshots)
    html += '<div id="award_table">\n'
    html += award_table(players)
    html += '</div>\n'
    html += '<div id="kill_matrix">\n'
    html += kill_matrix(game)
    html += '</div>\n'
    html += '<div id="red_team" class="red_players">\n'
    for p in players:
        if hasattr(p, 'team_id') and p.team_id == 1:
            html += player_info(p, game.weapon_maxima, game.attr_maxima)
    html += '</div>\n'
    html += '<div id="blue_team" class="blue_players">\n'
    # BUG FIX: the blue team previously iterated game.players.values(),
    # producing its boxes in arbitrary dict order; use the same sorted
    # iteration as the red team.
    for p in players:
        if hasattr(p, 'team_id') and p.team_id == 2:
            html += player_info(p, game.weapon_maxima, game.attr_maxima)
    html += '</div>\n'
    return _HTML % (game.mapname, html)
| |
import unittest
import urllib2, urlparse
from StringIO import StringIO
import json
import webapp2
from markupsafe import Markup, escape
import webtest
from google.appengine.api import mail
from google.appengine.ext import testbed
import server.lib.formValidation as formValidation
from server.controllers.contact import ContactHandler
from server.controllers.html import ContentHandler
class testContactHandler(unittest.TestCase):
    """End-to-end tests for the contact form (GAE testbed + webtest + mocks)."""

    def setUp(self):
        """Build the WSGI app under test and activate the GAE service stubs."""
        app = webapp2.WSGIApplication([
            webapp2.Route('/contact', handler='server.controllers.contact.ContactHandler', name='contact-form'),
            webapp2.Route('/contact/sent', handler='server.controllers.html.ContentHandler', name='contact-sent'),
            webapp2.Route('/contact/not-human', handler='server.controllers.html.ContentHandler', name='contact-nothuman'),
            webapp2.Route('/contact/misconfigured', handler='server.controllers.html.ContentHandler', name='contact-misconfig')
        ])
        self.testApp = webtest.TestApp(app)
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_mail_stub()
        self.testbed.init_urlfetch_stub()
        self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
        # A post body that passes validation; individual tests mutate copies.
        self.goodPost = {
            'email': 'alex@gmail.com',
            'g-recaptcha-response': 'value-of-g-recaptcha-response',
            'message': 'A message',
            'name': 'Alex Bransby-Williams'
        }
        self.recaptchaSecret = '6LdT9_4SAAAAAKvBtC9hi-nf7kaoQeGDIdMcXxXi'

    def tearDown(self):
        """Deactivate the testbed after each test.

        BUG FIX: this hook was named ``teardown``; unittest only invokes
        ``tearDown``, so the testbed was never deactivated between tests.
        """
        self.testbed.deactivate()

    def testGet(self):
        """GET /contact renders the HTML form."""
        response = self.testApp.get('/contact')
        # webtest checks that the response code is 2xx or 3xx
        # self.assertEqual(response.status_int, 200)
        self.assertEqual(response.content_type, 'text/html')

    def testBadPost(self):
        """Posting with any required field missing yields HTTP 400."""
        badParams = formValidation.combineRequiredFields(self.goodPost)
        for params in badParams:
            response = self.testApp.post('/contact', params, status=400)

    def testInvalidEmail(self):
        """An invalid email redisplays the form with the submitted values."""
        args = self.goodPost
        args['email'] = 'bad email'
        response = self.testApp.post('/contact', args)
        form = response.form
        self.assertEqual('bad email', form['email'].value)
        self.assertEqual(args['name'], form['name'].value)
        self.assertEqual(args['message'], form['message'].value)
        # g-recaptcha-response is inserted via JavaScript
        self.assertTrue('g-recaptcha-response' not in form.fields)

    def testUserInputEscapedWhenFormRedisplayed(self):
        """Script-injection attempts must come back escaped on redisplay."""
        args = {
            'email': '<script type="text/javascript">alert("Script injection");</script>',
            'g-recaptcha-response': '<script type="text/javascript">alert("Script injection");</script>',
            'name': '<script type="text/javascript">alert("Script injection");</script>',
            'message': '<script type="text/javascript">alert("Script injection");</script>'
        }
        response = self.testApp.post('/contact', args)
        form = response.form
        field = response.html.find(id='inputEmail1')
        # If the arg values aren't escaped then the html parser
        # webtest uses (Beautiful Soup) will be unable to recover
        # the values we posted. So, ensuring that the values we get
        # back match the ones we posted means the values are
        # escaped when they're re-displayed
        self.assertEqual(args['email'], form['email'].value)
        self.assertEqual(args['name'], form['name'].value)
        self.assertEqual(args['message'], form['message'].value)

    def testUserInputIsTrimmedWhitespaceWhenFormRedisplayed(self):
        """Leading/trailing whitespace is stripped before redisplay."""
        args = {
            'email': ' alexgmail.com ',
            'g-recaptcha-response': self.recaptchaSecret,
            'name': ' Alex ',
            'message': ' This is a message '
        }
        response = self.testApp.post('/contact', args)
        form = response.form
        self.assertEqual('alexgmail.com', form['email'].value)
        self.assertEqual('Alex', form['name'].value)
        self.assertEqual('This is a message', form['message'].value)

    def testGoodPost(self):
        """A valid post verifies with recaptcha, sends mail and redirects."""
        mockHandler = MockHTTPSHandler(mock_good_recaptcha)
        url_opener = urllib2.build_opener(mockHandler)
        urllib2.install_opener(url_opener)
        response = self.testApp.post('/contact', self.goodPost, )
        (recaptchaUrl, recapthaQueryArgs) = mockHandler.full_url.split('?')
        recapthaQueryArgs = urlparse.parse_qs(recapthaQueryArgs)
        self.assertEqual(mockHandler.method, 'GET')
        self.assertEqual(recaptchaUrl, 'https://www.google.com/recaptcha/api/siteverify')
        self.assertEqual(recapthaQueryArgs['secret'][0], self.recaptchaSecret)
        self.assertEqual(recapthaQueryArgs['response'][0], self.goodPost['g-recaptcha-response'])
        # request.client_addr appears to be unavailble in GAE live environment
        # self.assertEqual(recapthaQueryArgs['remoteip'][0], '127.0.0.1')
        self.assertEqual(response.status_int, 302)
        response = response.follow()
        self.assertMessageSent(response)

    def testInvalidRecapthaSecret(self):
        """A bad recaptcha secret alerts the site owner and shows the error page."""
        mockHandler = MockHTTPSHandler(mock_invalid_recaptcha_secret)
        url_opener = urllib2.build_opener(mockHandler)
        urllib2.install_opener(url_opener)
        response = self.testApp.post('/contact', self.goodPost, extra_environ=dict(REMOTE_ADDR='127.0.0.1'))
        messages = self.mail_stub.get_sent_messages(to='alex.bransbywilliams@gmail.com')
        self.assertEqual(1, len(messages))
        self.assertEqual('alex.bransbywilliams@gmail.com', messages[0].to)
        self.assertEqual('CV: Invalid recaptcha secret', messages[0].subject)
        self.assertEqual('Google Recaptcha says the secret "' + self.recaptchaSecret + '" is invalid. Contact form is broken.', messages[0].body.decode())
        self.assertEqual(response.status_int, 302)
        response = response.follow()
        self.assertEqual('That Could Of Gone Better...', response.html.h1.string)

    def testRecaptchaDown(self):
        """If recaptcha is unreachable the message is sent anyway."""
        mockHandler = MockHTTPSHandler(mock_recaptcha_down)
        url_opener = urllib2.build_opener(mockHandler)
        urllib2.install_opener(url_opener)
        response = self.testApp.post('/contact', self.goodPost)
        # We don't want to penalise users because recaptcha is down
        # so just send the mail and we'll live with any spam received
        # until recaptcha recovers
        self.assertEqual(response.status_int, 302)
        response = response.follow()
        self.assertMessageSent(response)

    def testRecaptchaResponseInvalid(self):
        """A failed human check sends no mail and shows the not-human page."""
        mockHandler = MockHTTPSHandler(mock_recaptcha_response_invalid)
        url_opener = urllib2.build_opener(mockHandler)
        urllib2.install_opener(url_opener)
        response = self.testApp.post('/contact', self.goodPost, extra_environ=dict(REMOTE_ADDR='127.0.0.1'))
        messages = self.mail_stub.get_sent_messages(to='alex.bransbywilliams@gmail.com')
        self.assertEqual(0, len(messages))
        self.assertEqual(response.status_int, 302)
        response = response.follow()
        self.assertEqual('Humans Only', response.html.h1.string)

    def assertMessageSent(self, response):
        """Assert exactly one contact email was sent and the success page shown."""
        messages = self.mail_stub.get_sent_messages(to='alex.bransbywilliams@gmail.com')
        self.assertEqual(1, len(messages))
        self.assertEqual('alex.bransbywilliams@gmail.com', messages[0].to)
        self.assertEqual('CV: Message from Alex Bransby-Williams', messages[0].subject)
        self.assertEqual('A message\n\nSenders Email: alex@gmail.com', messages[0].body.decode())
        self.assertEqual('Message Sent', response.html.h1.string)
def mock_good_recaptcha(req):
    """Fake a recaptcha siteverify response reporting success."""
    payload = json.dumps({'success': True, 'error-codes': []})
    resp = urllib2.addinfourl(StringIO(payload), "Content-Type: 'application/json'", req.get_full_url())
    resp.code = 200
    resp.msg = "OK"
    return resp
def mock_invalid_recaptcha_secret(req):
    """Fake a recaptcha siteverify response reporting an invalid secret."""
    payload = json.dumps({'success': False, 'error-codes': ['invalid-input-secret']})
    resp = urllib2.addinfourl(StringIO(payload), "Content-Type: 'application/json'", req.get_full_url())
    resp.code = 200
    resp.msg = "OK"
    return resp
def mock_recaptcha_down(req):
    """Fake recaptcha being unavailable (HTTP 500 with a plain-text body).

    BUG FIX: the response was built but never returned, so the installed
    opener received None instead of the 500 response.
    """
    resp = urllib2.addinfourl(StringIO("Uknown error message"), "Content-Type: 'text/plain'", req.get_full_url())
    resp.code = 500
    resp.msg = "Error"
    return resp
def mock_recaptcha_response_invalid(req):
    """Fake a recaptcha siteverify response rejecting the user's answer."""
    payload = json.dumps({'success': False, 'error-codes': ['invalid-input-response']})
    resp = urllib2.addinfourl(StringIO(payload), "Content-Type: 'application/json'", req.get_full_url())
    resp.code = 200
    resp.msg = "OK"
    return resp
class MockHTTPSHandler(urllib2.HTTPSHandler):
    """HTTPS handler that records the request and delegates to a mock function."""

    def __init__(self, mockFunc):
        # BUG FIX: initialize the base handler so its attributes
        # (e.g. _debuglevel) exist as urllib2 expects.
        urllib2.HTTPSHandler.__init__(self)
        self.full_url = ''
        self.method = ''
        self.set_mock(mockFunc)

    def https_open(self, req):
        """Record the request's URL and method, then return the mocked response."""
        self.full_url = req.get_full_url()
        self.method = req.get_method()
        return self._mock(req)

    def set_mock(self, mockFunc):
        """Install mockFunc as the response factory used by https_open."""
        self._mock = mockFunc
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import flavor_access as nova_flavor_access
from novaclient.v2 import servers
import six
from horizon import exceptions as horizon_exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
    """Tests for the api.nova.Server wrapper around novaclient server objects."""

    def test_get_base_attribute(self):
        # Attribute access should fall through to the wrapped server object.
        server = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(self.servers.first().id, server.id)

    def test_image_name(self):
        """image_name should resolve the image id through glance."""
        image = self.images.first()
        self.mox.StubOutWithMock(api.glance, 'image_get')
        api.glance.image_get(IsA(http.HttpRequest),
                             image.id).AndReturn(image)
        self.mox.ReplayAll()
        server = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(image.name, server.image_name)

    def test_image_name_no_glance_service(self):
        """image_name should degrade to '-' when glance is unavailable."""
        server = self.servers.first()
        self.mox.StubOutWithMock(api.glance, 'image_get')
        api.glance.image_get(IsA(http.HttpRequest),
                             server.image['id']).AndRaise(
            horizon_exceptions.ServiceCatalogException('image'))
        self.mox.ReplayAll()
        server = api.nova.Server(server, self.request)
        self.assertEqual('-', server.image_name)
class ComputeApiTests(test.APITestCase):
    """Unit tests for the Nova wrappers in ``openstack_dashboard.api.nova``.

    Every test follows the same mox record/replay pattern: stub the
    novaclient, record the exact client calls the wrapper is expected to
    make, call the ``api.nova`` wrapper, and then verify its return value.
    """

    def test_server_reboot(self):
        # Default reboot (no hardness argument) is a hard reboot.
        server = self.servers.first()
        HARDNESS = servers.REBOOT_HARD

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.reboot(server.id, HARDNESS)
        self.mox.ReplayAll()

        ret_val = api.nova.server_reboot(self.request, server.id)
        self.assertIsNone(ret_val)

    def test_server_soft_reboot(self):
        # Soft reboot is requested by passing the hardness explicitly.
        server = self.servers.first()
        HARDNESS = servers.REBOOT_SOFT

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.reboot(server.id, HARDNESS)
        self.mox.ReplayAll()

        ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
        self.assertIsNone(ret_val)

    def test_server_vnc_console(self):
        # The raw console dict is wrapped in a VNCConsole object.
        server = self.servers.first()
        console = self.servers.vnc_console_data
        console_type = console["console"]["type"]

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get_vnc_console(server.id,
                                           console_type).AndReturn(console)
        self.mox.ReplayAll()

        ret_val = api.nova.server_vnc_console(self.request,
                                              server.id,
                                              console_type)
        self.assertIsInstance(ret_val, api.nova.VNCConsole)

    def test_server_spice_console(self):
        server = self.servers.first()
        console = self.servers.spice_console_data
        console_type = console["console"]["type"]

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get_spice_console(server.id,
                                             console_type).AndReturn(console)
        self.mox.ReplayAll()

        ret_val = api.nova.server_spice_console(self.request,
                                                server.id,
                                                console_type)
        self.assertIsInstance(ret_val, api.nova.SPICEConsole)

    def test_server_rdp_console(self):
        server = self.servers.first()
        console = self.servers.rdp_console_data
        console_type = console["console"]["type"]

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get_rdp_console(server.id,
                                           console_type).AndReturn(console)
        self.mox.ReplayAll()

        ret_val = api.nova.server_rdp_console(self.request,
                                              server.id,
                                              console_type)
        self.assertIsInstance(ret_val, api.nova.RDPConsole)

    def test_server_list(self):
        servers = self.servers.list()

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.list(True, {'all_tenants': True}).AndReturn(servers)
        self.mox.ReplayAll()

        # server_list returns a (servers, has_more) tuple.
        ret_val, has_more = api.nova.server_list(self.request,
                                                 all_tenants=True)
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)

    def test_server_list_pagination(self):
        # With pagination on, the wrapper requests page_size + 1 entries to
        # detect whether more results exist.
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.list(True,
                                {'all_tenants': True,
                                 'marker': None,
                                 'limit': page_size + 1}).AndReturn(servers)
        self.mox.ReplayAll()

        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True},
                                                 all_tenants=True)
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertFalse(has_more)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_server_list_pagination_more(self):
        # When the backend returns page_size + 1 items, the extra item is
        # trimmed and has_more is True.
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.list(True,
                                {'all_tenants': True,
                                 'marker': None,
                                 'limit': page_size + 1}) \
            .AndReturn(servers[:page_size + 1])
        self.mox.ReplayAll()

        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True},
                                                 all_tenants=True)
        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertEqual(page_size, len(ret_val))
        self.assertTrue(has_more)

    def test_usage_get(self):
        novaclient = self.stub_novaclient()
        novaclient.usage = self.mox.CreateMockAnything()
        novaclient.usage.get(self.tenant.id,
                             'start',
                             'end').AndReturn(self.usages.first())
        self.mox.ReplayAll()

        ret_val = api.nova.usage_get(self.request, self.tenant.id,
                                     'start', 'end')
        self.assertIsInstance(ret_val, api.nova.NovaUsage)

    def test_usage_list(self):
        usages = self.usages.list()

        novaclient = self.stub_novaclient()
        novaclient.usage = self.mox.CreateMockAnything()
        novaclient.usage.list('start', 'end', True).AndReturn(usages)
        self.mox.ReplayAll()

        ret_val = api.nova.usage_list(self.request, 'start', 'end')
        for usage in ret_val:
            self.assertIsInstance(usage, api.nova.NovaUsage)

    def test_server_get(self):
        server = self.servers.first()

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get(server.id).AndReturn(server)
        self.mox.ReplayAll()

        ret_val = api.nova.server_get(self.request, server.id)
        self.assertIsInstance(ret_val, api.nova.Server)

    def test_server_metadata_update(self):
        server = self.servers.first()
        metadata = {'foo': 'bar'}

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.set_meta(server.id, metadata)
        self.mox.ReplayAll()

        ret_val = api.nova.server_metadata_update(self.request,
                                                  server.id,
                                                  metadata)
        self.assertIsNone(ret_val)

    def test_server_metadata_delete(self):
        server = self.servers.first()
        keys = ['a', 'b']

        novaclient = self.stub_novaclient()
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.delete_meta(server.id, keys)
        self.mox.ReplayAll()

        ret_val = api.nova.server_metadata_delete(self.request,
                                                  server.id,
                                                  keys)
        self.assertIsNone(ret_val)

    def _test_absolute_limits(self, values, expected_results):
        # Helper: build a fake limits object from `values`, then verify the
        # wrapper's post-processing against `expected_results`.
        limits = self.mox.CreateMockAnything()
        limits.absolute = []
        for key, val in six.iteritems(values):
            limit = self.mox.CreateMockAnything()
            limit.name = key
            limit.value = val
            limits.absolute.append(limit)
        novaclient = self.stub_novaclient()
        novaclient.limits = self.mox.CreateMockAnything()
        novaclient.limits.get(reserved=True).AndReturn(limits)
        self.mox.ReplayAll()

        ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
        for key in expected_results.keys():
            self.assertEqual(expected_results[key], ret_val[key])

    def test_absolute_limits_handle_unlimited(self):
        # -1 from the API means "unlimited" and is mapped to +inf.
        values = {"maxTotalCores": -1, "maxTotalInstances": 10}
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10}
        self._test_absolute_limits(values, expected_results)

    def test_absolute_limits_negative_used_workaround(self):
        # Negative "used" values (a known backend quirk) are clamped to 0.
        values = {"maxTotalCores": -1,
                  "maxTotalInstances": 10,
                  "totalInstancesUsed": -1,
                  "totalCoresUsed": -1,
                  "totalRAMUsed": -2048,
                  "totalSecurityGroupsUsed": 1,
                  "totalFloatingIpsUsed": 0,
                  }
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10,
                            "totalInstancesUsed": 0,
                            "totalCoresUsed": 0,
                            "totalRAMUsed": 0,
                            "totalSecurityGroupsUsed": 1,
                            "totalFloatingIpsUsed": 0,
                            }
        self._test_absolute_limits(values, expected_results)

    def test_cold_migrate_host_succeed(self):
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors = self.mox.CreateMockAnything()
        novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.migrate("test_uuid")
        self.mox.ReplayAll()

        ret_val = api.nova.migrate_host(self.request, "host", False, True,
                                        True)
        self.assertTrue(ret_val)

    def test_cold_migrate_host_fails(self):
        # Client errors from the migrate call must propagate to the caller.
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors = self.mox.CreateMockAnything()
        novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.migrate("test_uuid").AndRaise(
            nova_exceptions.ClientException(404))
        self.mox.ReplayAll()

        self.assertRaises(nova_exceptions.ClientException,
                          api.nova.migrate_host,
                          self.request, "host", False, True, True)

    def test_live_migrate_host_with_active_vm(self):
        # ACTIVE instances are live-migrated.
        hypervisor = self.hypervisors.first()
        server = self.servers.first()
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]

        novaclient.hypervisors = self.mox.CreateMockAnything()
        novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get(server_uuid).AndReturn(server)
        novaclient.servers.live_migrate(server_uuid, None, True, True)
        self.mox.ReplayAll()

        ret_val = api.nova.migrate_host(self.request, "host", True, True,
                                        True)
        self.assertTrue(ret_val)

    def test_live_migrate_host_with_paused_vm(self):
        # PAUSED instances are also live-migrated.
        hypervisor = self.hypervisors.first()
        server = self.servers.list()[3]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]

        novaclient.hypervisors = self.mox.CreateMockAnything()
        novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get(server_uuid).AndReturn(server)
        novaclient.servers.live_migrate(server_uuid, None, True, True)
        self.mox.ReplayAll()

        ret_val = api.nova.migrate_host(self.request, "host", True, True,
                                        True)
        self.assertTrue(ret_val)

    def test_live_migrate_host_without_running_vm(self):
        # Non-running instances fall back to a cold migrate even when live
        # migration was requested.
        hypervisor = self.hypervisors.first()
        server = self.servers.list()[1]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]

        novaclient.hypervisors = self.mox.CreateMockAnything()
        novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
        novaclient.servers = self.mox.CreateMockAnything()
        novaclient.servers.get(server_uuid).AndReturn(server)
        novaclient.servers.migrate(server_uuid)
        self.mox.ReplayAll()

        ret_val = api.nova.migrate_host(self.request, "host", True, True,
                                        True)
        self.assertTrue(ret_val)

    # ---- Flavor tests ----
    """Flavor Tests"""
    def test_flavor_list_no_extras(self):
        flavors = self.flavors.list()
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.list(is_public=True).AndReturn(flavors)
        self.mox.ReplayAll()

        api_flavors = api.nova.flavor_list(self.request)
        self.assertEqual(len(flavors), len(api_flavors))

    def test_flavor_get_no_extras(self):
        flavor = self.flavors.list()[1]
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.get(flavor.id).AndReturn(flavor)
        self.mox.ReplayAll()

        api_flavor = api.nova.flavor_get(self.request, flavor.id)
        self.assertEqual(api_flavor.id, flavor.id)

    def _test_flavor_list_paged(self, reversed_order=False, paginate=True):
        # Helper: verify paged flavor listing, optionally with reversed sort.
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        flavors = self.flavors.list()
        order = 'asc' if reversed_order else 'desc'
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        if paginate:
            novaclient.flavors.list(is_public=True,
                                    marker=None,
                                    limit=page_size + 1,
                                    sort_key='name',
                                    sort_dir=order).AndReturn(flavors)
        else:
            novaclient.flavors.list(is_public=True).AndReturn(flavors)
        self.mox.ReplayAll()

        api_flavors, has_more, has_prev = api.nova\
            .flavor_list_paged(
                self.request,
                True,
                False,
                None,
                paginate=paginate,
                reversed_order=reversed_order)
        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertFalse(has_more)
        self.assertFalse(has_prev)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_flavor_list_pagination_more_and_prev(self):
        # Starting from a marker in the middle of the list, both a next and
        # a previous page should be reported.
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        flavors = self.flavors.list()
        marker = flavors[0].id
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.list(is_public=True,
                                marker=marker,
                                limit=page_size + 1,
                                sort_key='name',
                                sort_dir='desc')\
            .AndReturn(flavors[1:page_size + 2])
        self.mox.ReplayAll()

        api_flavors, has_more, has_prev = api.nova\
            .flavor_list_paged(
                self.request,
                True,
                False,
                marker,
                paginate=True)
        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertEqual(page_size, len(api_flavors))
        self.assertTrue(has_more)
        self.assertTrue(has_prev)

    def test_flavor_list_paged_default_order(self):
        self._test_flavor_list_paged()

    def test_flavor_list_paged_reversed_order(self):
        self._test_flavor_list_paged(reversed_order=True)

    def test_flavor_list_paged_paginate_false(self):
        self._test_flavor_list_paged(paginate=False)

    def test_flavor_create(self):
        # flavor_create fills in documented defaults for the optional args.
        flavor = self.flavors.first()

        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.create(flavor.name, flavor.ram,
                                  flavor.vcpus, flavor.disk,
                                  flavorid='auto',
                                  ephemeral=0,
                                  swap=0,
                                  is_public=True,
                                  rxtx_factor=1).AndReturn(flavor)
        self.mox.ReplayAll()

        api_flavor = api.nova.flavor_create(self.request,
                                            flavor.name,
                                            flavor.ram,
                                            flavor.vcpus,
                                            flavor.disk)
        self.assertIsInstance(api_flavor, type(flavor))
        self.assertEqual(flavor.name, api_flavor.name)
        self.assertEqual(flavor.ram, api_flavor.ram)
        self.assertEqual(flavor.vcpus, api_flavor.vcpus)
        self.assertEqual(flavor.disk, api_flavor.disk)
        self.assertEqual(0, api_flavor.ephemeral)
        self.assertEqual(0, api_flavor.swap)
        self.assertEqual(True, api_flavor.is_public)
        self.assertEqual(1, api_flavor.rxtx_factor)

    def test_flavor_delete(self):
        flavor = self.flavors.first()
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavors.delete(flavor.id)
        self.mox.ReplayAll()

        api_val = api.nova.flavor_delete(self.request, flavor.id)
        self.assertIsNone(api_val)

    @test.create_stubs({api.nova: ('flavor_access_list',)})
    def test_flavor_access_list(self):
        flavor_access = self.flavor_access.list()
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]

        api.nova.flavor_access_list(self.request, flavor)\
            .AndReturn(flavor_access)
        self.mox.ReplayAll()

        api_flavor_access = api.nova.flavor_access_list(self.request, flavor)
        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertIsInstance(access, nova_flavor_access.FlavorAccess)
            self.assertEqual(access.flavor_id, flavor.id)

    def test_add_tenant_to_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavor_access = self.mox.CreateMockAnything()
        novaclient.flavor_access\
            .add_tenant_access(flavor=flavor,
                               tenant=tenant)\
            .AndReturn(flavor_access)
        self.mox.ReplayAll()

        api_flavor_access = api.nova.add_tenant_to_flavor(self.request,
                                                          flavor,
                                                          tenant)
        self.assertIsInstance(api_flavor_access, list)
        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertEqual(access.flavor_id, flavor.id)
            self.assertEqual(access.tenant_id, tenant.id)

    def test_remove_tenant_from_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]

        novaclient = self.stub_novaclient()
        novaclient.flavors = self.mox.CreateMockAnything()
        novaclient.flavor_access = self.mox.CreateMockAnything()
        novaclient.flavor_access\
            .remove_tenant_access(flavor=flavor,
                                  tenant=tenant)\
            .AndReturn([])
        self.mox.ReplayAll()

        api_val = api.nova.remove_tenant_from_flavor(self.request,
                                                     flavor,
                                                     tenant)
        self.assertEqual(len(api_val), len([]))
        self.assertIsInstance(api_val, list)

    def test_server_group_list(self):
        server_groups = self.server_groups.list()

        novaclient = self.stub_novaclient()
        novaclient.server_groups = self.mox.CreateMockAnything()
        novaclient.server_groups.list().AndReturn(server_groups)
        self.mox.ReplayAll()

        ret_val = api.nova.server_group_list(self.request)
        self.assertIsInstance(ret_val, list)
        self.assertEqual(len(ret_val), len(server_groups))
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Eval a model on downstream task and to measure fidelity."""
import os
import time
from typing import Any, List, Optional, Tuple, Union
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from non_semantic_speech_benchmark.trillsson import get_data
from non_semantic_speech_benchmark.trillsson import models
FLAGS = flags.FLAGS

flags.DEFINE_string('logdir', None, 'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', None,
                    'Directory where the results are saved to.')
flags.DEFINE_string('file_pattern_train', None, 'Dataset location.')
flags.DEFINE_string('file_pattern_validation', None, 'Dataset location.')
flags.DEFINE_string('file_pattern_test', None, 'Dataset location.')
flags.DEFINE_string('eval_suffix', None, 'Prefix for tensorboard.')
# NOTE(review): despite the help text, this value is passed to the dataset
# builder as the per-batch size (see _get_ds) -- confirm intent.
flags.DEFINE_integer('eval_batch_size', None, 'The number of eval batches.')
flags.DEFINE_string(
    'label_key', None,
    'Labels in the dataset on disk. Will be dumped to disk for '
    '`downstream_sklearn_eval` in a different format.')
flags.DEFINE_string(
    'target_key', None, 'Teacher embedding key in precomputed tf.Examples.')

# Flags for dumping embeddings to disk for more analysis.
flags.DEFINE_string(
    'embeddings_output_dir', None,
    'Optional directory to write embeddings to disk.')
flags.DEFINE_string('speaker_id_key', None, 'Optional')

# Teacher / student network flags.
flags.DEFINE_string('model_type', None, 'Specification for student model.')
flags.DEFINE_alias('mt', 'model_type')
flags.DEFINE_float('lr', None, 'not used')
flags.DEFINE_integer('take_fixed_data', None,
                     'If not `None`, take a fixed number of data elements.')
flags.DEFINE_integer('timeout', 7200, 'Wait-for-checkpoint timeout.')

# Not used.
flags.DEFINE_integer('max_sample_length', -1, 'Max samples length.')
flags.DEFINE_alias('msl', 'max_sample_length')
flags.DEFINE_integer('tbs', None, 'The number of images in each batch.')

# Constants for writing embedding data dump.
AUDIO_KEY_ = 'audio'  # samples key in the input tf.Examples
LABEL_KEY_ = 'label'  # label key written to the output tf.Examples
EMBEDDING_KEY_ = 'emb'  # embedding feature-key suffix in the output
SPLIT_NAMES_ = ['train', 'validation', 'test']  # evaluation split order
def _get_embedding_filename(base_dir, split_name, step):
"""Create the filename for embeddings."""
return os.path.join(base_dir, str(step), f'{split_name}-embeddings.tfrecord')
def _get_ds(file_patterns, step):
  """Build the evaluation tf.data.Dataset for the given file patterns."""
  # All dataset knobs come from command-line flags; evaluation never
  # shuffles or loops, so every example is seen exactly once.
  data_kwargs = dict(
      file_patterns=file_patterns,
      reader=tf.data.TFRecordDataset,
      samples_key=AUDIO_KEY_,
      batch_size=FLAGS.eval_batch_size,
      loop_forever=False,
      shuffle=False,
      target_key=FLAGS.target_key,
      label_key=FLAGS.label_key,
      speaker_id_key=FLAGS.speaker_id_key,
      samples_are_float=False,
      max_samples_length=None)
  dataset = get_data.get_data(**data_kwargs)
  logging.info('Got dataset for eval step: %s.', step)
  if FLAGS.take_fixed_data:
    # Optionally restrict evaluation to a fixed number of elements.
    dataset = dataset.take(FLAGS.take_fixed_data)
  return dataset
def _get_splits(
    names, file_patterns,
    embeddings_output_dir, step
):
  """Returns a list of (name, dataset, OPTIONAL tfrecord writer) triples.

  Args:
    names: Split names (e.g. SPLIT_NAMES_), one per file pattern.
    file_patterns: One file pattern (or list of patterns) per split.
    embeddings_output_dir: If truthy, base directory for dumping embeddings;
      a TFRecord writer is opened per split. Otherwise writers are None.
    step: Checkpoint step, used to name the per-step output directory.

  Returns:
    A list of (split_name, tf.data.Dataset, TFRecordWriter-or-None) tuples.
  """
  assert len(names) == len(file_patterns)
  dss = [_get_ds(file_pattern, step) for file_pattern in file_patterns]
  logging.info('[_get_splits]: Got dss: %s', dss)

  if embeddings_output_dir:
    emb_writers = []
    for n in names:
      emb_filename = _get_embedding_filename(embeddings_output_dir, n, step)
      if not tf.io.gfile.exists(os.path.dirname(emb_filename)):
        logging.info('Creating dir: %s', os.path.dirname(emb_filename))
        tf.io.gfile.makedirs(os.path.dirname(emb_filename))
      emb_writers.append(tf.io.TFRecordWriter(emb_filename))
  else:
    # BUGFIX: was hard-coded to [None, None, None]; if the number of splits
    # ever differed from 3, zip() below would silently drop or misalign
    # entries. Size the placeholder list to the actual split count.
    emb_writers = [None] * len(names)
  return list(zip(names, dss, emb_writers))
def process_single_checkpoint(
    writer,
    model,
    checkpoint,
    ckpt,
    output_dim,
    model_output_key,
    embeddings_output_dir,
    file_pattern_train,
    file_pattern_validation,
    file_pattern_test):
  """Perform all the actions associated with a single checkpoint.

  Restores `ckpt` into `model`, runs the model over the train/validation/test
  splits, accumulates MSE/MAE against the teacher targets, optionally dumps
  embeddings to disk, and writes summary scalars tagged with the checkpoint
  step.

  Args:
    writer: A tf.summary file writer for the scalar results.
    model: The Keras student model to evaluate.
    checkpoint: A tf.train.Checkpoint wrapping `model`.
    ckpt: Checkpoint path; must contain 'ckpt-<step>'.
    output_dim: Expected dimensionality of the target embeddings.
    model_output_key: Key into the model's output dict for the embedding.
    embeddings_output_dir: Optional directory for dumping embeddings.
    file_pattern_train: Dataset file pattern for the train split.
    file_pattern_validation: Dataset file pattern for the validation split.
    file_pattern_test: Dataset file pattern for the test split.
  """
  # The global step is parsed out of the checkpoint filename itself.
  assert 'ckpt-' in ckpt, ckpt
  step = int(ckpt.split('ckpt-')[-1])
  logging.info(
      '[process_single_checkpoint] Starting to evaluate step: %i.', step)
  checkpoint.restore(ckpt)
  logging.info(
      '[process_single_checkpoint] Loaded weights for eval step: %i.', step)

  splits_metadata = _get_splits(
      SPLIT_NAMES_,
      [file_pattern_train, file_pattern_validation, file_pattern_test],
      embeddings_output_dir, step)
  logging.info('[process_single_checkpoint] Got splits metadata.')

  # Track MSE and MAE both per-split and overall.
  mse_ms = {n: tf.keras.metrics.MeanSquaredError() for n in SPLIT_NAMES_}
  mae_ms = {n: tf.keras.metrics.MeanAbsoluteError() for n in SPLIT_NAMES_}
  mse_all = tf.keras.metrics.MeanSquaredError()
  mae_all = tf.keras.metrics.MeanAbsoluteError()

  logging.info('Starting the ds loop...')
  count, ex_count = 0, 0
  s = time.time()
  for split_name, ds, emb_writer in splits_metadata:
    for outs in ds:
      # Speaker IDs are only present in the batch when the flag is set;
      # otherwise pad with Nones so the zip below still lines up.
      if FLAGS.speaker_id_key:
        wav_samples, targets, labels, speaker_ids = outs
      else:
        wav_samples, targets, labels = outs
        speaker_ids = [None] * wav_samples.shape[0]
      wav_samples.shape.assert_is_compatible_with([None, None])
      targets.shape.assert_is_compatible_with([None, output_dim])

      embs = model(wav_samples, training=False)[model_output_key]
      embs.shape.assert_is_compatible_with(targets.shape)
      # Fail fast if the model produced NaNs/Infs.
      embs = tf.debugging.check_numerics(
          embs, message='Nans', name='check_numerics')

      # Update the split-specific metric and overall metric.
      for met in (mse_ms[split_name], mae_ms[split_name], mse_all, mae_all):
        met.update_state(y_true=targets, y_pred=embs)
      ex_count += embs.shape[0]
      count += 1
      logging.info('Saw %i examples after %i iterations as %.2f secs...',
                   ex_count, count,
                   time.time() - s)
      # Rather than store all embeddings in memory and write them to disk at
      # the end, let's write embeddings to disk as we generate them, if we
      # need to.
      if emb_writer:
        logging.info('Starting to write %i embeddings to disk...', ex_count)
        for emb, lbl, speaker_id in zip(embs, labels, speaker_ids):
          make_tfexample_and_write(emb, lbl, speaker_id, FLAGS.speaker_id_key,
                                   emb_writer)
        logging.info('Wrote %i embeddings to disk.', ex_count)

  # Emit one scalar per metric, tagged with the eval suffix and step.
  with writer.as_default():
    suff = FLAGS.eval_suffix
    tf.summary.scalar(f'mse_all_{suff}', mse_all.result().numpy(), step=step)
    tf.summary.scalar(f'mae_all_{suff}', mae_all.result().numpy(), step=step)
    for split in SPLIT_NAMES_:
      tf.summary.scalar(
          f'mse_{split}_{suff}', mse_ms[split].result().numpy(), step=step)
      tf.summary.scalar(
          f'mae_{split}_{suff}', mae_ms[split].result().numpy(), step=step)

  # Close any embedding writers opened by _get_splits.
  for _, _, emb_writer in splits_metadata:
    if emb_writer:
      emb_writer.close()
  logging.info('Done with eval step: %i in %.2f secs.', step, time.time() - s)
def eval_and_report(output_dim = 1024,
                    model_output_key = 'embedding'):
  """Check fidelity of a dataset.

  Watches FLAGS.logdir for new checkpoints and evaluates each one as it
  appears, writing summary scalars to FLAGS.eval_dir.

  Args:
    output_dim: Expected dimensionality of the teacher/student embeddings.
    model_output_key: Key into the model's output dict holding the embedding.
  """
  logging.info('Logdir: %s', FLAGS.logdir)
  writer = tf.summary.create_file_writer(FLAGS.eval_dir)

  # NOTE(review): FLAGS.frame_hop is not defined in this module; presumably
  # it is registered by an imported module (e.g. `models`) -- confirm.
  model = models.get_keras_model(
      model_type=FLAGS.model_type, frame_hop=FLAGS.frame_hop)
  checkpoint = tf.train.Checkpoint(model=model)

  # Blocks waiting for new checkpoints, up to FLAGS.timeout seconds.
  for ckpt in tf.train.checkpoints_iterator(
      FLAGS.logdir, timeout=FLAGS.timeout):
    process_single_checkpoint(
        writer=writer,
        model=model,
        checkpoint=checkpoint,
        ckpt=ckpt,
        output_dim=output_dim,
        model_output_key=model_output_key,
        embeddings_output_dir=FLAGS.embeddings_output_dir,
        file_pattern_train=FLAGS.file_pattern_train,
        file_pattern_validation=FLAGS.file_pattern_validation,
        file_pattern_test=FLAGS.file_pattern_test)
def make_tfexample_and_write(emb, onehot_lbl,
                             speaker_id,
                             speaker_id_key,
                             tfrecord_writer):
  """Serialize one embedding as a tf.Example and write it out.

  The record layout matches what `train_and_get_score` expects to read.

  Args:
    emb: An embedding Tensor.
    onehot_lbl: The onehot label for this embedding.
    speaker_id: Optionally, the speaker ID for this embedding.
    speaker_id_key: Optionally, the key for speaker ID.
    tfrecord_writer: An open tfrecord writer.
  """
  example = tf.train.Example()
  features = example.features.feature
  # Embedding goes in as a float list under 'embedding/<key>'.
  features[f'embedding/{EMBEDDING_KEY_}'].float_list.value.extend(
      emb.numpy())
  # Label is stored as raw bytes.
  features[LABEL_KEY_].bytes_list.value.append(onehot_lbl.numpy())
  # Speaker ID is only written when one was provided.
  if speaker_id:
    features[speaker_id_key].bytes_list.value.append(
        speaker_id.numpy())
  tfrecord_writer.write(example.SerializeToString())
def main(unused_argv):
  """Validate required flags, then run the checkpoint evaluation loop."""
  # Fail early on any missing required flag, in the same order as before.
  required = (FLAGS.model_type, FLAGS.file_pattern_train,
              FLAGS.file_pattern_validation, FLAGS.file_pattern_test,
              FLAGS.logdir, FLAGS.eval_batch_size, FLAGS.target_key)
  for flag_value in required:
    assert flag_value
  # This script relies on eager-mode TF2 semantics throughout.
  assert tf.executing_eagerly()
  eval_and_report()
if __name__ == '__main__':
  # absl entry point: parses flags, then invokes main().
  app.run(main)
| |
""" Cisco_IOS_XR_ncs1k_mxp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ncs1k\-mxp package configuration.
This module contains definitions
for the following management objects\:
hardware\-module\: NCS1k HW module config
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ClientDataRateEnum(Enum):
    """
    ClientDataRateEnum

    Client data rate

    .. data:: ten_gig = 1

        TenGig

    .. data:: forty_gig = 2

        FortyGig

    .. data:: hundred_gig = 3

        HundredGig

    """

    # Values mirror the YANG model's client data-rate enumeration.
    ten_gig = 1

    forty_gig = 2

    hundred_gig = 3

    @staticmethod
    def _meta_info():
        # Deferred import avoids loading the generated meta tables at
        # module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
        return meta._meta_table['ClientDataRateEnum']
class FecEnum(Enum):
    """
    FecEnum

    Fec (forward error correction)

    .. data:: sd7 = 1

        SoftDecision7

    .. data:: sd20 = 2

        SoftDecision20

    """

    # Values mirror the YANG model's FEC enumeration.
    sd7 = 1

    sd20 = 2

    @staticmethod
    def _meta_info():
        # Deferred import avoids loading the generated meta tables at
        # module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
        return meta._meta_table['FecEnum']
class TrunkDataRateEnum(Enum):
    """
    TrunkDataRateEnum

    Trunk data rate

    .. data:: hundred_gig = 2

        HundredGig

    .. data:: two_hundred_gig = 3

        TwoHundredGig

    .. data:: two_hundred_fifty_gig = 4

        TwoHundredFiftyGig

    """

    # Values mirror the YANG model's trunk data-rate enumeration.
    # Note the numbering deliberately starts at 2, matching the model.
    hundred_gig = 2

    two_hundred_gig = 3

    two_hundred_fifty_gig = 4

    @staticmethod
    def _meta_info():
        # Deferred import avoids loading the generated meta tables at
        # module import time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
        return meta._meta_table['TrunkDataRateEnum']
class HardwareModule(object):
    """
    NCS1k HW module config

    .. attribute:: node

        Node
        **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node>`

    """

    _prefix = 'ncs1k-mxp-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        # 'node' is a keyed YANG list of per-linecard configurations.
        self.node = YList()
        self.node.parent = self
        self.node.name = 'node'


    class Node(object):
        """
        Node

        .. attribute:: location  <key>

            Fully qualified line card specification
            **type**\: str
            **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+

        .. attribute:: slice

            Slice to be Provisioned
            **type**\: list of :py:class:`Slice <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Slice>`

        """

        _prefix = 'ncs1k-mxp-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # 'location' is the list key; must be set before path derivation.
            self.location = None
            self.slice = YList()
            self.slice.parent = self
            self.slice.name = 'slice'


        class Slice(object):
            """
            Slice to be Provisioned

            .. attribute:: slice_id  <key>

                Set Slice
                **type**\: str
                **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+

            .. attribute:: lldp

                Drop LLDP Packets
                **type**\: bool

            .. attribute:: values

                Data rates & FEC
                **type**\: :py:class:`Values <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.HardwareModule.Node.Slice.Values>`

            """

            _prefix = 'ncs1k-mxp-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # 'slice_id' is the list key; must be set before path derivation.
                self.slice_id = None
                self.lldp = None
                self.values = HardwareModule.Node.Slice.Values()
                self.values.parent = self


            class Values(object):
                """
                Data rates & FEC

                .. attribute:: client_rate

                    Client Rate
                    **type**\: :py:class:`ClientDataRateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.ClientDataRateEnum>`

                .. attribute:: encrypted

                    Encrypted
                    **type**\: bool
                    **default value**\: false

                .. attribute:: fec

                    FEC
                    **type**\: :py:class:`FecEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.FecEnum>`

                .. attribute:: trunk_rate

                    TrunkRate
                    **type**\: :py:class:`TrunkDataRateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ncs1k_mxp_cfg.TrunkDataRateEnum>`

                """

                _prefix = 'ncs1k-mxp-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.client_rate = None
                    self.encrypted = None
                    self.fec = None
                    self.trunk_rate = None

                @property
                def _common_path(self):
                    # XPath of this container is derived from the parent Slice.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')

                    return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-cfg:values'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # True when any leaf has been explicitly set.
                    if self.client_rate is not None:
                        return True

                    if self.encrypted is not None:
                        return True

                    if self.fec is not None:
                        return True

                    if self.trunk_rate is not None:
                        return True

                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
                    return meta._meta_table['HardwareModule.Node.Slice.Values']['meta_info']

            @property
            def _common_path(self):
                # List entries need both the parent path and the key value.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                if self.slice_id is None:
                    raise YPYModelError('Key property slice_id is None')

                return self.parent._common_path +'/Cisco-IOS-XR-ncs1k-mxp-cfg:slice[Cisco-IOS-XR-ncs1k-mxp-cfg:slice-id = ' + str(self.slice_id) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                # The key itself counts as data, as do any set children.
                if self.slice_id is not None:
                    return True

                if self.lldp is not None:
                    return True

                if self.values is not None and self.values._has_data():
                    return True

                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
                return meta._meta_table['HardwareModule.Node.Slice']['meta_info']

        @property
        def _common_path(self):
            # Top-level list entry: absolute path keyed by 'location'.
            if self.location is None:
                raise YPYModelError('Key property location is None')

            return '/Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module/Cisco-IOS-XR-ncs1k-mxp-cfg:node[Cisco-IOS-XR-ncs1k-mxp-cfg:location = ' + str(self.location) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if self.location is not None:
                return True

            if self.slice is not None:
                for child_ref in self.slice:
                    if child_ref._has_data():
                        return True

            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
            return meta._meta_table['HardwareModule.Node']['meta_info']

    @property
    def _common_path(self):
        # Root container has a fixed absolute path.
        return '/Cisco-IOS-XR-ncs1k-mxp-cfg:hardware-module'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if self.node is not None:
            for child_ref in self.node:
                if child_ref._has_data():
                    return True

        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ncs1k_mxp_cfg as meta
        return meta._meta_table['HardwareModule']['meta_info']
| |
# Introduction
# ============
# A regular expression is a special type of pattern that is usually used to find and replace
# parts of strings. They are related to other constructs of theoretical computer science
# like (regular) grammars and (finite) automata, which you could look up to understand
# more behind the motivation and where some of the short-comings (later on that) come from.
# Exercises
# ============
# The easiest regular expression is one that matches a specific string pattern exactly.
# Disregarding some special characters, the regular expression looks the same as the pattern to match:
#
rex = 'spam' # matches the string 'spam' exactly
# Python defines all the functionality regarding regular expressions in the 're' module:
import re
# The 'match' function is used to see if the beginning of a string matches a regular expression:
assert re.match(rex, 'spam')
assert re.match(rex, 'spammer')
assert not re.match(rex, 'eggs')
# If the string does not match the regular expression, re.match returns None,
# which is falsy -- that is why the asserts above work.
# EXERCISE:
# write a regular expression that matches 'eggs' instead of 'spam':
# assert re.match(myrex, 'eggs')
# Matching a string exactly is not very different from just checking that two strings are equal.
# But regular expressions can do more than that. They allow for matching arbitrary characters
# instead of specific ones. The '.' (dot) character is used for that:
rex = 'spam.'
assert re.match(rex, 'spams')
assert re.match(rex, 'spamd')
assert not re.match(rex, 'spoms')
# EXERCISE:
# write a single regular expression that matches both 'egged' and 'egald'
# rex = 'somerex'
# assert re.match(rex, 'egged')
# assert re.match(rex, 'egald')
# We can match a choice of patterns by using the '|' (bar) character:
rex = 'spam|eggs'
assert re.match(rex, 'spam')
assert re.match(rex, 'eggs')
# EXERCISE:
# Write a regular expression that matches both color and colour using the bar:
# rex = 'somerex'
# assert re.match(rex, 'color')
# assert re.match(rex, 'colour')
# Sometimes we want to match strings of variable length. Regular expressions introduce
# quantifiers for this. The first one is the "optional" quantifier '?' (question mark).
# It means the character in front of it can be left out:
rex = 'spams?'
assert re.match(rex, 'spams')
assert re.match(rex, 'spam')
# This can also be used with the dot:
rex = 'spam.?'
assert re.match(rex, 'spams')
assert re.match(rex, 'spamd')
assert re.match(rex, 'spam')
# EXERCISE:
# Write a regular expression that matches both color and colour using the question mark:
# rex = 'somerex'
# assert re.match(rex, 'color')
# assert re.match(rex, 'colour')
# The next quantifier is '*' (asterisk). It means that on top of the character being
# optional, there can be as many of them as there need to be:
rex = 'foo*'
assert re.match(rex, 'fo')
assert re.match(rex, 'foo')
assert re.match(rex, 'fooooooo')
# EXERCISE:
# Write a regular expression that matches an arbitrary number of 'o's in between two 'l's:
# rex = 'somerex'
# assert re.match(rex, 'll')
# assert re.match(rex, 'lol')
# assert re.match(rex, 'loooool')
# With the '+' (plus sign) we require there to be at least one occurrence of the
# preceding character in the string:
rex = 'foo+'
assert not re.match(rex, 'fo')
assert re.match(rex, 'foo')
assert re.match(rex, 'fooooooo')
# You can use re.match to check if the start of a string matches a regular expression.
# To check if any part of a string matches the regular expression, you can use re.search:
rex = 'foo+'
assert not re.search(rex, 'spamfoeggs')
assert re.search(rex, 'spamfooeggs')
# EXERCISE:
# Write a regular expression that matches any word string with at least one 't' in it.
# rex = 'myrex'
# assert not re.search(rex, 'road')
# assert re.search(rex, 'toast')
# Python also allows for checking for a specific number of occurrences. For this
# '{}' (curly braces) are used:
rex = 'ba{5}'
assert re.match(rex, 'baaaaa')
assert not re.match(rex, 'baaa')
rex = 'ba{3,5}'
assert re.match(rex, 'baaaaa')
assert re.match(rex, 'baaa')
assert not re.match(rex, 'ba')
# EXERCISE:
# Write a regular expression that matches three to six 'o's followed by 'uf':
# rex = 'somerex'
# assert re.match(rex, 'ooouf')
# assert re.match(rex, 'oooooouf')
# assert not re.match(rex, 'ouf')
# For now we've only checked multiple occurrences of a single character. With grouping
# we can do this for a sequence of characters by putting them in parentheses:
rex = "(eg)?gs"
assert re.match(rex, "gs")
assert not re.match(rex, "ggs")
assert re.match(rex, "eggs")
# Notice how 'ggs' is not matched, because the 'eg' group can only be matched as one.
# EXERCISE:
# Write a regular expression that matches 'some', then an arbitrary number of arbitrary characters
# in the middle and ends with either 'eggs' or 'spam':
#rex = 'somerex'
#assert re.match(rex, 'some classy eggs')
#assert re.match(rex, 'some delicious spam')
#assert not re.match(rex, 'some salty ham')
#assert not re.match(rex, 'any spammy spam')
# The '|' (bar) can be used multiple times:
rex = 'a|b|c'
assert re.match(rex, 'at home')
assert re.match(rex, 'bar none')
assert re.match(rex, 'cute kittens')
assert not re.match(rex, 'dumb dogs')
# But this can become tedious if we want to match more than just a few characters or patterns.
# For this purpose, we can use square braces as a more compact way of writing these expressions:
rex = '[abc]'
assert re.match(rex, 'at home')
assert re.match(rex, 'bar none')
assert re.match(rex, 'cute kittens')
assert not re.match(rex, 'dumb dogs')
# Furthermore, we can use ranges of characters by putting dashes in between:
rex = '[a-c]'
assert re.match(rex, 'at home')
assert re.match(rex, 'bar none')
assert re.match(rex, 'cute kittens')
assert not re.match(rex, 'dumb dogs')
# For example, we can use [A-Z] to match all capital letters:
rex = '[A-Z]'
assert re.match(rex, 'Cute kittens')
assert not re.match(rex, 'cute kittens')
# EXERCISE:
# Write a regular expression that matches any string containing a number from 1 to 9
# rex = 'somerex'
# assert re.match(rex, 'toaster 3 still works')
# assert not re.match(rex, 'bread')
# assert not re.match(rex, 'number 0 is gone')
# We can also have multiple ranges in the same square brace expression:
rex = '[A-Za-z]+'
assert re.match(rex, 'immadeoutofletters')
assert not re.match(rex, '77Istartwithanumber')
# Python defines shortcuts for some of these ranges. You can use \d to match
# numerals and \w to match numerals and letters and underscores:
rex = '\d+\w*\d+' # match a string starting with a numeral, maybe followed by any
                  # letter or number, then followed by at least a numeral again
assert re.match(rex, '0spam3')
assert not re.match(rex, '0 45')
assert not re.match(rex, '0spam')
# EXERCISE:
# Write a regular expression, that matches 'there are x kittens in the kitchen',
# with x being any number.
# rex = 'somerex'
# assert re.match(rex, 'there are 5 kittens in the kitchen')
# assert re.match(rex, 'there are 25 kittens in the kitchen')
# assert not re.match(rex, 'there are no kittens in the kitchen')
# assert not re.match(rex, 'there are kittens in the kitchen')
# When a string matches a regular expression both re.match and re.search return
# Match objects which contain information about that particular match:
rex = '\d+' # find numbers in the string
match = re.search(rex, 'Some 100 flowers were trampled.')
assert match.start() == 5
assert match.end() == 8
# By using grouping, we can extract the matching part of the string directly, using
# the 'groups' method. It will return the values of the matches as a tuple:
rex = '(\d+).+(\d+)'
match = re.search(rex, 'Some 100 flowers made a mess in 5 different cities.')
assert match.groups() == ('100', '5')
# We can also access the value of the group directly via the 'group' function.
# The 0th group is the part of the string that matches the whole expression:
assert match.group(0) == '100 flowers made a mess in 5'
assert match.group(1) == '100'
assert match.group(2) == '5'
# EXERCISE:
# Write a function that extracts
## TODO
| |
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
LIBRARIES BUILD
"""
import sys
from time import time
from os.path import join, abspath, dirname
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.toolchains import TOOLCHAINS
from workspace_tools.targets import TARGET_NAMES, TARGET_MAP
from workspace_tools.options import get_default_options_parser
from workspace_tools.build_api import build_mbed_libs, build_lib
from workspace_tools.build_api import mcu_toolchain_matrix
from workspace_tools.build_api import static_analysis_scan, static_analysis_scan_lib, static_analysis_scan_library
from workspace_tools.build_api import print_build_results
from workspace_tools.settings import CPPCHECK_CMD, CPPCHECK_MSG_FORMAT
if __name__ == '__main__':
    # Command-line entry point (Python 2): builds the mbed SDK libraries for
    # every requested (toolchain, target) pair, or runs cppcheck static
    # analysis instead when --cppcheck is given.
    start = time()
    # Parse Options
    parser = get_default_options_parser()
    # Extra libraries
    parser.add_option("-r", "--rtos",
                      action="store_true",
                      dest="rtos",
                      default=False,
                      help="Compile the rtos")
    parser.add_option("--rpc",
                      action="store_true",
                      dest="rpc",
                      default=False,
                      help="Compile the rpc library")
    parser.add_option("-e", "--eth",
                      action="store_true", dest="eth",
                      default=False,
                      help="Compile the ethernet library")
    parser.add_option("-U", "--usb_host",
                      action="store_true",
                      dest="usb_host",
                      default=False,
                      help="Compile the USB Host library")
    parser.add_option("-u", "--usb",
                      action="store_true",
                      dest="usb",
                      default=False,
                      help="Compile the USB Device library")
    parser.add_option("-d", "--dsp",
                      action="store_true",
                      dest="dsp",
                      default=False,
                      help="Compile the DSP library")
    parser.add_option("-F", "--fat",
                      action="store_true",
                      dest="fat",
                      default=False,
                      help="Compile FS and SD card file system library")
    parser.add_option("-b", "--ublox",
                      action="store_true",
                      dest="ublox",
                      default=False,
                      help="Compile the u-blox library")
    parser.add_option("", "--cpputest",
                      action="store_true",
                      dest="cpputest_lib",
                      default=False,
                      help="Compiles 'cpputest' unit test library (library should be on the same directory level as mbed repository)")
    parser.add_option("-D", "",
                      action="append",
                      dest="macros",
                      help="Add a macro definition")
    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      default=False,
                      help="Displays supported matrix of MCUs and toolchains")
    parser.add_option("", "--cppcheck",
                      action="store_true",
                      dest="cppcheck_validation",
                      default=False,
                      help="Forces 'cppcheck' static code analysis")
    parser.add_option('-f', '--filter',
                      dest='general_filter_regex',
                      default=None,
                      help='For some commands you can use filter to filter out results')
    parser.add_option("-j", "--jobs", type="int", dest="jobs",
                      default=1, help="Number of concurrent jobs (default 1). Use 0 for auto based on host machine's number of CPUs")
    parser.add_option("-v", "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Verbose diagnostic output")
    parser.add_option("--silent",
                      action="store_true",
                      dest="silent",
                      default=False,
                      help="Silent diagnostic output (no copy, compile notification)")
    parser.add_option("-x", "--extra-verbose-notifications",
                      action="store_true",
                      dest="extra_verbose_notify",
                      default=False,
                      help="Makes compiler more verbose, CI friendly.")
    (options, args) = parser.parse_args()
    # Only prints matrix of supported toolchains
    if options.supported_toolchains:
        print mcu_toolchain_matrix(platform_filter=options.general_filter_regex)
        exit(0)
    # Get target list (comma-separated -m/--mcu value, validated against
    # the known TARGET_NAMES; default is every known target)
    if options.mcu:
        mcu_list = (options.mcu).split(",")
        for mcu in mcu_list:
            if mcu not in TARGET_NAMES:
                print "Given MCU '%s' not into the supported list:\n%s" % (mcu, TARGET_NAMES)
                sys.exit(1)
        targets = mcu_list
    else:
        targets = TARGET_NAMES
    # Get toolchains list (same scheme as the target list above)
    if options.tool:
        toolchain_list = (options.tool).split(",")
        for tc in toolchain_list:
            if tc not in TOOLCHAINS:
                print "Given toolchain '%s' not into the supported list:\n%s" % (tc, TOOLCHAINS)
                sys.exit(1)
        toolchains = toolchain_list
    else:
        toolchains = TOOLCHAINS
    # Get libraries list
    libraries = []
    # Additional Libraries (library ids understood by build_lib; note some
    # flags pull in several ids, e.g. --ublox implies the rtos and usb_host)
    if options.rtos:
        libraries.extend(["rtx", "rtos"])
    if options.rpc:
        libraries.extend(["rpc"])
    if options.eth:
        libraries.append("eth")
    if options.usb:
        libraries.append("usb")
    if options.usb_host:
        libraries.append("usb_host")
    if options.dsp:
        libraries.extend(["cmsis_dsp", "dsp"])
    if options.fat:
        libraries.extend(["fat"])
    if options.ublox:
        libraries.extend(["rtx", "rtos", "usb_host", "ublox"])
    if options.cpputest_lib:
        libraries.extend(["cpputest"])
    # Build results
    failures = []
    successes = []
    skipped = []
    # CPPCHECK code validation
    if options.cppcheck_validation:
        for toolchain in toolchains:
            for target in targets:
                try:
                    mcu = TARGET_MAP[target]
                    # CMSIS and MBED libs analysis
                    static_analysis_scan(mcu, toolchain, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT, verbose=options.verbose, jobs=options.jobs)
                    for lib_id in libraries:
                        # Static check for library
                        static_analysis_scan_lib(lib_id, mcu, toolchain, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT,
                                                 options=options.options,
                                                 extra_verbose=options.extra_verbose_notify, verbose=options.verbose, jobs=options.jobs, clean=options.clean,
                                                 macros=options.macros)
                    pass  # NOTE(review): leftover no-op
                except Exception, e:
                    # In verbose mode a failure aborts the whole run;
                    # otherwise it is only reported and scanning continues.
                    if options.verbose:
                        import traceback
                        traceback.print_exc(file=sys.stdout)
                        sys.exit(1)
                    print e
    else:
        # Build
        for toolchain in toolchains:
            for target in targets:
                tt_id = "%s::%s" % (toolchain, target)
                try:
                    mcu = TARGET_MAP[target]
                    lib_build_res = build_mbed_libs(mcu, toolchain,
                                                    options=options.options,
                                                    extra_verbose=options.extra_verbose_notify,
                                                    verbose=options.verbose,
                                                    silent=options.silent,
                                                    jobs=options.jobs,
                                                    clean=options.clean,
                                                    macros=options.macros)
                    for lib_id in libraries:
                        build_lib(lib_id, mcu, toolchain,
                                  options=options.options,
                                  extra_verbose=options.extra_verbose_notify,
                                  verbose=options.verbose,
                                  silent=options.silent,
                                  clean=options.clean,
                                  macros=options.macros,
                                  jobs=options.jobs)
                    # build_mbed_libs returns falsy when the target/toolchain
                    # combination is skipped rather than built
                    if lib_build_res:
                        successes.append(tt_id)
                    else:
                        skipped.append(tt_id)
                except Exception, e:
                    if options.verbose:
                        import traceback
                        traceback.print_exc(file=sys.stdout)
                        sys.exit(1)
                    failures.append(tt_id)
                    print e
    # Write summary of the builds
    print
    print "Completed in: (%.2f)s" % (time() - start)
    print
    for report, report_name in [(successes, "Build successes:"),
                                (skipped, "Build skipped:"),
                                (failures, "Build failures:"),
                                ]:
        if report:
            print print_build_results(report, report_name),
    if failures:
        sys.exit(1)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import string
import glob
try:
import OpenSSL.crypto
HAS_OPENSSL = True
except:
HAS_OPENSSL = False
try:
import pyrad.packet
from pyrad.client import Client
from pyrad.dictionary import Dictionary
HAS_PYRAD = True
except:
HAS_PYRAD = False
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities import calvinconfig
from calvin.utilities.utils import get_home
_conf = calvinconfig.get()
_log = get_logger(__name__)
#default timeout
TIMEOUT=5
def security_modules_check():
    """Verify that the optional modules required by the security config exist.

    Returns:
        bool: True when no security is configured, or when all required
        modules are importable (OpenSSL always; pyrad additionally when the
        radius authentication procedure is selected). False otherwise.
    """
    if _conf.get("security", "security_conf") or _conf.get("security", "security_policy"):
        # Want security
        if not HAS_OPENSSL:
            # Miss open ssl
            _log.error("Security: Install openssl to allow verification of signatures and certificates")
            return False
        sec_conf = _conf.get("security", "security_conf")
        # BUGFIX: the old code unconditionally subscripted security_conf,
        # which crashed (TypeError/KeyError) when only security_policy was
        # set or when no 'authentication' section was configured.
        if (sec_conf
                and sec_conf.get('authentication', {}).get('procedure') == "radius"
                and not HAS_PYRAD):
            _log.error("Security: Install pyrad to use radius server as authentication method.")
            return False
    return True
def security_needed_check():
    """Return True when the configuration enables any security feature."""
    sec_conf = _conf.get("security", "security_conf")
    sec_policy = _conf.get("security", "security_policy")
    # Security is wanted as soon as either setting is present.
    return bool(sec_conf or sec_policy)
class Security(object):
    """Runtime security helper for Calvin (Python 2).

    Driven by the "security" section of the global configuration, it covers
    two concerns:
      * authentication of a principal (local user database or RADIUS), and
      * verification of signatures on applications/actors against a
        certificate trust store and the security policy.
    """

    def __init__(self):
        # Snapshot the security configuration and policy from the global config.
        _log.debug("Security: _init_")
        self.sec_conf = _conf.get("security","security_conf")
        self.sec_policy = _conf.get("security","security_policy")
        if self.sec_conf is not None and not self.sec_conf.get('signature_trust_store', None):
            # Set default directory for trust store
            homefolder = get_home()
            truststore_dir = os.path.join(homefolder, ".calvin", "security", "trustStore")
            self.sec_conf['signature_trust_store'] = truststore_dir
        # principal: {principal_type: [names]}; auth: parallel {type: [bool]}
        self.principal = {}
        self.auth = {}

    def __str__(self):
        return "Principal: %s\nAuth: %s" % (self.principal, self.auth)

    def set_principal(self, principal):
        """Store the principal dict; values are normalized to lists and every
        entry starts out unauthorized. Returns False on a non-dict argument
        (and implicitly None on success)."""
        _log.debug("Security: set_principal %s" % principal)
        if not isinstance(principal, dict):
            return False
        # Make sure all principal values are lists
        self.principal = {k: list(v) if isinstance(v, (list, tuple, set)) else [v]
                          for k, v in principal.iteritems()}
        # All default to unauthorized
        self.auth = {k: [False]*len(v) for k, v in self.principal.iteritems()}

    def authenticate_principal(self):
        """Authenticate the stored principal using the configured procedure.

        Returns True when no security is needed or authentication succeeds.
        """
        _log.debug("Security: authenticate_principal")
        if not security_needed_check():
            _log.debug("Security: authenticate_principal no security needed")
            return True
        if self.sec_conf['authentication']['procedure'] == "local_file":
            _log.debug("Security: local file authentication method chosen")
            return self.authenticate_using_local_database()
        if self.sec_conf['authentication']['procedure'] == "radius":
            if not HAS_PYRAD:
                _log.error("Security: Install pyrad to use radius server as authentication method.\n" +
                           "NB! NO AUTHENTICATION USED")
                return False
            _log.info("Security: Radius authtentication method chosen")
            return self.authenticate_using_radius_server()
        # Unknown procedure falls through to "no authentication".
        _log.info("Security: No security config, so authentication disabled")
        return True

    def authenticate_using_radius_server(self):
        """Authenticate the first stored user against the configured RADIUS
        server; records the result in self.auth['user']."""
        auth = []
        if self.principal['user']:
            srv=Client(server=self.sec_conf['authentication']['server_ip'],
                       secret= bytes(self.sec_conf['authentication']['secret']),
                       dict=Dictionary("extras/pyrad_dicts/dictionary", "extras/pyrad_dicts/dictionary.acc"))
            req=srv.CreateAuthPacket(code=pyrad.packet.AccessRequest,
                                     User_Name=self.principal['user'][0],
                                     NAS_Identifier="localhost")
            req["User-Password"]=req.PwCrypt(self.principal['password'][0])
            # FIXME is this over socket? then we should not block here
            reply=srv.SendPacket(req)
            _log.debug("Attributes returned by server:")
            for i in reply.keys():
                _log.debug("%s: %s" % (i, reply[i]))
            if reply.code==pyrad.packet.AccessAccept:
                _log.debug("Security:access accepted")
                auth.append(True)
                # return True
            else:
                _log.debug("Security: access denied")
                auth.append(False)
                # return False
        self.auth['user']=auth
        return any(auth)

    def authenticate_using_local_database(self):
        """ Authenticate a principal against config stored information
        This is primarily intended for testing purposes,
        since passwords arn't stored securily.
        """
        if 'local_users' not in self.sec_conf['authentication']:
            _log.debug("local_users not found in security_conf: %s" % self.sec_conf['authentication'])
            return False
        # Verify users against stored passwords
        # TODO expand with other principal types
        d = self.sec_conf['authentication']['local_users']
        if not ('user' in self.principal and 'password' in self.principal):
            return False
        if len(self.principal['user']) != len(self.principal['password']):
            return False
        auth = []
        for user, password in zip(self.principal['user'], self.principal['password']):
            if user in d.keys():
                # NOTE(review): plain-text comparison, see docstring above.
                if d[user] == password:
                    _log.debug("Security: found user: %s",user)
                    auth.append(True)
                else:
                    _log.debug("Security: incorrect username or password")
                    auth.append(False)
            else:
                auth.append(False)
        self.auth['user'] = auth
        return any(auth)

    def check_security_actor_requirements(self, requires):
        """Check every requirement in `requires` against the policy; all must
        be granted. Trivially True when access control is disabled."""
        _log.debug("Security: check_security_actor_requirements")
        if self.sec_conf and self.sec_conf['access_control_enabled'] == "True":
            for req in requires:
                if not self.check_security_policy_actor(req, "user", self.principal):
                    return False
        #no security config, so access control is disabled
        return True

    def check_security_policy_actor(self, req, principal_type, principal):
        """ Checks that the requirement is allowed by the security policy """
        _log.debug("Security: check_security_policy_actor")
        #Calling function shall already have checked that self.sec_conf exist
        #create list, e.g., ['calvinsys','media','camera','lense']
        temp = req.split(".")
        while len(temp) >0:
            temp2 = '.'.join(temp)
            # Satisfied when one principal match in one policy
            # NOTE(review): the comprehension's loop variables shadow the
            # principal_type/principal parameters; the parameters are
            # effectively unused beyond that point. Also assumes self.auth
            # has an entry for every principal type — verify with callers.
            for plcy in [p for p in self.sec_policy.values() if temp2 in p['resource']]:
                if any([principal_name in plcy['principal'][principal_type]
                        for principal_type, principal_names in principal.iteritems()
                        if principal_type in plcy['principal']
                        for principal_name, auth in zip(principal_names, self.auth[principal_type])
                        if auth]):
                    _log.debug("Security: found a match for %s against %s" % (req, temp2))
                    return True
            #Let's go up in hierarchy, e.g. if we found no policy for calvinsys.media.camera
            #let's now try calvinsys.media instead
            temp.pop()
        #The user is not in the list of allowed users for the resource
        _log.debug("Security: the principal does not have access rights to resource: %s" % req)
        return False

    @staticmethod
    def verify_signature_get_files(filename, skip_file=False):
        """Collect <filename>.sign.<cert_hash> signature files and (unless
        skip_file) the file content itself; returns a dict with keys
        'sign' (hash -> signature bytes) and 'file', or None when the file
        cannot be read."""
        # Get the data
        sign_filenames = filename + ".sign.*"
        sign_content = {}
        file_content = ""
        # Signature files are named <filename>.sign.<hash-of-signing-cert>
        sign_files = {os.path.basename(f).split(".sign.")[1]: f for f in glob.glob(sign_filenames)}
        for cert_hash, sign_filename in sign_files.iteritems():
            try:
                with open(sign_filename, 'rt') as f:
                    sign_content[cert_hash] = f.read()
                    _log.debug("Security: found signature for %s" % cert_hash)
            except:
                pass
        if not skip_file:
            try:
                with open(filename, 'rt') as f:
                    file_content = f.read()
            except:
                return None
                # NOTE(review): unreachable — placed after the return.
                _log.debug("Security: file can't be opened")
        return {'sign': sign_content, 'file': file_content}

    def verify_signature(self, file, flag):
        """Convenience wrapper: read file + signatures, then verify."""
        content = Security.verify_signature_get_files(file)
        if content:
            return self.verify_signature_content(content, flag)
        else:
            return False

    def verify_signature_content(self, content, flag):
        """Verify a signature for `flag` ('application' or 'actor') against
        any policy whose principal matches an already-authorized principal."""
        _log.debug("Security: verify %s signature of %s" % (flag, content))
        if not self.sec_conf:
            _log.debug("Security: no signature verification required: %s"% content['file'])
            return True
        if flag not in ["application", "actor"]:
            # TODO add component verification
            raise NotImplementedError
        # loop through the policies until one is found that applies to the principal
        # Verification OK if sign and cert OK for any principal matching policy
        # that have previously been authorized
        for plcy in self.sec_policy.values():
            _log.debug("Security: verify_signature policy: %s\nprincipal: %s\nauth:%s" %
                       (plcy, self.principal, self.auth))
            if any([principal_name in plcy['principal'][principal_type]
                    for principal_type, principal_names in self.principal.iteritems()
                    if principal_type in plcy['principal']
                    for principal_name, auth in zip(principal_names, self.auth[principal_type])
                    if auth]):
                _log.debug("Security: found a policy with matching principal")
                if (flag + '_signature') in plcy:
                    if self.verify_signature_and_certificate(content, plcy, flag):
                        _log.debug("Security: signature verification successfull")
                        return True
        _log.error("Security: verification of %s signature failed 1" % flag)
        return False

    def verify_signature_and_certificate(self, content, plcy, flag):
        """Verify `content` against the policy: allow unsigned if the policy
        says so, otherwise check each signature against the trusted
        certificate identified by its hash in the trust store."""
        if "__unsigned__" in plcy[flag + '_signature']:
            _log.debug("Security: %s is allowed unsigned" % flag)
            return True
        if content is None:
            _log.debug("Security: %s need file and signature with certificate hash" % flag)
            return False
        if not content['sign']:
            _log.debug("Security: %s signature information missing" % flag)
            return False
        if not HAS_OPENSSL:
            _log.error("Security: Install openssl to allow verification of signatures and certificates")
            _log.error("Security: verification of %s signature failed 2" % flag)
            return False
        _log.debug("Security:verify_signature_and_certificate")
        for cert_hash, signature in content['sign'].iteritems():
            try:
                # Trusted certs are stored as <hash>.0 in the trust store
                # (OpenSSL c_rehash naming convention).
                trusted_cert = os.path.join(self.sec_conf['signature_trust_store'], cert_hash + ".0")
                with open(trusted_cert, 'rt') as f:
                    string_trusted_cert = f.read()
                    trusted_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, string_trusted_cert)
                    if self.check_signature_policy(trusted_cert, flag, plcy):
                        try:
                            # Raises on mismatch; returns None on success.
                            OpenSSL.crypto.verify(trusted_cert, signature, content['file'], 'sha256')
                            _log.debug("Security: %s signature correct" % flag)
                            return True
                        except Exception as e:
                            _log.debug("Security: OpenSSL verification error", exc_info=True)
                            continue
                    else:
                        _log.debug("Security: signature policy not fulfilled")
                        continue
            except Exception as e:
                _log.debug("Security: error opening one of the needed certificates", exc_info=True)
                continue
        _log.error("Security: verification of %s signature failed 3" % flag)
        return False

    def check_signature_policy(self, cert, flag, plcy):
        """ Checks that if the signer is allowed by the security policy """
        _log.debug("Security:check_signature_policy")
        if flag=="application":
            if 'application_signature' in plcy:
                if cert.get_issuer().CN not in plcy['application_signature']:
                    _log.debug("Security: application signer not allowed")
                    return False
            else:
                _log.debug("Security: no application_signature element, unsigned applications allowed")
        elif flag=="actor":
            if 'actor_signature' in plcy:
                if cert.get_issuer().CN not in plcy['actor_signature']:
                    _log.debug("Security: actor signer not allowed")
                    return False
            else:
                _log.debug("Security: no actor_signature element, unsigned applications allowed")
        return True
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.ids_v1.types import ids
from google.longrunning import operations_pb2 # type: ignore
from .base import IDSTransport, DEFAULT_CLIENT_INFO
class IDSGrpcTransport(IDSTransport):
"""gRPC backend transport for IDS.
The IDS Service
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
    self,
    *,
    host: str = "ids.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Sequence[str] = None,
    channel: grpc.Channel = None,
    api_mtls_endpoint: str = None,
    client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
    ssl_channel_credentials: grpc.ChannelCredentials = None,
    client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
    quota_project_id: Optional[str] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    always_use_jwt_access: Optional[bool] = False,
) -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
             The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
            This argument is ignored if ``channel`` is provided.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        channel (Optional[grpc.Channel]): A ``Channel`` instance through
            which to make calls.
        api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
            If provided, it overrides the ``host`` argument and tries to create
            a mutual TLS channel with client SSL credentials from
            ``client_cert_source`` or application default SSL credentials.
        client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
            Deprecated. A callback to provide client SSL certificate bytes and
            private key bytes, both in PEM format. It is ignored if
            ``api_mtls_endpoint`` is None.
        ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
            for the grpc channel. It is ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
            A callback to provide client certificate bytes and private key bytes,
            both in PEM format. It is used to configure a mutual TLS channel. It is
            ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.

    Raises:
      google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
          creation failed for any reason.
      google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
          and ``credentials_file`` are passed.
    """
    self._grpc_channel = None
    self._ssl_channel_credentials = ssl_channel_credentials
    self._stubs: Dict[str, Callable] = {}
    self._operations_client: Optional[operations_v1.OperationsClient] = None

    if api_mtls_endpoint:
        warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
    if client_cert_source:
        warnings.warn("client_cert_source is deprecated", DeprecationWarning)

    if channel:
        # Ignore credentials if a channel was passed.
        credentials = False
        # If a channel was explicitly provided, set it.
        self._grpc_channel = channel
        self._ssl_channel_credentials = None
    else:
        # No channel given: derive the SSL credentials to use, preferring
        # the deprecated api_mtls_endpoint path when it is supplied.
        if api_mtls_endpoint:
            host = api_mtls_endpoint

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                self._ssl_channel_credentials = SslCredentials().ssl_credentials

        else:
            if client_cert_source_for_mtls and not ssl_channel_credentials:
                cert, key = client_cert_source_for_mtls()
                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )

    # The base transport sets the host, credentials and scopes
    super().__init__(
        host=host,
        credentials=credentials,
        credentials_file=credentials_file,
        scopes=scopes,
        quota_project_id=quota_project_id,
        client_info=client_info,
        always_use_jwt_access=always_use_jwt_access,
    )

    if not self._grpc_channel:
        # Lazily create the channel only when the caller did not supply one.
        self._grpc_channel = type(self).create_channel(
            self._host,
            # use the credentials which are saved
            credentials=self._credentials,
            # Set ``credentials_file`` to ``None`` here as
            # the credentials that we saved earlier should be used.
            credentials_file=None,
            scopes=self._scopes,
            ssl_credentials=self._ssl_channel_credentials,
            quota_project_id=quota_project_id,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Wrap messages. This must be done after self._grpc_channel exists
    self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
    cls,
    host: str = "ids.googleapis.com",
    credentials: ga_credentials.Credentials = None,
    credentials_file: str = None,
    scopes: Optional[Sequence[str]] = None,
    quota_project_id: Optional[str] = None,
    **kwargs,
) -> grpc.Channel:
    """Create and return a gRPC channel object.
    Args:
        host (Optional[str]): The host for the channel to use.
        credentials (Optional[~.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify this application to the service. If
            none are specified, the client will attempt to ascertain
            the credentials from the environment.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is mutually exclusive with credentials.
        scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
            service. These are only used when credentials are not specified and
            are passed to :func:`google.auth.default`.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        kwargs (Optional[dict]): Keyword arguments, which are passed to the
            channel creation.
    Returns:
        grpc.Channel: A gRPC channel object.

    Raises:
        google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
            and ``credentials_file`` are passed.
    """
    # Delegate to the shared helper; class-level AUTH_SCOPES/DEFAULT_HOST
    # provide the defaults when the caller supplies none.
    return grpc_helpers.create_channel(
        host,
        credentials=credentials,
        credentials_file=credentials_file,
        quota_project_id=quota_project_id,
        default_scopes=cls.AUTH_SCOPES,
        scopes=scopes,
        default_host=cls.DEFAULT_HOST,
        **kwargs,
    )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.

        The channel is either supplied by the caller or created during
        ``__init__``, so this accessor is a cheap attribute read.
        """
        return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def list_endpoints(
self,
) -> Callable[[ids.ListEndpointsRequest], ids.ListEndpointsResponse]:
r"""Return a callable for the list endpoints method over gRPC.
Lists Endpoints in a given project and location.
Returns:
Callable[[~.ListEndpointsRequest],
~.ListEndpointsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_endpoints" not in self._stubs:
self._stubs["list_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.ids.v1.IDS/ListEndpoints",
request_serializer=ids.ListEndpointsRequest.serialize,
response_deserializer=ids.ListEndpointsResponse.deserialize,
)
return self._stubs["list_endpoints"]
@property
def get_endpoint(self) -> Callable[[ids.GetEndpointRequest], ids.Endpoint]:
r"""Return a callable for the get endpoint method over gRPC.
Gets details of a single Endpoint.
Returns:
Callable[[~.GetEndpointRequest],
~.Endpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_endpoint" not in self._stubs:
self._stubs["get_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.ids.v1.IDS/GetEndpoint",
request_serializer=ids.GetEndpointRequest.serialize,
response_deserializer=ids.Endpoint.deserialize,
)
return self._stubs["get_endpoint"]
@property
def create_endpoint(
self,
) -> Callable[[ids.CreateEndpointRequest], operations_pb2.Operation]:
r"""Return a callable for the create endpoint method over gRPC.
Creates a new Endpoint in a given project and
location.
Returns:
Callable[[~.CreateEndpointRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_endpoint" not in self._stubs:
self._stubs["create_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.ids.v1.IDS/CreateEndpoint",
request_serializer=ids.CreateEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_endpoint"]
@property
def delete_endpoint(
self,
) -> Callable[[ids.DeleteEndpointRequest], operations_pb2.Operation]:
r"""Return a callable for the delete endpoint method over gRPC.
Deletes a single Endpoint.
Returns:
Callable[[~.DeleteEndpointRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_endpoint" not in self._stubs:
self._stubs["delete_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.ids.v1.IDS/DeleteEndpoint",
request_serializer=ids.DeleteEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_endpoint"]
    def close(self):
        """Release transport resources by closing the underlying gRPC channel."""
        self.grpc_channel.close()
__all__ = ("IDSGrpcTransport",)
| |
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests the ELF reader Python module."""
import io
import os
import re
import unittest
from pw_tokenizer import elf_reader
# Output from the following command:
#
# readelf -WS elf_reader_test_binary.elf
#
TEST_READELF_OUTPUT = ("""
There are 33 section headers, starting at offset 0x1758:
Section Headers:
[Nr] Name Type Address Off Size ES Flg Lk Inf Al
[ 0] NULL 0000000000000000 000000 000000 00 0 0 0
[ 1] .interp PROGBITS 0000000000000238 000238 00001c 00 A 0 0 1
[ 2] .note.ABI-tag NOTE 0000000000000254 000254 000020 00 A 0 0 4
[ 3] .note.gnu.build-id NOTE 0000000000000274 000274 000024 00 A 0 0 4
[ 4] .dynsym DYNSYM 0000000000000298 000298 0000a8 18 A 5 1 8
[ 5] .dynstr STRTAB 0000000000000340 000340 00009b 00 A 0 0 1
[ 6] .gnu.hash GNU_HASH 00000000000003e0 0003e0 00001c 00 A 4 0 8
[ 7] .gnu.version VERSYM 00000000000003fc 0003fc 00000e 02 A 4 0 2
[ 8] .gnu.version_r VERNEED 000000000000040c 00040c 000020 00 A 5 1 4
[ 9] .rela.dyn RELA 0000000000000430 000430 0000d8 18 A 4 0 8
[10] .rela.plt RELA 0000000000000508 000508 000018 18 AI 4 12 8
[11] .init PROGBITS 0000000000000520 000520 000017 00 AX 0 0 4
[12] .plt PROGBITS 0000000000000540 000540 000020 10 AX 0 0 16
[13] .text PROGBITS 0000000000000560 000560 000151 00 AX 0 0 16
[14] .fini PROGBITS 00000000000006b4 0006b4 000009 00 AX 0 0 4
[15] .rodata PROGBITS 00000000000006c0 0006c0 000004 04 AM 0 0 4
[16] .test_section_1 PROGBITS 00000000000006d0 0006d0 000010 00 A 0 0 16
[17] .test_section_2 PROGBITS 00000000000006e0 0006e0 000004 00 A 0 0 4
[18] .eh_frame X86_64_UNWIND 00000000000006e8 0006e8 0000d4 00 A 0 0 8
[19] .eh_frame_hdr X86_64_UNWIND 00000000000007bc 0007bc 00002c 00 A 0 0 4
[20] .fini_array FINI_ARRAY 0000000000001d80 000d80 000008 08 WA 0 0 8
[21] .init_array INIT_ARRAY 0000000000001d88 000d88 000008 08 WA 0 0 8
[22] .dynamic DYNAMIC 0000000000001d90 000d90 000220 10 WA 5 0 8
[23] .got PROGBITS 0000000000001fb0 000fb0 000030 00 WA 0 0 8
[24] .got.plt PROGBITS 0000000000001fe0 000fe0 000020 00 WA 0 0 8
[25] .data PROGBITS 0000000000002000 001000 000010 00 WA 0 0 8
[26] .tm_clone_table PROGBITS 0000000000002010 001010 000000 00 WA 0 0 8
[27] .bss NOBITS 0000000000002010 001010 000001 00 WA 0 0 1
[28] .comment PROGBITS 0000000000000000 001010 00001d 01 MS 0 0 1
[29] .note.gnu.gold-version NOTE 0000000000000000 001030 00001c 00 0 0 4
[30] .symtab SYMTAB 0000000000000000 001050 000390 18 31 21 8
[31] .strtab STRTAB 0000000000000000 0013e0 000227 00 0 0 1
[32] .shstrtab STRTAB 0000000000000000 001607 00014a 00 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings), I (info),
L (link order), O (extra OS processing required), G (group), T (TLS),
C (compressed), x (unknown), o (OS specific), E (exclude),
l (large), p (processor specific)
""")
# Prebuilt test ELF binary that lives alongside this test module.
TEST_ELF_PATH = os.path.join(os.path.dirname(__file__),
                             'elf_reader_test_binary.elf')
class ElfReaderTest(unittest.TestCase):
    """Tests the elf_reader.Elf class."""
    def setUp(self):
        # Each test gets a fresh handle on the checked-in test binary.
        super().setUp()
        self._elf_file = open(TEST_ELF_PATH, 'rb')
        self._elf = elf_reader.Elf(self._elf_file)
    def tearDown(self):
        super().tearDown()
        self._elf_file.close()
    def _section(self, name):
        """Return the first section whose name matches *name*."""
        return next(self._elf.sections_with_name(name))
    def test_readelf_comparison_using_the_readelf_binary(self):
        """Compares elf_reader to readelf's output."""
        # Parse the section table out of the captured ``readelf -WS`` dump.
        parse_readelf_output = re.compile(r'\s+'
                                          r'\[\s*(?P<number>\d+)\]\s+'
                                          r'(?P<name>\.\S*)?\s+'
                                          r'(?P<type>\S+)\s+'
                                          r'(?P<addr>[0-9a-fA-F]+)\s+'
                                          r'(?P<offset>[0-9a-fA-F]+)\s+'
                                          r'(?P<size>[0-9a-fA-F]+)\s+')
        readelf_sections = []
        for number, name, _, addr, offset, size in parse_readelf_output.findall(
                TEST_READELF_OUTPUT):
            readelf_sections.append((
                int(number),
                name or '',
                int(addr, 16),
                int(offset, 16),
                int(size, 16),
            ))
        self.assertEqual(len(readelf_sections), 33)
        self.assertEqual(len(readelf_sections), len(self._elf.sections))
        # Sections must match readelf's index, name, address, offset and size.
        for (index,
             section), readelf_section in zip(enumerate(self._elf.sections),
                                              readelf_sections):
            readelf_index, name, address, offset, size = readelf_section
            self.assertEqual(index, readelf_index)
            self.assertEqual(section.name, name)
            self.assertEqual(section.address, address)
            self.assertEqual(section.offset, offset)
            self.assertEqual(section.size, size)
    def test_dump_single_section(self):
        """Dumping by exact section name yields that section's raw bytes."""
        self.assertEqual(self._elf.dump_section_contents(r'\.test_section_1'),
                         b'You cannot pass\0')
        self.assertEqual(self._elf.dump_section_contents(r'\.test_section_2'),
                         b'\xef\xbe\xed\xfe')
    def test_dump_multiple_sections(self):
        """A regex matching both sections dumps them in address order."""
        if (self._section('.test_section_1').address <
                self._section('.test_section_2').address):
            contents = b'You cannot pass\0\xef\xbe\xed\xfe'
        else:
            contents = b'\xef\xbe\xed\xfeYou cannot pass\0'
        self.assertIn(self._elf.dump_section_contents(r'.test_section_\d'),
                      contents)
    def test_read_values(self):
        """read_value reads a NUL-terminated string or a fixed-size value."""
        address = self._section('.test_section_1').address
        self.assertEqual(self._elf.read_value(address), b'You cannot pass')
        int32_address = self._section('.test_section_2').address
        self.assertEqual(self._elf.read_value(int32_address, 4),
                         b'\xef\xbe\xed\xfe')
    def test_read_string(self):
        """read_c_string stops at NUL, or at EOF when no terminator exists."""
        bytes_io = io.BytesIO(
            b'This is a null-terminated string\0No terminator!')
        self.assertEqual(elf_reader.read_c_string(bytes_io),
                         b'This is a null-terminated string')
        self.assertEqual(elf_reader.read_c_string(bytes_io), b'No terminator!')
        self.assertEqual(elf_reader.read_c_string(bytes_io), b'')
    def test_compatible_file_for_elf(self):
        """Files starting with the ELF magic are recognized."""
        self.assertTrue(elf_reader.compatible_file(self._elf_file))
        self.assertTrue(elf_reader.compatible_file(io.BytesIO(b'\x7fELF')))
    def test_compatible_file_for_elf_start_at_offset(self):
        """compatible_file checks from offset 0 and restores the position."""
        self._elf_file.seek(13)  # Seek ahead to get out of sync
        self.assertTrue(elf_reader.compatible_file(self._elf_file))
        self.assertEqual(13, self._elf_file.tell())
    def test_compatible_file_for_invalid_elf(self):
        """A near-miss magic number is rejected."""
        self.assertFalse(elf_reader.compatible_file(io.BytesIO(b'\x7fELVESF')))
def _archive_file(data: bytes) -> bytes:
return ('FILE ID 90123456'
'MODIFIED 012'
'OWNER '
'GROUP '
'MODE 678'
f'{len(data):10}' # File size -- the only part that's needed.
'`\n'.encode() + data)
class ArchiveTest(unittest.TestCase):
    """Tests reading from archive files."""
    def setUp(self):
        # Build an in-memory archive holding two small entries plus the
        # checked-in test ELF binary.
        super().setUp()
        with open(TEST_ELF_PATH, 'rb') as fd:
            self._elf_data = fd.read()
        self._archive_entries = b'blah', b'hello', self._elf_data
        self._archive_data = elf_reader.ARCHIVE_MAGIC + b''.join(
            _archive_file(f) for f in self._archive_entries)
        self._archive = io.BytesIO(self._archive_data)
    def test_compatible_file_for_archive(self):
        """Streams starting with the archive magic are recognized."""
        self.assertTrue(elf_reader.compatible_file(io.BytesIO(b'!<arch>\n')))
        self.assertTrue(elf_reader.compatible_file(self._archive))
    def test_compatible_file_for_invalid_archive(self):
        """A truncated archive magic (missing newline) is rejected."""
        self.assertFalse(elf_reader.compatible_file(io.BytesIO(b'!<arch>')))
    def test_iterate_over_files(self):
        """files_in_archive yields each member's size with the stream
        positioned at the member's data."""
        for expected, size in zip(self._archive_entries,
                                  elf_reader.files_in_archive(self._archive)):
            self.assertEqual(expected, self._archive.read(size))
    def test_iterate_over_empty_archive(self):
        """An archive with no members yields nothing."""
        with self.assertRaises(StopIteration):
            next(iter(elf_reader.files_in_archive(io.BytesIO(b'!<arch>\n'))))
    def test_iterate_over_invalid_archive(self):
        """Garbage after the magic raises FileDecodeError."""
        with self.assertRaises(elf_reader.FileDecodeError):
            for _ in elf_reader.files_in_archive(
                    io.BytesIO(b'!<arch>blah blahblah')):
                pass
    def test_extra_newline_after_entry_is_ignored(self):
        """A single padding newline between members is tolerated."""
        archive = io.BytesIO(elf_reader.ARCHIVE_MAGIC +
                             _archive_file(self._elf_data) + b'\n' +
                             _archive_file(self._elf_data))
        for size in elf_reader.files_in_archive(archive):
            self.assertEqual(self._elf_data, archive.read(size))
    def test_two_extra_newlines_parsing_fails(self):
        """More than one padding newline is a decode error."""
        archive = io.BytesIO(elf_reader.ARCHIVE_MAGIC +
                             _archive_file(self._elf_data) + b'\n\n' +
                             _archive_file(self._elf_data))
        with self.assertRaises(elf_reader.FileDecodeError):
            for size in elf_reader.files_in_archive(archive):
                self.assertEqual(self._elf_data, archive.read(size))
    def test_iterate_over_archive_with_invalid_size(self):
        """Non-decimal size fields raise FileDecodeError."""
        data = elf_reader.ARCHIVE_MAGIC + _archive_file(b'$' * 3210)
        file = io.BytesIO(data)
        # Iterate over the file normally.
        for size in elf_reader.files_in_archive(file):
            self.assertEqual(b'$' * 3210, file.read(size))
        # Replace the size with a hex number, which is not valid.
        with self.assertRaises(elf_reader.FileDecodeError):
            for _ in elf_reader.files_in_archive(
                    io.BytesIO(data.replace(b'3210', b'0x99'))):
                pass
    def test_elf_reader_dump_single_section(self):
        """Elf() can read an ELF member embedded in an archive."""
        elf = elf_reader.Elf(self._archive)
        self.assertEqual(elf.dump_section_contents(r'\.test_section_1'),
                         b'You cannot pass\0')
        self.assertEqual(elf.dump_section_contents(r'\.test_section_2'),
                         b'\xef\xbe\xed\xfe')
    def test_elf_reader_read_values(self):
        """read_value works with archive-embedded ELF members too."""
        elf = elf_reader.Elf(self._archive)
        address = next(elf.sections_with_name('.test_section_1')).address
        self.assertEqual(elf.read_value(address), b'You cannot pass')
        int32_address = next(elf.sections_with_name('.test_section_2')).address
        self.assertEqual(elf.read_value(int32_address, 4), b'\xef\xbe\xed\xfe')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
from Avatar import Avatar
from direct.actor.DistributedActor import DistributedActor
from direct.distributed import DistributedNode
from direct.interval.IntervalGlobal import *
from direct.showbase import PythonUtil
from direct.task import Task
from otp.ai.MagicWordGlobal import *
from otp.nametag.Nametag import Nametag
from otp.otpbase import OTPGlobals
from pandac.PandaModules import *
from toontown.battle.BattleProps import globalPropPool
class DistributedAvatar(DistributedActor, Avatar):
    """Client-side distributed avatar: combines the networked actor with the
    Avatar rendering/nametag behavior, and manages HP bookkeeping plus the
    floating "+N"/"-N" HP text shown above the avatar's head.
    """
    # Shared TextNode used to generate every avatar's HP text geometry.
    HpTextGenerator = TextNode('HpTextGenerator')
    HpTextEnabled = 1
    ManagesNametagAmbientLightChanged = True
    def __init__(self, cr):
        # Guard against double-initialization (this class can be reached
        # through multiple inheritance paths); the attribute probe raises
        # AttributeError only on the first call.
        try:
            self.DistributedAvatar_initialized
            return
        except:
            self.DistributedAvatar_initialized = 1
        Avatar.__init__(self)
        DistributedActor.__init__(self, cr)
        # hp/maxHp stay None until the server sends the real values.
        self.hpText = None
        self.hp = None
        self.maxHp = None
        return
    def disable(self):
        # Only tear down if announceGenerate ran; otherwise nothing to undo.
        try:
            del self.DistributedAvatar_announced
        except:
            return
        self.reparentTo(hidden)
        self.removeActive()
        self.disableBodyCollisions()
        self.hideHpText()
        self.hp = None
        self.ignore('nameTagShowAvId')
        self.ignore('nameTagShowName')
        DistributedActor.disable(self)
        return
    def delete(self):
        # One-shot deletion guard, mirroring the __init__ idiom.
        try:
            self.DistributedAvatar_deleted
        except:
            self.DistributedAvatar_deleted = 1
            Avatar.delete(self)
            DistributedActor.delete(self)
    def generate(self):
        DistributedActor.generate(self)
        # Remote avatars register as active/understandable; the local avatar
        # is handled elsewhere.
        if not self.isLocal():
            self.addActive()
            self.considerUnderstandable()
        self.setParent(OTPGlobals.SPHidden)
        self.setTag('avatarDoId', str(self.doId))
        self.accept('nameTagShowAvId', self.__nameTagShowAvId)
        self.accept('nameTagShowName', self.__nameTagShowName)
    def announceGenerate(self):
        # One-shot announce guard: skip if we already announced.
        try:
            self.DistributedAvatar_announced
            return
        except:
            self.DistributedAvatar_announced = 1
        if not self.isLocal():
            self.initializeBodyCollisions('distAvatarCollNode-' + str(self.doId))
        DistributedActor.announceGenerate(self)
    def __setTags(self, extra = None):
        # Refresh the nametag display mode from the global idTags debug flag.
        if hasattr(base, 'idTags'):
            if base.idTags:
                self.__nameTagShowAvId()
            else:
                self.__nameTagShowName()
    def do_setParent(self, parentToken):
        # Hide the 2D nametag while the avatar is parented to "hidden".
        if not self.isDisabled():
            if parentToken == OTPGlobals.SPHidden:
                self.nametag2dDist &= ~Nametag.CName
            else:
                self.nametag2dDist |= Nametag.CName
            self.nametag.getNametag2d().setContents(self.nametag2dContents & self.nametag2dDist)
            DistributedActor.do_setParent(self, parentToken)
            self.__setTags()
    def toonUp(self, hpGained):
        """Heal by hpGained, clamped to maxHp, and show the gain as HP text."""
        if self.hp == None or hpGained < 0:
            return
        oldHp = self.hp
        # NOTE(review): hp can apparently be negative here; healing from a
        # negative value accumulates until hp would cross zero -- confirm.
        if self.hp + hpGained <= 0:
            self.hp += hpGained
        else:
            self.hp = min(max(self.hp, 0) + hpGained, self.maxHp)
        hpGained = self.hp - max(oldHp, 0)
        if hpGained > 0:
            self.showHpText(hpGained)
            self.hpChange(quietly=0)
        return
    def takeDamage(self, hpLost, bonus = 0):
        """Subtract hpLost (floored at 0), show it, and fire died() on a
        transition from positive to zero HP."""
        if self.hp == None or hpLost < 0:
            return
        oldHp = self.hp
        self.hp = max(self.hp - hpLost, 0)
        hpLost = oldHp - self.hp
        if hpLost > 0:
            self.showHpText(-hpLost, bonus)
            self.hpChange(quietly=0)
            if self.hp <= 0 and oldHp > 0:
                self.died()
        return
    def setHp(self, hitPoints):
        """Distributed field setter; detects the drop-to-zero transition."""
        justRanOutOfHp = (hitPoints is not None and self.hp is not None and self.hp - hitPoints > 0) and (hitPoints <= 0)
        self.hp = hitPoints
        self.hpChange(quietly=1)
        if justRanOutOfHp:
            self.died()
        return
    def hpChange(self, quietly = 0):
        # Broadcast HP updates to listeners keyed by this avatar's doId.
        if hasattr(self, 'doId'):
            if self.hp != None and self.maxHp != None:
                messenger.send(self.uniqueName('hpChange'), [self.hp, self.maxHp, quietly])
            if self.hp != None and self.hp > 0:
                messenger.send(self.uniqueName('positiveHP'))
        return
    def died(self):
        # Hook for subclasses; base avatars do nothing on death.
        pass
    def getHp(self):
        return self.hp
    def setMaxHp(self, hitPoints):
        self.maxHp = hitPoints
        self.hpChange()
    def getMaxHp(self):
        return self.maxHp
    def getName(self):
        return Avatar.getName(self)
    def setName(self, name):
        # Keep the scene-graph node name in sync; tolerate a missing node
        # or doId (e.g. before generate).
        try:
            self.node().setName('%s-%d' % (name, self.doId))
            self.gotName = 1
        except:
            pass
        return Avatar.setName(self, name)
    def showHpText(self, number, bonus = 0, scale = 1):
        """Float a signed HP delta above the avatar's head, then fade it."""
        if self.HpTextEnabled and not self.ghostMode:
            if number != 0:
                # Only one HP text at a time.
                if self.hpText:
                    self.hideHpText()
                self.HpTextGenerator.setFont(OTPGlobals.getSignFont())
                if number < 0:
                    self.HpTextGenerator.setText(str(number))
                else:
                    self.HpTextGenerator.setText('+' + str(number))
                self.HpTextGenerator.clearShadow()
                self.HpTextGenerator.setAlign(TextNode.ACenter)
                # Color code: bonus 1 = yellow, bonus 2 = orange,
                # damage = red, healing = green.
                if bonus == 1:
                    r = 1.0
                    g = 1.0
                    b = 0
                    a = 1
                elif bonus == 2:
                    r = 1.0
                    g = 0.5
                    b = 0
                    a = 1
                elif number < 0:
                    r = 0.9
                    g = 0
                    b = 0
                    a = 1
                else:
                    r = 0
                    g = 0.9
                    b = 0
                    a = 1
                self.HpTextGenerator.setTextColor(r, g, b, a)
                self.hpTextNode = self.HpTextGenerator.generate()
                self.hpText = self.attachNewNode(self.hpTextNode)
                self.hpText.setScale(scale)
                self.hpText.setBillboardPointEye()
                self.hpText.setBin('fixed', 100)
                self.hpText.setPos(0, 0, self.height / 2)
                # Rise above the head, pause, fade out, then clean up.
                seq = Sequence(self.hpText.posInterval(1.0, Point3(0, 0, self.height + 1.5), blendType='easeOut'), Wait(0.85), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0)), Func(self.hideHpText))
                seq.start()
    def showHpString(self, text, duration = 0.85, scale = 0.7):
        """Float an arbitrary (red) string above the avatar's head."""
        if self.HpTextEnabled and not self.ghostMode:
            if text != '':
                if self.hpText:
                    self.hideHpText()
                self.HpTextGenerator.setFont(OTPGlobals.getSignFont())
                self.HpTextGenerator.setText(text)
                self.HpTextGenerator.clearShadow()
                self.HpTextGenerator.setAlign(TextNode.ACenter)
                r = a = 1.0
                g = b = 0.0
                self.HpTextGenerator.setTextColor(r, g, b, a)
                self.hpTextNode = self.HpTextGenerator.generate()
                self.hpText = self.attachNewNode(self.hpTextNode)
                self.hpText.setScale(scale)
                self.hpText.setBillboardAxis()
                self.hpText.setPos(0, 0, self.height / 2)
                seq = Sequence(self.hpText.posInterval(1.0, Point3(0, 0, self.height + 1.5), blendType='easeOut'), Wait(duration), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0)), Func(self.hideHpText))
                seq.start()
    def hideHpText(self):
        # Remove the floating text node and any pending task for it.
        if self.hpText:
            taskMgr.remove(self.uniqueName('hpText'))
            self.hpText.removeNode()
            self.hpText = None
        return
    def getStareAtNodeAndOffset(self):
        # Other avatars look at a point at this avatar's head height.
        return (self, Point3(0, 0, self.height))
    def getAvIdName(self):
        # Display name with the avatar id on a second line.
        return '%s\n%s' % (self.getName(), self.doId)
    def __nameTagShowAvId(self, extra = None):
        self.setDisplayName(self.getAvIdName())
    def __nameTagShowName(self, extra = None):
        self.setDisplayName(self.getName())
    def askAvOnShard(self, avId):
        # Answer locally if the avatar object is already known; otherwise
        # ask the server.
        if base.cr.doId2do.get(avId):
            messenger.send('AvOnShard%s' % avId, [True])
        else:
            self.sendUpdate('checkAvOnShard', [avId])
    def confirmAvOnShard(self, avId, onShard = True):
        # Server response handler for askAvOnShard.
        messenger.send('AvOnShard%s' % avId, [onShard])
    def getDialogueArray(self):
        # Subclasses provide species-specific dialogue sounds.
        return None
@magicWord(category=CATEGORY_COMMUNITY_MANAGER)
def warp():
    """
    Move the spellbook target to the invoker's current position and rotation.
    """
    invoker = spellbook.getInvoker()
    target = spellbook.getTarget()
    if target.doId == invoker.doId:
        return "You can't warp yourself!"
    target.setPosHpr(invoker.getPos(), invoker.getHpr())
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str])
def loop(anim):
    """
    Play animation [anim] on the spellbook target's entire actor, looping.
    """
    spellbook.getTarget().loop(anim)
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str, int, str])
def pose(anim, frame, part=None):
    """
    Freeze the spellbook target on frame [frame] of animation [anim], either
    over the whole actor or just the optional [part].
    """
    spellbook.getTarget().pose(anim, frame, partName=part)
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str, int, int, str])
def pingpong(anim, start=None, end=None, part=None):
    """
    Bounce the spellbook target back and forth through animation [anim],
    between the optional frames <start> and [end], on the whole actor or
    just the optional <part>.
    """
    spellbook.getTarget().pingpong(anim, partName=part, fromFrame=start,
                                   toFrame=end)
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str])
def rightHand(prop=None):
    """
    parents the optional <prop> to the target's right hand node.

    With no argument, any props currently attached to the right hand are
    simply removed.
    """
    target = spellbook.getTarget()
    rightHand = target.find('**/rightHand')
    # Both the clear and the attach case start from an empty hand, so the
    # child-removal loop (previously duplicated in each branch) is hoisted.
    for child in rightHand.getChildren():
        child.removeNode()
    if prop is not None:
        requestedProp = globalPropPool.getProp(prop)
        requestedProp.reparentTo(rightHand)
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str])
def leftHand(prop=None):
    """
    parents the optional <prop> to the target's left hand node.

    With no argument, any props currently attached to the left hand are
    simply removed.
    """
    target = spellbook.getTarget()
    leftHand = target.find('**/leftHand')
    # Both the clear and the attach case start from an empty hand, so the
    # child-removal loop (previously duplicated in each branch) is hoisted.
    for child in leftHand.getChildren():
        child.removeNode()
    if prop is not None:
        requestedProp = globalPropPool.getProp(prop)
        requestedProp.reparentTo(leftHand)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.core.targets.dependencies import Dependencies
from pants.backend.core.targets.doc import Page
from pants.backend.core.tasks.filter import Filter
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseFilterTest(ConsoleTaskTestBase):
  """Shared harness for filter-task tests: registers the BUILD-file aliases
  the fixtures use and binds the task under test to ``Filter``."""
  @property
  def alias_groups(self):
    # Target types the synthetic BUILD files may reference.
    return BuildFileAliases(
      targets={
        'target': Dependencies,
        'java_library': JavaLibrary,
        'page': Page,
        'python_library': PythonLibrary,
        'python_requirement_library': PythonRequirementLibrary,
      }
    )
  @classmethod
  def task_type(cls):
    # The console task exercised by these tests.
    return Filter
class FilterEmptyTargetsTest(BaseFilterTest):
  """With no targets in context, every filter must produce empty output."""
  def test_no_filters(self):
    self.assert_console_output()
  def test_type(self):
    self.assert_console_output(options={'type': ['page']})
    self.assert_console_output(options={'type': ['java_library']})
  def test_regex(self):
    self.assert_console_output(options={'regex': ['^common']})
    self.assert_console_output(options={'regex': ['-^common']})
class FilterTest(BaseFilterTest):
  """End-to-end checks of the filter task's --type/--target/--ancestor/
  --regex/--tag/--tag-regex options against a small synthetic build graph."""
  def setUp(self):
    super(FilterTest, self).setUp()
    requirement_injected = set()
    # Local helper: defines a python_library plus, once per BUILD file, a
    # python_requirement_library named 'foo' that every library depends on.
    # Each library is tagged '<name>_tag' for the tag-filter tests.
    def add_to_build_file(path, name, *deps):
      if path not in requirement_injected:
        self.add_to_build_file(path, "python_requirement_library(name='foo')")
        requirement_injected.add(path)
      all_deps = ["'{0}'".format(dep) for dep in deps] + ["':foo'"]
      self.add_to_build_file(path, dedent("""
          python_library(name='{name}',
            dependencies=[{all_deps}],
            tags=['{tag}']
          )
          """.format(name=name, tag=name + "_tag", all_deps=','.join(all_deps))))
    add_to_build_file('common/a', 'a')
    add_to_build_file('common/b', 'b')
    add_to_build_file('common/c', 'c')
    add_to_build_file('overlaps', 'one', 'common/a', 'common/b')
    add_to_build_file('overlaps', 'two', 'common/a', 'common/c')
    add_to_build_file('overlaps', 'three', 'common/a', 'overlaps:one')
  def test_roots(self):
    """Only primary targets are printed; extra_targets are not."""
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      targets=self.targets('common/::'),
      extra_targets=self.targets('overlaps/::')
    )
  def test_nodups(self):
    """A target passed twice is printed once."""
    targets = [self.target('common/b')] * 2
    self.assertEqual(2, len(targets))
    self.assert_console_output(
      'common/b:b',
      targets=targets
    )
  def test_no_filters(self):
    """Without filters, every target in context is printed."""
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      'overlaps:foo',
      targets=self.targets('::')
    )
  def test_filter_type(self):
    """--type keeps (or with '-', drops) targets of the named alias."""
    self.assert_console_output(
      'common/a:a',
      'common/b:b',
      'common/c:c',
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      targets=self.targets('::'),
      options={'type': ['python_library']}
    )
    self.assert_console_output(
      'common/a:foo',
      'common/b:foo',
      'common/c:foo',
      'overlaps:foo',
      targets=self.targets('::'),
      options={'type': ['-python_library']}
    )
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      'overlaps:foo',
      targets=self.targets('::'),
      # Note that the comma is inside the string, so these are ORed.
      options={'type': ['python_requirement_library,python_library']}
    )
  def test_filter_multiple_types(self):
    # A target can only have one type, so the output should be empty.
    self.assert_console_output(
      targets=self.targets('::'),
      options={'type': ['python_requirement_library', 'python_library']}
    )
  def test_filter_target(self):
    """--target keeps (or with '-', drops) the named addresses."""
    self.assert_console_output(
      'common/a:a',
      'overlaps:foo',
      targets=self.targets('::'),
      options={'target': ['common/a,overlaps/:foo']}
    )
    self.assert_console_output(
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:two',
      'overlaps:three',
      targets=self.targets('::'),
      options={'target': ['-common/a:a,overlaps:one,overlaps:foo']}
    )
  def test_filter_ancestor(self):
    """--ancestor keeps targets that the named targets depend on
    (transitively), including the named targets themselves."""
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'overlaps:one',
      'overlaps:foo',
      targets=self.targets('::'),
      options={'ancestor': ['overlaps:one,overlaps:foo']}
    )
    self.assert_console_output(
      'common/c:c',
      'common/c:foo',
      'overlaps:two',
      'overlaps:three',
      targets=self.targets('::'),
      options={'ancestor': ['-overlaps:one,overlaps:foo']}
    )
  def test_filter_ancestor_out_of_context(self):
    """Tests that targets outside of the context used as filters are parsed before use."""
    # Add an additional un-injected target, and then use it as a filter.
    self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
    self.assert_console_output(
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      'overlaps:foo',
      targets=self.targets('::'),
      options={'ancestor': ['-blacklist']}
    )
  def test_filter_ancestor_not_passed_targets(self):
    """Tests filtering targets based on an ancestor not in that list of targets."""
    # Add an additional un-injected target, and then use it as a filter.
    self.add_to_build_file("blacklist", "target(name='blacklist', dependencies=['common/a'])")
    self.assert_console_output(
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      targets=self.targets('common/::'),  # blacklist is not in the list of targets
      options={'ancestor': ['-blacklist']}
    )
    self.assert_console_output(
      'common/a:a',  # a: _should_ show up if we don't filter.
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      targets=self.targets('common/::'),
      options={'ancestor': []}
    )
  def test_filter_regex(self):
    """--regex matches against the full target address."""
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      targets=self.targets('::'),
      options={'regex': ['^common']}
    )
    self.assert_console_output(
      'common/a:foo',
      'common/b:foo',
      'common/c:foo',
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      'overlaps:foo',
      targets=self.targets('::'),
      options={'regex': ['+foo,^overlaps']}
    )
    self.assert_console_output(
      'overlaps:one',
      'overlaps:two',
      'overlaps:three',
      targets=self.targets('::'),
      options={'regex': ['-^common,foo$']}
    )
    # Invalid regex.
    self.assert_console_raises(TaskError,
      targets=self.targets('::'),
      options={'regex': ['abc)']}
    )
  def test_filter_tag_regex(self):
    """--tag-regex matches against each target's tags."""
    # Filter two.
    self.assert_console_output(
      'overlaps:three',
      targets=self.targets('::'),
      options={'tag_regex': ['+e(?=e)']}
    )
    # Removals.
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:foo',
      'overlaps:three',
      targets=self.targets('::'),
      options={'tag_regex': ['-one|two']}
    )
    # Invalid regex.
    self.assert_console_raises(TaskError,
      targets=self.targets('::'),
      options={'tag_regex': ['abc)']}
    )
  def test_filter_tag(self):
    """--tag matches exact tag names; comma ORs, separate values AND."""
    # One match.
    self.assert_console_output(
      'common/a:a',
      targets=self.targets('::'),
      options={'tag': ['+a_tag']}
    )
    # Two matches.
    self.assert_console_output(
      'common/a:a',
      'common/b:b',
      targets=self.targets('::'),
      options={'tag': ['+a_tag,b_tag']}
    )
    # One removal.
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:foo',
      'overlaps:two',
      'overlaps:three',
      targets=self.targets('::'),
      options={'tag': ['-one_tag']}
    )
    # Two removals.
    self.assert_console_output(
      'common/a:a',
      'common/a:foo',
      'common/b:b',
      'common/b:foo',
      'common/c:c',
      'common/c:foo',
      'overlaps:foo',
      'overlaps:three',
      targets=self.targets('::'),
      options={'tag': ['-one_tag,two_tag']}
    )
    # No match.
    self.assert_console_output(
      targets=self.targets('::'),
      options={'tag': ['+abcdefg_tag']}
    )
    # No match due to AND of separate predicates.
    self.assert_console_output(
      targets=self.targets('::'),
      options={'tag': ['a_tag', 'b_tag']}
    )
| |
from mock import patch
from mock import Mock
from lasagne.layers import Conv2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import MaxPool2DLayer
from lasagne.layers import InputLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum
import pytest
from nolearn._compat import builtins
def test_print_log(mnist):
    """PrintLog renders one formatted table row per training-history epoch."""
    from nolearn.lasagne import PrintLog
    # A classification net (regression=False) with one custom score column.
    nn = Mock(
        regression=False,
        custom_score=('my_score', 0.99),
    )
    train_history = [{
        'epoch': 1,
        'train_loss': 0.8,
        'valid_loss': 0.7,
        'train_loss_best': False,
        'valid_loss_best': False,
        'valid_accuracy': 0.9,
        'my_score': 0.99,
        'dur': 1.0,
    }]
    output = PrintLog().table(nn, train_history)
    # train/val is train_loss / valid_loss = 0.8 / 0.7 = 1.14286.
    assert output == """\
  epoch    train loss    valid loss    train/val    valid acc    my_score  dur
-------  ------------  ------------  -----------  -----------  ----------  -----
      1       0.80000       0.70000      1.14286      0.90000     0.99000  1.00s\
"""
class TestSaveWeights():
    """Tests for SaveWeights' save-frequency, best-only and pickle behavior."""

    @pytest.fixture
    def SaveWeights(self):
        from nolearn.lasagne import SaveWeights
        return SaveWeights

    def _run_handler(self, handler, history):
        # Apply the handler to a fresh mock net and return the mock so the
        # test can inspect what was (or wasn't) saved.
        net = Mock()
        handler(net, history)
        return net

    def test_every_n_epochs_true(self, SaveWeights):
        # Epoch 9 is a multiple of 3, so exactly one save must happen.
        history = [{'epoch': 9, 'valid_loss': 1.1}]
        net = self._run_handler(SaveWeights('mypath', every_n_epochs=3), history)
        assert net.save_params_to.call_count == 1
        net.save_params_to.assert_called_with('mypath')

    def test_every_n_epochs_false(self, SaveWeights):
        # Epoch 9 is not a multiple of 4, so nothing is saved.
        history = [{'epoch': 9, 'valid_loss': 1.1}]
        net = self._run_handler(SaveWeights('mypath', every_n_epochs=4), history)
        assert net.save_params_to.call_count == 0

    def test_only_best_true_single_entry(self, SaveWeights):
        # A single history entry is trivially the best so far.
        history = [{'epoch': 9, 'valid_loss': 1.1}]
        net = self._run_handler(SaveWeights('mypath', only_best=True), history)
        assert net.save_params_to.call_count == 1

    def test_only_best_true_two_entries(self, SaveWeights):
        # Latest validation loss improves on the previous one: save.
        history = [
            {'epoch': 9, 'valid_loss': 1.2},
            {'epoch': 10, 'valid_loss': 1.1},
        ]
        net = self._run_handler(SaveWeights('mypath', only_best=True), history)
        assert net.save_params_to.call_count == 1

    def test_only_best_false_two_entries(self, SaveWeights):
        # Latest validation loss got worse: no save.
        history = [
            {'epoch': 9, 'valid_loss': 1.2},
            {'epoch': 10, 'valid_loss': 1.3},
        ]
        net = self._run_handler(SaveWeights('mypath', only_best=True), history)
        assert net.save_params_to.call_count == 0

    def test_with_path_interpolation(self, SaveWeights):
        # {epoch}, {timestamp} and {loss} placeholders are filled in.
        history = [{'epoch': 9, 'valid_loss': 1.1}]
        net = self._run_handler(
            SaveWeights('mypath-{epoch}-{timestamp}-{loss}.pkl'), history)
        saved_path = net.save_params_to.call_args[0][0]
        assert saved_path.startswith('mypath-0009-2')
        assert saved_path.endswith('-1.1.pkl')

    def test_pickle(self, SaveWeights):
        # With pickle=True the whole net is dumped to the opened file.
        history = [{'epoch': 9, 'valid_loss': 1.1}]
        net = Mock()
        with patch('nolearn.lasagne.handlers.pickle') as pickle:
            with patch.object(builtins, 'open') as mock_open:
                handler = SaveWeights('mypath', every_n_epochs=3, pickle=True)
                handler(net, history)
                mock_open.assert_called_with('mypath', 'wb')
                pickle.dump.assert_called_with(net, mock_open().__enter__(), -1)
class TestPrintLayerInfo():
    """Tests for PrintLayerInfo and the conv/maxpool layer introspection helpers."""

    @pytest.fixture(scope='session')
    def X_train(self, mnist):
        # First 100 MNIST digits, reshaped to (batch, channels, rows, cols).
        X, y = mnist
        return X[:100].reshape(-1, 1, 28, 28)

    @pytest.fixture(scope='session')
    def y_train(self, mnist):
        X, y = mnist
        return y[:100]

    @pytest.fixture(scope='session')
    def nn(self, NeuralNet, X_train, y_train):
        # Small dense-only network (no convolutions).
        nn = NeuralNet(
            layers=[
                ('input', InputLayer),
                ('dense0', DenseLayer),
                ('dense1', DenseLayer),
                ('output', DenseLayer),
            ],
            input_shape=(None, 1, 28, 28),
            output_num_units=10,
            output_nonlinearity=softmax,
            more_params=dict(
                dense0_num_units=16,
                dense1_num_units=16,
            ),
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            max_epochs=3,
        )
        nn.initialize()
        return nn

    @pytest.fixture(scope='session')
    def cnn(self, NeuralNet, X_train, y_train):
        # Convolutional network used to exercise conv-specific table output.
        nn = NeuralNet(
            layers=[
                ('input', InputLayer),
                ('conv1', Conv2DLayer),
                ('conv2', Conv2DLayer),
                ('pool2', MaxPool2DLayer),
                ('conv3', Conv2DLayer),
                ('output', DenseLayer),
            ],
            input_shape=(None, 1, 28, 28),
            output_num_units=10,
            output_nonlinearity=softmax,
            more_params=dict(
                conv1_filter_size=5, conv1_num_filters=16,
                conv2_filter_size=3, conv2_num_filters=16,
                pool2_pool_size=8, pool2_ignore_border=False,
                conv3_filter_size=3, conv3_num_filters=16,
                hidden1_num_units=16,
            ),
            update=nesterov_momentum,
            update_learning_rate=0.01,
            update_momentum=0.9,
            max_epochs=3,
        )
        nn.initialize()
        return nn

    @pytest.fixture
    def is_conv2d(self):
        from nolearn.lasagne.util import is_conv2d
        return is_conv2d

    @pytest.fixture
    def is_maxpool2d(self):
        from nolearn.lasagne.util import is_maxpool2d
        return is_maxpool2d

    @pytest.fixture
    def print_info(self):
        from nolearn.lasagne.handlers import PrintLayerInfo
        return PrintLayerInfo()

    # is_conv2d / is_maxpool2d accept either a whole net's layers or one layer.
    def test_is_conv2d_net_false(self, nn, is_conv2d):
        assert is_conv2d(nn.layers_.values()) is False

    def test_is_conv2d_net_true(self, cnn, is_conv2d):
        assert is_conv2d(cnn.layers_.values()) is True

    def test_is_conv2d_layer(self, nn, cnn, is_conv2d):
        assert is_conv2d(nn.layers_['input']) is False
        assert is_conv2d(cnn.layers_['pool2']) is False
        assert is_conv2d(cnn.layers_['conv1']) is True

    def test_is_maxpool2d_net_false(self, nn, is_maxpool2d):
        assert is_maxpool2d(nn.layers_.values()) is False

    def test_is_maxpool2d_net_true(self, cnn, is_maxpool2d):
        assert is_maxpool2d(cnn.layers_.values()) is True

    def test_is_maxpool2d_layer(self, nn, cnn, is_maxpool2d):
        assert is_maxpool2d(nn.layers_['input']) is False
        assert is_maxpool2d(cnn.layers_['pool2']) is True
        assert is_maxpool2d(cnn.layers_['conv1']) is False

    def test_print_layer_info_greeting(self, nn, print_info):
        # number of learnable parameters is weights + biases:
        # 28 * 28 * 16 + 16 + 16 * 16 + 16 + 16 * 10 + 10 = 13002
        expected = '# Neural Network with 13002 learnable parameters\n'
        message = print_info._get_greeting(nn)
        assert message == expected

    def test_print_layer_info_plain_nn(self, nn, print_info):
        expected = """\
# name size
--- ------ -------
0 input 1x28x28
1 dense0 16
2 dense1 16
3 output 10"""
        message = print_info._get_layer_info_plain(nn)
        assert message == expected

    def test_print_layer_info_plain_cnn(self, cnn, print_info):
        expected = """\
# name size
--- ------ --------
0 input 1x28x28
1 conv1 16x24x24
2 conv2 16x22x22
3 pool2 16x3x3
4 conv3 16x1x1
5 output 10"""
        message = print_info._get_layer_info_plain(cnn)
        assert message == expected

    def test_print_layer_info_conv_cnn(self, cnn, print_info):
        # Conv-specific table adds capacity and image-coverage columns.
        expected = """\
name size total cap.Y cap.X cov.Y cov.X
------ -------- ------- ------- ------- ------- -------
input 1x28x28 784 100.00 100.00 100.00 100.00
conv1 16x24x24 9216 100.00 100.00 17.86 17.86
conv2 16x22x22 7744 42.86 42.86 25.00 25.00
pool2 16x3x3 144 42.86 42.86 25.00 25.00
conv3 16x1x1 16 104.35 104.35 82.14 82.14
output 10 10 100.00 100.00 100.00 100.00"""
        message, legend = print_info._get_layer_info_conv(cnn)
        assert message == expected
        # Legend includes ANSI color codes for the warning highlights.
        expected = """
Explanation
X, Y: image dimensions
cap.: learning capacity
cov.: coverage of image
\x1b[35mmagenta\x1b[0m: capacity too low (<1/6)
\x1b[36mcyan\x1b[0m: image coverage too high (>100%)
\x1b[31mred\x1b[0m: capacity too low and coverage too high
"""
        assert legend == expected
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Training helper functions that are shared across tasks."""
import contextlib
import functools
import operator
import signal
import typing
from typing import Any, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
from absl import logging
import dataclasses
import flax
import gin
import jax
import jax.numpy as jnp
import numpy as np
import optax
from gfsa import jax_util
from gfsa.datasets import data_loading
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class ExampleWithMetadata:
  """Stores an example or batch of examples.

  Attributes:
    epoch: Integer representing the epoch that this example comes from.
    example_id: Integer ID uniquely identifying this example in the dataset.
    example: The example itself.
    mask: Array that is True for actual examples, False for padding.
    static_metadata: Metadata about this example or batch that should result in
      a new `jit` XLA computation (i.e. padded shapes).
  """
  epoch: Any
  example_id: Any
  example: Any
  # Defaults to a scalar True, so a single unpadded example needs no mask.
  mask: jax_util.NDArray = np.array(True)
  static_metadata: Any = None
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class RatioMetric:
  """A ratio, where numerator and denominator should be summed separately.

  Summing the parts separately (rather than averaging per-batch ratios) keeps
  the final ratio exact over the whole dataset.

  Attributes:
    numerator: Numerator of the metric.
    denominator: Denominator of the metric.
  """
  numerator: jax_util.NDArray
  denominator: jax_util.NDArray
# A metric value may be a plain scalar/ndarray or a RatioMetric whose parts
# are aggregated separately before dividing.
MetricValue = Union[float, jax_util.NDArray, RatioMetric]

# A loss function is a callable (model, example, static_metadata)
# -> (loss, metrics)
# pyformat: disable
LossFunWithMetrics = Callable[
    [Any, Any, Any],
    Tuple[jax_util.NDArray, Dict[str, MetricValue]]]
# pyformat: enable

# A validation function is a callable (replicated_model) -> (objective, metrics)
# where model is a tree of ShardedDeviceArrays, and objective is the value we
# want to make decrease.
ValidationFunction = Callable[[Any], Tuple[float, Dict[str, MetricValue]]]
def device_broadcast(x, num_devices):
  """Replicate `x` onto every local device.

  Args:
    x: Value (or pytree) to replicate.
    num_devices: Number of devices to replicate onto.

  Returns:
    `x` with a new leading axis of size `num_devices`, one copy per device.
  """
  def _constant(_):
    return x

  return jax.pmap(_constant)(jnp.arange(num_devices))
def _parallel_train_step(
    optimizer,
    batched_examples,
    static_batch_metadata,
    loss_fn,
    max_global_norm = None,
    **optimizer_hyper_params,
):
  """Train the model for one step in parallel across devices.

  Must be called inside a `jax.pmap` with axis name "devices" (see
  `_build_parallel_train_step`), since it uses cross-device collectives.

  Args:
    optimizer: Optimizer that tracks the model and parameter state. Should be
      replicated to each device, i.e. should contain ShardedDeviceArrays with a
      leading axis (num_devices, ...) but with the same content on each device.
    batched_examples: A structure of NDArrays representing a batch of examples.
      Should have two leading batch dimensions: (num_devices,
      batch_size_per_device, ...)
    static_batch_metadata: Metadata about this batch, which will be shared
      across all batched examples. Each value of this results in a separate
      XLA-compiled module.
    loss_fn: Task-specific non-batched loss function to apply. Should take the
      current model (optimizer.target) and an example from batched_examples, and
      return a tuple of the current loss (as a scalar) and a dictionary from
      string names to metric values (also scalars, or RatioMetrics).
    max_global_norm: Maximum global norm to clip gradients to. Should be a
      scalar, which will be broadcast automatically.
    **optimizer_hyper_params: Hyperparameters to pass to the optimizer's
      `apply_gradient` function, which will be broadcast across devices
      automatically.

  Returns:
    Tuple (updated_optimizer, grads_ok, metrics, agg_grads). Metrics will be as
    returned by loss_fn, with extra elements "loss" and "gradient_global_norm"
    (and "gradient_was_clipped" when clipping is enabled). All metrics will be
    averaged across all elements of the batch. Both optimizer and metrics will
    contain ShardedDeviceArrays that are identical across devices. grads_ok
    will be a replicated bool ndarray that is True if the gradients were
    finite. agg_grads are the (possibly clipped) cross-device-averaged
    gradients.
  """
  def batched_loss_fn(model):
    """Apply loss function across a batch of examples."""
    # vmap over the per-device batch axis; model and metadata are shared.
    loss, metrics = jax.vmap(loss_fn, (None, 0, None))(model, batched_examples,
                                                       static_batch_metadata)
    return jnp.mean(loss), metrics

  # Compute gradients of loss, along with metrics.
  (loss, metrics), grads = jax.value_and_grad(
      batched_loss_fn, has_aux=True)(
          optimizer.target)
  metrics["loss"] = loss

  # Exchange average gradients and metrics across devices.
  agg_grads = jax.lax.pmean(grads, "devices")
  agg_metrics = {}
  for k, v in metrics.items():
    if isinstance(v, RatioMetric):
      # Sum numerator and denominator separately before dividing, so the
      # ratio is exact over the full cross-device batch.
      num = jax.lax.psum(jnp.sum(v.numerator), "devices")
      denom = jax.lax.psum(jnp.sum(v.denominator), "devices")
      new_value = num / denom
    else:
      # Use nanmean to aggregate bare floats.
      new_value = jnp.nanmean(jax.lax.all_gather(v, "devices"))
    agg_metrics[k] = new_value

  # Compute global norm and possibly clip.
  global_norm = optax.global_norm(agg_grads)
  agg_metrics["gradient_global_norm"] = global_norm
  if max_global_norm is not None:
    # Rescale every leaf by the same factor so the clipped global norm
    # equals max_global_norm.
    should_clip = global_norm > max_global_norm
    agg_grads = jax.tree_map(
        lambda g: jnp.where(should_clip, g * max_global_norm / global_norm, g),
        agg_grads)
    agg_metrics["gradient_was_clipped"] = should_clip.astype("float32")

  # Check for non-finite gradients.
  grads_ok = jnp.all(
      jnp.stack([jnp.all(jnp.isfinite(x)) for x in jax.tree_leaves(agg_grads)]))

  # Apply updates.
  updated_optimizer = optimizer.apply_gradient(agg_grads,
                                               **optimizer_hyper_params)

  return updated_optimizer, grads_ok, agg_metrics, agg_grads
def _build_parallel_train_step():
  """Builds an accelerated version of the train step function.

  Returns:
    A function with the same signature as `_parallel_train_step`, pmapped
    across devices with axis name "devices".
  """
  # We need to wrap and unwrap so that the final function can be called with
  # keyword arguments, but we still maintain the proper axes.
  @functools.partial(
      jax.pmap,
      axis_name="devices",
      in_axes=(0, 0, None, None, None, None),
      static_broadcasted_argnums=(2, 3))
  def wrapped(optimizer, batched_examples, static_batch_metadata, loss_fn,
              max_global_norm, optimizer_hyper_params):
    # Hyperparameters arrive as a plain dict (pmap cannot take **kwargs)
    # and are re-expanded into keyword arguments here.
    return _parallel_train_step(optimizer, batched_examples,
                                static_batch_metadata, loss_fn, max_global_norm,
                                **optimizer_hyper_params)

  @functools.wraps(_parallel_train_step)
  def wrapper(optimizer, batched_examples, static_batch_metadata, loss_fn,
              max_global_norm, **optimizer_hyper_params):
    return wrapped(optimizer, batched_examples, static_batch_metadata, loss_fn,
                   max_global_norm, optimizer_hyper_params)

  return wrapper


# The primary version of the training step, with the associated jit cache.
parallel_train_step = _build_parallel_train_step()
def warmup_train_step(
    optimizer,
    batched_example,
    static_batch_metadata,
    loss_fn,
    optimizer_is_replicated = False,
    profile = False,
    runner=None,
):
  """Run a fake train step to warm up JIT cache.

  Args:
    optimizer: Optimizer that tracks the model and parameter state.
    batched_example: A structure of NDArrays representing a batch of examples.
    static_batch_metadata: Metadata about the batch, which will be shared across
      all batched examples.
    loss_fn: Task-specific non-batched loss function to apply. Should take the
      current model (optimizer.target) and an example from batched_examples, and
      return a tuple of the current loss (as a scalar) and a dictionary from
      string names to metric values (also scalars).
    optimizer_is_replicated: Whether optimizer is already replicated.
    profile: Whether to enable profiling during warmup.
    runner: If profile=True, the runner to use when profiling.
  """
  num_devices = jax.local_device_count()
  if optimizer_is_replicated:
    replicated_optimizer = optimizer
  else:
    replicated_optimizer = device_broadcast(optimizer, num_devices)

  # Make sure everything lives on-device before running the step.
  (replicated_optimizer,
   batched_example) = jax.tree_map(jax.device_put,
                                   (replicated_optimizer, batched_example))

  # Use the same clipping configuration as the real training loop, so the
  # warmed-up computation matches the one used during training.
  try:
    max_global_norm = gin.query_parameter(
        "train_util.training_loop.max_global_norm")
  except ValueError:
    max_global_norm = None

  def go():
    # Note that value for learning_rate is arbitrary, but we pass it here to
    # warm up the jit cache (since we are passing a learning rate at training
    # time).
    res = parallel_train_step(
        replicated_optimizer,
        batched_example,
        static_batch_metadata,
        loss_fn,
        max_global_norm=max_global_norm,
        learning_rate=0.0)
    # Block so compilation and execution finish before we return.
    jax.tree_map(lambda x: x.block_until_ready(), res)

  if profile:
    stats = runner.try_run_and_profile(go, catch_resource_exhausted=False)
    logging.info("Warmed up train step with stats: %s", stats)
  else:
    go()
    logging.info("Warmed up train step")
def build_averaging_validator(
    loss_fn,
    valid_iterator_factory,
    objective_metric_name = None,
    include_total_counts = False,
    prefetch = True,
):
  """Validate by computing averages over a validation set.

  Args:
    loss_fn: Loss function for the task.
    valid_iterator_factory: Constructs iterators of batched examples from the
      validation set, with two batch axes. To iterate over a fixed part of the
      validation set, consider using build_one_pass_iterator_factory. To
      randomly sample from a validation set, you can use something like
      `lambda: itertools.islice(validation_iterator, num_batches)`.
    objective_metric_name: Name of the metric that is the objective value.
      Defaults to "loss".
    include_total_counts: Whether to report numerator and denominator separately
      for RatioMetric objects, along with the "validation_total_example_count"
      metric.
    prefetch: Whether to prefetch validation examples.

  Returns:
    Validation function that runs loss_fn and aggregates the results, reporting
    the loss as the objective, and using sum to accumulate metrics.
  """
  if objective_metric_name is None:
    objective_metric_name = "loss"

  @functools.partial(
      jax.pmap, axis_name="devices", static_broadcasted_argnums=3)
  def parallel_metrics_batch(model, batched_examples, batch_mask,
                             static_metadata):
    # Per-example metrics for this device's shard of the batch.
    loss, metrics = jax.vmap(loss_fn, (None, 0, None))(model, batched_examples,
                                                       static_metadata)
    metrics["loss"] = loss
    # Zero out padding entries, then sum over the batch and across devices.
    metrics = jax.tree_map(
        lambda x: jnp.where(batch_mask, x, jnp.zeros_like(x)), metrics)
    metrics = jax.tree_map(lambda x: jax.lax.psum(jnp.sum(x), "devices"),
                           metrics)
    return metrics

  def validation_function(model):
    with contextlib.ExitStack() as exit_stack:
      valid_iterator = valid_iterator_factory()
      if prefetch:
        valid_iterator = exit_stack.enter_context(
            data_loading.ThreadedPrefetcher(valid_iterator, 4))
      accumulated = None
      example_count = 0
      for batch in valid_iterator:
        results = parallel_metrics_batch(model, batch.example, batch.mask,
                                         batch.static_metadata)
        # Pull summed metrics back to the host as Python floats.
        metrics = jax.tree_map(float, flax.jax_utils.unreplicate(results))
        metrics["epoch"] = np.sum(batch.epoch)
        if accumulated is None:
          accumulated = metrics
        else:
          accumulated = jax.tree_multimap(operator.add, accumulated, metrics)
        # Only count non-padding examples toward the average.
        example_count += jnp.count_nonzero(batch.mask)

      assert example_count > 0, "Validation iterator must be nonempty"
      accumulated = typing.cast(Dict[str, Any], accumulated)
      final_metrics = {}
      for k, v in accumulated.items():
        if isinstance(v, RatioMetric):
          # Divide summed parts for an exact dataset-level ratio.
          final_metrics[k] = v.numerator / v.denominator
          if include_total_counts:
            final_metrics[k + "_numerator"] = v.numerator
            final_metrics[k + "_denominator"] = v.denominator
        else:
          final_metrics[k] = v / example_count

      objective = final_metrics[objective_metric_name]
      if include_total_counts:
        final_metrics["validation_total_example_count"] = example_count
      return (objective, final_metrics)

  return validation_function
@contextlib.contextmanager
def catch_interrupts_once(callback,
                          catch_signals=(signal.SIGINT,
                                         signal.SIGABRT)):
  # pylint: disable=g-doc-return-or-yield
  """Context manager to catch interrupt signals.

  Only catches the first signal sent, so that repeated interrupts will kill the
  job as normal.

  Args:
    callback: Function to run when the signal is caught the first time. Called
      with the caught signal number as its only argument.
    catch_signals: Signals to catch.

  Returns:
    A context manager that will catch interrupts inside the block.
  """
  # pylint: enable=g-doc-return-or-yield
  known_signals = {
      signal.SIGINT: "SIGINT",
      signal.SIGABRT: "SIGABRT",
  }

  def _handler(signal_number, frame):
    del frame  # Unused.
    logging.warning("Caught interrupt signal %s",
                    known_signals.get(signal_number, signal_number))
    callback(signal_number)
    # Restore the original handlers immediately, so a second signal is
    # handled normally (typically: killing the job).
    _restore_handlers()

  original_handlers = {}
  for signal_number in catch_signals:
    original_handlers[signal_number] = signal.signal(signal_number, _handler)

  already_restored = False

  def _restore_handlers():
    # May run twice (once from _handler, once from the finally clause);
    # only the first call does any work.
    nonlocal already_restored
    if already_restored:
      return
    already_restored = True
    for signal_number in catch_signals:
      current_handler = signal.signal(signal_number,
                                      original_handlers[signal_number])
      if current_handler is not _handler:
        # Someone replaced our handler while the context was active; warn
        # loudly but still restore the original handler.
        # (Fixed typo in the message: "hander" -> "handler".)
        logging.error(
            "Unexpected active signal handler %s for %s; "
            "expected the signal handler from "
            "`catch_interrupts_once`! Restored to %s anyways.",
            current_handler, known_signals.get(signal_number, signal_number),
            original_handlers[signal_number])

  try:
    yield
  finally:
    _restore_handlers()
| |
from __future__ import division
from collections import defaultdict
import sys, os
import json
import math
import string
from math import log, pow
#import pandas as pd
class MeasureAccuracy:
    """
    Measure the accuracy of the output of tagged tweets with respect to the expected tags.
    Expected and actual both have to have the exact same tweets for this to work
    """

    def __init__(self, expected, predicted):
        self.expectedFilePath = expected
        self.actualFilePath = predicted
        self.expectedLines = []
        self.actualLines = []
        if self.readfiles():
            self.calculateAccuracy()

    def readfiles(self):
        """Load expected and actual tweets (one JSON list per line).

        Prints an error and returns False when the two files have a
        different number of tweets; returns True otherwise.
        """
        with open(self.expectedFilePath, 'r') as fin:
            self.expectedLines.extend(json.loads(line) for line in fin)
        with open(self.actualFilePath, 'r') as fin:
            self.actualLines.extend(json.loads(line) for line in fin)
        if len(self.expectedLines) != len(self.actualLines):
            print('ERROR: Expected and actual file lengths dont match')
            return False
        return True

    def calculateAccuracy(self):
        """Count matching (word, tag) pairs across all tweets and print the accuracy."""
        totalcount = 0
        totalmatch = 0
        expectedArrary = []
        actualArray = []
        for currExpected, currActual in zip(self.expectedLines, self.actualLines):
            printline = 0
            for j, expectedToken in enumerate(currExpected):
                actualToken = currActual[j]
                totalcount += 1
                expectedArrary.append(expectedToken[1])
                actualArray.append(actualToken[1])
                if expectedToken == actualToken:
                    totalmatch += 1
                elif printline == 0:
                    # First mismatch in this tweet; debug printing of the
                    # expected/actual pair was disabled in the original.
                    printline = 1
        # A pandas confusion matrix could be built here from expectedArrary
        # and actualArray; omitted to avoid the pandas dependency.
        print("accuracy %d/%d: %.2f%%" % (totalmatch, totalcount, 100 * (totalmatch / totalcount)))
class TrainTrigramHMM:
    """Trains a trigram HMM tagger from a JSON-lines file of tagged sentences.

    Accumulates tag unigram counts, (tag, tag) transition-context counts,
    (tag, tag, tag) trigram counts and (word, tag) emission counts, and
    exposes add-k (k=0.2) smoothed log-probabilities for Viterbi decoding.
    """

    # Sentinel word/state values padding the start and end of every sentence.
    startSymbol = '<s>'
    stopSymbol = '</s>'
    startState = '<s>'
    stopState = '</s>'

    def __init__(self, filepath):
        self.filepath = filepath
        self.sentenceList = []
        self.tagToCount = defaultdict(lambda: 0)
        self.transToCount = defaultdict(lambda: 0)
        self.emissToCount = defaultdict(lambda: 0)
        self.trigramToCount = defaultdict(lambda: 0)
        self.trainFromFile()

    def trainFromFile(self):
        """Read the training file (one JSON sentence per line) and accumulate counts."""
        with open(self.filepath, 'r') as f:
            for line in f:
                self.sentenceList.append(json.loads(line))
        # each sentence training
        for sentence in self.sentenceList:
            # Two virtual start states precede every sentence (trigram model).
            prevTag1 = TrainTrigramHMM.startState
            prevTag2 = TrainTrigramHMM.startState
            self.tagToCount[prevTag1] += 1
            self.tagToCount[prevTag2] += 1
            for word in sentence:
                currWord = word[0].encode('utf-8')
                currTag = word[1].encode('utf-8')
                self.tagToCount[currTag] += 1
                self.trigramToCount[(prevTag1, prevTag2, currTag)] += 1
                self.emissToCount[(currWord, currTag)] += 1
                self.transToCount[(prevTag1, prevTag2)] += 1
                prevTag1 = prevTag2
                prevTag2 = currTag
            # For last word to stop symbol
            currWord = TrainTrigramHMM.stopSymbol
            currTag = TrainTrigramHMM.stopState
            self.tagToCount[currTag] += 1
            self.trigramToCount[(prevTag1, prevTag2, currTag)] += 1
            self.emissToCount[(currWord, currTag)] += 1
            self.transToCount[(prevTag1, prevTag2)] += 1

    def getEmissProb(self, emissData):
        """Return the add-k smoothed log P(word | tag) for a (word, tag) pair.

        Out-of-vocabulary words that look like emoticon escapes, URLs,
        mentions or hashtags get log-probability 0.0 (probability 1) under
        their dedicated tag.
        """
        word, tag = emissData
        count = self.emissToCount[emissData]
        if count == 0:  # check for OOV UNK-EMOTICON, UNK-URL, UNK-MENTION, UNK-HASHTAG
            # FIX: '\\u' is the intended two-character literal backslash-u
            # (same bytes as the Python 2 '\u'); the bare '\u' is an invalid
            # truncated escape under Python 3.
            if word.startswith('\\u') and tag == 'E':
                return 0.0  # log prob of 1
            elif word.startswith('http') and tag == 'U':
                return 0.0
            elif word.startswith('@') and tag == '@':
                return 0.0
            elif word.startswith('#') and tag == '#':
                return 0.0
        # add k smoothing
        score = log(count + 0.2) - (log(self.tagToCount[tag] + (0.2 * len(self.tagToCount))))
        return score

    def getTransProb(self, transData):
        """Return the add-k smoothed log P(currT | prevT1, prevT2)."""
        prevT1, prevT2, currT = transData
        countTrans = self.transToCount[(prevT1, prevT2)]
        if countTrans == 0:
            countTrans = 0.0000002  # if transition wasn't seen in test
        countTri = self.trigramToCount[transData]
        # add k smoothing.
        # FIX: the k*V term belongs inside the denominator's log (matching
        # getEmissProb); the original added it to the log value itself,
        # which is not a valid smoothed probability.
        score = log(countTri + 0.2) - log(countTrans + (0.2 * len(self.tagToCount)))
        return score
class TagSentencesInFile:
    """Tags sentences from a JSON-lines file via trigram-Viterbi decoding.

    Uses a trained TrainTrigramHMM for transition and emission scores and
    appends tagged sentences to the output file, one JSON list per line.
    """

    def __init__(self, filepath, trigramHMM, outFile="outfile.json"):
        self.filepath = filepath
        self.sentenceList = []
        self.outputFile = outFile
        self.trigramHMM = trigramHMM
        # Move the start state to the front of the tag list.
        self.tagList = trigramHMM.tagToCount.keys()
        self.tagList.insert(0, self.tagList.pop(self.tagList.index(TrainTrigramHMM.startState)))
        self.loadFileData()

    def loadFileData(self):
        """Load the sentences to tag (one JSON list per line)."""
        with open(self.filepath, 'r') as f:
            for line in f:
                self.sentenceList.append(json.loads(line))

    def tagEachSentence(self, sentence):
        """Run trigram Viterbi over one (start/stop padded) sentence.

        Returns the best-scoring tag sequence, including the leading start
        state (so path indices line up with sentence token indices).
        """
        wordCount = len(sentence)
        ########### Initialize matrices #################
        # Viterbi[t, tag, tag1] = best log-score of any tag sequence ending
        # in (tag, tag1) at position t; finalpath maps that state pair to
        # the tag sequence achieving it.
        Viterbi = {}
        finalpath = {}
        Viterbi[0, TrainTrigramHMM.startState, TrainTrigramHMM.startState] = 0.0
        finalpath[TrainTrigramHMM.startState, TrainTrigramHMM.startState] = []
        ########### Start Viterbi #######################
        for t in xrange(1, wordCount):
            temppath = {}
            currObs = sentence[t][0]
            for tag in self.returnTagList(t-1):
                for tag1 in self.returnTagList(t):
                    # Maximize over the tag two positions back.
                    Viterbi[t, tag, tag1], past = max(
                        [(Viterbi[t - 1, currTag, tag] + self.trigramHMM.getTransProb((currTag, tag, tag1)) + self.trigramHMM.getEmissProb((currObs, tag1)), currTag) for currTag in self.returnTagList(t-2)])
                    temppath[tag, tag1] = finalpath[past, tag] + [tag1]
            finalpath = temppath
        ###### finish viterbi with final state ######
        ###backtrack!
        pval, tagmax, tag1max = max([(Viterbi[wordCount-1, tag, tag1] + self.trigramHMM.getTransProb((tag, tag1, TrainTrigramHMM.startState)), tag, tag1) for tag in self.tagList for tag1 in self.tagList])
        returnpath = finalpath[tagmax, tag1max]
        returnpath.insert(0, TrainTrigramHMM.startState)
        return returnpath

    #only return start state if in first iteration of loop
    def returnTagList(self, count):
        """Return the candidate tags at position `count` of the lattice."""
        if count == -1 or count == 0:
            return [TrainTrigramHMM.startState]
        else:
            return self.tagList

    def tagSentences(self):
        """Tag every loaded sentence and append the results to the output file."""
        counting = 0
        for sentence in self.sentenceList:
            counting += 1
            print 'tagging sentence # %d' % counting
            # Pad with start/stop tokens expected by the trained HMM.
            sentence.insert(0, [TrainTrigramHMM.startSymbol, TrainTrigramHMM.startState])
            sentence.append([TrainTrigramHMM.stopSymbol, TrainTrigramHMM.stopState])
            path = self.tagEachSentence(sentence)
            self.writeSentenceOutput(sentence, path)

    def writeSentenceOutput(self, sentence, path):
        """Append one tagged sentence as JSON (start/stop tokens stripped)."""
        finalout = []
        with open(self.outputFile, 'a+') as f:
            for i in xrange(1, len(sentence)-1):
                finalout.append([sentence[i][0], path[i]])
            json.dump(finalout, f)
            f.write('\n')
def main(trainPath='twt.train.json', testPath='twt.test.json', outpath='testout.json'):
    """Train a trigram HMM on trainPath, tag testPath into outpath, measure accuracy.

    NOTE(review): accuracy is measured against testPath itself, which assumes
    the test file carries gold tags.
    """
    trigramHMM = TrainTrigramHMM(trainPath)
    print 'generated trigram HMMs \n'
    tagger = TagSentencesInFile(testPath, trigramHMM, outpath)
    print 'tagger created \n'
    tagger.tagSentences()
    print 'sentences tagged \n measuring accuracy \n'
    accuracy = MeasureAccuracy(testPath, outpath)


if __name__ == '__main__':
    # Usage: python <script> <train.json> <test.json> <out.json>
    main(sys.argv[1], sys.argv[2], sys.argv[3])
| |
"""Convert dictionaries to other objects"""
__author__ = 'thorwhalen'
import itertools
import re
from collections import namedtuple as _namedtuple
from collections import Counter
from warnings import warn
def kv_tuple_list(d):
"""
Transforms a dict into a list of (key, val) tuples.
This tuple_list can recover the original dict by doing dict(tuple_list)
:param d: dict {a: aa, b: bb, etc.}
:return: list of tuples [(a, aa), (b, bb), etc.]
Example:
>>> n = 100
>>> d = {k: v for k, v in zip(range(n), range(n))}
>>> assert dict(kv_tuple_list(d)) == d
>>> from numpy.random import rand
>>> d = {k: v for k, v in zip(rand(n), rand(n))}
>>> assert dict(kv_tuple_list(d)) == d
"""
return [(k, v) for k, v in d.items()]
def table_str_with_key_and_value_columns(d, key_col_name='key', val_col_name='val'):
max_key_size = max(list(map(len, list(map(str, list(d.keys()))))))
max_val_size = max(list(map(len, list(map(str, list(d.values()))))))
format_str = '{:<' + str(max_key_size) + '} {:<' + str(max_val_size) + '}\n'
s = format_str.format(key_col_name, val_col_name)
for k, v in d.items():
s += format_str.format(k, v)
return s
def namedtuple(d, name='namedtuple'):
return _namedtuple('blah', list(d.keys()))(**d)
class hashabledict(dict):
def __hash__(self):
return hash(tuple(sorted(self.items())))
def count_dicts(d):
return Counter(list(map(hashabledict, d)))
# class Struct:
# def __init__(self, obj):
# for k, v in obj.iteritems():
# if not isinstance(k, basestring):
# warn("One of a dicts keys ({}) was not a string and will be converted to be one".format(k))
# k = str(k)
# if isinstance(v, dict):
# setattr(self, k, Struct(v))
# else:
# setattr(self, k, v)
#
# def __getitem__(self, val):
# return self.__dict__[val]
#
# def __repr__(self):
# return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.iteritems()))
class Struct(dict):
def __init__(self, *args, **kwargs):
super(Struct, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
if not isinstance(v, dict):
self[k] = v
else:
self[k] = Struct(v)
if kwargs:
for k, v in kwargs.items():
if not isinstance(v, dict):
self[k] = v
else:
self[k] = Struct(v)
def __getitem__(self, value):
return self.get(value)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(Struct, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(Struct, self).__delitem__(key)
del self.__dict__[key]
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.items()))
def word_replacer(rep_with_dict, inter_token_re=r"\b"):
regex = inter_token_re + r"(?:" + "|".join(re.escape(word) for word in rep_with_dict) + r")" + inter_token_re
reobj = re.compile(regex, re.I)
return lambda text: reobj.sub(lambda x: rep_with_dict[x.group(0)], text)
def inverse_one_to_one(d):
'''
:param d: a dict that is such that each (unique) key is mapped to a unique value
:return: returns the inverse (value->key) dict
Example:
inverse_one_to_one({'A':'a', 'B':'bb', 'C':'cc'})
== {'a': 'A', 'cc': 'C', 'bb': 'B'}
'''
value_list = list(d.values())
assert len(value_list) == len(set(value_list)), "You cannot use values_to_keys_dict() if there are duplicate values"
return dict((v, k) for k, v in d.items())
def inverse_one_to_many(d):
'''
:param d: a dict that is such that each (unique) key is mapped to a list of values (whose values are globally unique)
:return: returns the inverse (value->key) dict
Example:
inverse_one_to_many({'A':['a','aa','aaa'], 'B':['b','bb']})
== {'a': 'A', 'aa': 'A', 'b': 'B', 'aaa': 'A', 'bb': 'B'}
'''
value_list = list(itertools.chain.from_iterable(list(d.values())))
assert len(value_list) == len(set(value_list)), "You cannot use values_to_keys_dict() if there are duplicate values"
inverse_dict = dict()
for k, v in d.items():
for vv in v:
inverse_dict[vv] = k
return inverse_dict
def inverse_many_to_one(d):
'''
:param d: a dict that is such that each (unique) key is mapped to a value, but different keys can map to the same value
:return: returns the inverse (value->key) dict
The keys of the inverse dict will be the unique values found in the original dict, and the values of the inverse will
gather in the list all (original dict) keys that mapped to it.
Example:
inverse_many_to_one({'a': 'A', 'aa': 'A', 'b': 'B', 'aaa': 'A', 'bb': 'B'})
== {'A':['a','aa','aaa'], 'B':['b','bb']}
'''
inverse_dict = {}
for k, v in d.items():
inverse_dict[v] = inverse_dict.get(v, [])
inverse_dict[v].append(k)
return inverse_dict
# value_list = list(itertools.chain.from_iterable(d.values()))
# assert len(value_list) == len(set(value_list)), "You cannot use values_to_keys_dict() if there are duplicate values"
# inverse_dict = dict()
# for k, v in d.iteritems():
# for vv in v:
# inverse_dict[vv] = k
# return inverse_dict
# def dataframe(d):
# """
# returns a datafame from a multi-level dict
# NOTE: use pd.DataFrame.from_dict() instead for up to two depth levels
# """
# val_list
# for key,val in d.values():
# frames.append(pd.DataFrame.from_dict(val, orient='index'))
# return pd.concat(frames, keys=key_list)
# some code from someone else that words for depths of exactly 3
# key_list = []
# frames = []
# for key,val in d.iteritems():
# key_list.append(key)
# # print frames
# frames.append(pd.DataFrame.from_dict(val, orient='index'))
# return pd.concat(frames, keys=key_list)
# if __name__ == "__main__":
# # test for dataframe
# from pdict.to import dataframe as dict2df
# w = {12: {'Category 1': {'att_1': 1, 'att_2': 'whatever'},
# 'Category 2': {'att_1': 23, 'att_2': 'another'}},
# 15: {'Category 1': {'att_1': 10, 'att_2': 'foo'},
# 'Category 2': {'att_1': 30, 'att_2': 'bar'}}}
# df = dict2df(w)
# print w
# print df
| |
# Copyright (C) [2015-2017] [Thomson Reuters LLC]
# Copyright (C) [2015-2017] [Panos Kittenis]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""InfluxGraph utility functions"""
from __future__ import absolute_import, print_function
import datetime
import sys
import re
import hashlib
import memcache
from .constants import INFLUXDB_AGGREGATIONS
try:
from .ext.nodetrie import Node
except ImportError:
from .classes.tree import NodeTreeIndex as Node
try:
from .ext.templates import get_series_with_tags, heapsort, \
_make_path_from_template
except ImportError:
from .templates import get_series_with_tags, heapsort, \
_make_path_from_template
def calculate_interval(start_time, end_time, deltas=None):
    """Calculate the data series interval to use for a query time window.

    Returns interval in seconds

    :param start_time: Start time in seconds from epoch
    :param end_time: End time in seconds from epoch
    :type start_time: int
    :type end_time: int
    :param deltas: Delta configuration to use. Defaults hardcoded if no
      configuration is provided
    :type deltas: dict(max time range of query in seconds: interval to use
      in seconds)
    :rtype: int - *Interval in seconds*
    """
    default_deltas = {
        900: 10,           # up to 15 min  -> 10s
        1800: 30,          # up to 30 min  -> 30s
        259200: 60,        # up to 3 days  -> 1min
        604800: 300,       # up to 7 days  -> 5min
        1209600: 600,      # up to 14 days -> 10min
        2419200: 900,      # up to 28 days -> 15min
        4838400: 1800,     # up to 2 months -> 30min
        9676800: 3600,     # up to 4 months -> 1h
        31536000: 7200,    # up to 12 months -> 2h
        126144000: 43200,  # up to 4 years -> 12h
    }
    if not deltas:
        deltas = default_deltas
    time_range = end_time - start_time
    # Smallest configured range that covers the query window wins
    for max_range in sorted(deltas):
        if time_range <= max_range:
            return deltas[max_range]
    # 1 day default when time range > max configured (4 years by default)
    return 86400
def get_retention_policy(interval, retention_policies):
    """Return the retention policy appropriate for the given interval.

    :param interval: Interval of query in seconds
    :type interval: int
    :param retention_policies: Retention policy configuration
    :type retention_policies: dict(max time range of interval
      in seconds: retention policy name)
    :rtype: ``str`` or ``None``
    """
    if not retention_policies:
        return None
    thresholds = sorted(retention_policies)
    for threshold in thresholds:
        if interval <= threshold:
            return retention_policies[threshold]
    # Desired interval exceeds all configured ranges - use the policy
    # configured for the largest interval
    return retention_policies[thresholds[-1]]
class Query(object):  # pylint: disable=too-few-public-methods
    """Graphite-API compatible query class"""

    def __init__(self, pattern):
        """:param pattern: Graphite glob pattern being queried"""
        self.pattern = pattern
def _compile_aggregation_patterns(aggregation_functions):
"""Compile aggregation function patterns to compiled regex objects"""
if not aggregation_functions:
return
compiled_aggregations = {}
for pattern in aggregation_functions.keys():
if not aggregation_functions[pattern] in INFLUXDB_AGGREGATIONS:
sys.stderr.write(
"Requested aggregation function '%s' is not a valid InfluxDB "
"aggregation function - ignoring..\n" % (
aggregation_functions[pattern],))
continue
try:
compiled_aggregations[
re.compile(r'%s' % (pattern,))] = aggregation_functions[pattern]
except re.error:
sys.stderr.write(
"Error compiling regex pattern '%s' - ignoring..\n" % (
pattern,))
return compiled_aggregations
def get_aggregation_func(path, aggregation_functions):
    """Return the aggregation function configured for ``path``.

    Defaults to 'mean' when no configuration exists or nothing matches.

    :param path: Path to lookup
    :type path: str
    :param aggregation_functions: Aggregation function configuration
    :type aggregation_functions: dict(<compiled regex>: <function name>)
    """
    if aggregation_functions:
        for regex, func in aggregation_functions.items():
            if regex.search(path):
                return func
    return 'mean'
def _retrieve_named_field_data(infl_data, measurement_data, measurement,
                               tags, _data, separator='.'):
    """Collect datapoints for non-'value' named fields of a measurement.

    Fills ``_data`` keyed by metric path with the field's datapoints and
    removes each matched path from the measurement's pending path list.
    """
    remaining_paths = measurement_data[measurement]['paths'][:]
    template = measurement_data[measurement]['template']
    for field in measurement_data[measurement]['fields']:
        # Rebuild the ordered graphite path for this measurement/tag set
        path_parts = []
        _make_path_from_template(
            path_parts, measurement, template, list(tags.items()),
            separator=separator)
        ordered = [part for _, part in heapsort(path_parts)]
        ordered.append(field)
        metric = '.'.join(ordered)
        if metric not in remaining_paths:
            continue
        remaining_paths.remove(metric)
        _data[metric] = [point[field]
                         for point in infl_data.get_points(
                             measurement=measurement, tags=tags)]
    measurement_data[measurement]['paths'] = remaining_paths
def _retrieve_field_data(infl_data, measurement_data, measurement,
                         metric, tags, _data):
    """Collect datapoints for ``metric``, preferring the 'value' field."""
    fields = measurement_data[measurement]['fields']
    if 'value' in fields:
        # Plain 'value' field - datapoints map directly onto the metric
        _data[metric] = [point['value']
                         for point in infl_data.get_points(
                             measurement=measurement, tags=tags)]
        return
    # Named (non 'value') fields - resolve a path per field
    _retrieve_named_field_data(infl_data, measurement_data,
                               measurement, tags, _data)
def _read_measurement_metric_values(infl_data, measurement, paths, _data):
if measurement not in paths:
return
_data[measurement] = [d['value']
for d in infl_data.get_points(
measurement=measurement)]
def read_influxdb_values(influxdb_data, paths, measurement_data):
    """Return metric path -> datapoints dict for values from InfluxDB data

    :param influxdb_data: InfluxDB result set(s); a single result set is
        wrapped into a list
    :param paths: Requested metric paths
    :param measurement_data: Per-measurement template configuration, or
        falsy when no templates are configured
    """
    _data = {}
    if not isinstance(influxdb_data, list):
        influxdb_data = [influxdb_data]
    # Index of the next pending path for the measurement currently being
    # processed; reset whenever a new measurement is first seen
    m_path_ind = 0
    seen_measurements = set()
    for infl_data in influxdb_data:
        # Result set keys are (measurement, tags-dict-or-None) tuples
        for infl_keys in infl_data.keys():
            measurement = infl_keys[0]
            tags = infl_keys[1] if infl_keys[1] is not None else {}
            if not measurement_data:
                # No template configuration - measurement name is the path
                _read_measurement_metric_values(infl_data, measurement,
                                                paths, _data)
                continue
            elif measurement not in measurement_data:
                continue
            if measurement not in seen_measurements:
                # NOTE(review): rebuilds the set rather than calling
                # .add(); behaviour is equivalent - confirm before
                # simplifying
                seen_measurements = set(
                    tuple(seen_measurements) + (measurement,))
                m_path_ind = 0
            elif len(measurement_data[measurement]['paths']) == 0:
                # No paths left for measurement
                continue
            elif m_path_ind >= len(measurement_data[measurement]['paths']):
                # More series than pending paths - wrap around
                m_path_ind = 0
            metric = measurement_data[measurement]['paths'][m_path_ind]
            m_path_ind += 1
            _retrieve_field_data(infl_data, measurement_data,
                                 measurement, metric, tags, _data)
    return _data
def gen_memcache_pattern_key(pattern):
    """Generate memcache key from pattern"""
    digest = hashlib.md5(pattern.encode('utf8'))
    return digest.hexdigest()
def gen_memcache_key(start_time, end_time, aggregation_func, paths):
    """Generate memcache key to use to cache request data

    Key is md5(paths) + aggregation function + query span in seconds.
    """
    start_dt = datetime.datetime.fromtimestamp(float(start_time))
    end_dt = datetime.datetime.fromtimestamp(float(end_time))
    span = end_dt - start_dt
    # Whole span in seconds from the timedelta components
    delta = (span.microseconds +
             (span.seconds + span.days * 24 * 3600) * 10**6) / 10**6
    prefix = hashlib.md5("".join(paths).encode('utf8')).hexdigest()
    return "".join([prefix, aggregation_func, str(delta)]).encode('utf8')
def make_memcache_client(memcache_host, memcache_max_value=1):
    """Make memcache client if given a memcache host or `None`

    :param memcache_host: Host to connect to, or falsy for no client
    :param memcache_max_value: Max value size in MB
    """
    if not memcache_host:
        return None
    max_bytes = 1024 ** 2 * memcache_max_value
    return memcache.Client([memcache_host], pickleProtocol=-1,
                           server_max_value_length=max_bytes)
def parse_series(series, fields, graphite_templates, separator=b'.'):
    """Parses series and fields with/without graphite templates
    and returns built Index

    :param series: Series to load
    :type series: list(unicode str)
    :param fields: Per measurement field keys from InfluxDB. May be `None`
    :type fields: dict(measurement: [field1, field2, ..])
    :param graphite_templates: Graphite templates to use to parse series
      and fields.
    :type graphite_templates: list(tuple) as returned by \
      :mod:`influxgraph.templates.parse_influxdb_graphite_templates`
    :rtype: :mod:`influxgraph.classes.tree.NodeTreeIndex`
    """
    index = Node()
    for serie in series:
        has_tags = ',' in serie
        if not (graphite_templates or has_tags):
            # No tags, no template - insert series name as-is
            index.insert(serie)
            continue
        # Metrics with tags are split out so a correctly ordered path
        # can be pre-generated for insertion into the index
        measurement_and_tags = serie.split(',')
        if not graphite_templates:
            # Series with tags and no templates - only the measurement
            # is added to the index
            index.insert(measurement_and_tags[0])
            continue
        for split_path in get_series_with_tags(
                measurement_and_tags, fields, graphite_templates,
                separator=separator):
            index.insert_split_path(split_path)
    return index
| |
import mock
import unittest2 as unittest
from quantum.agent import netns_cleanup_util as util
class TestNetnsCleanup(unittest.TestCase):
    """Unit tests for the quantum netns cleanup utility.

    All system interaction (config loading, dhcp driver, ip netns and OVS
    commands) is mocked out; the tests verify decision logic and call
    sequences only.
    """

    def test_setup_conf(self):
        # Default config must not force deletion of non-empty namespaces
        conf = util.setup_conf()
        self.assertFalse(conf.force)

    def test_kill_dhcp(self, dhcp_active=True):
        conf = mock.Mock()
        # BUG FIX: a trailing comma previously made root_helper the tuple
        # ('sudo',) rather than the string 'sudo'
        conf.root_helper = 'sudo'
        conf.dhcp_driver = 'driver'

        method_to_patch = 'quantum.openstack.common.importutils.import_object'

        with mock.patch(method_to_patch) as import_object:
            driver = mock.Mock()
            driver.active = dhcp_active
            import_object.return_value = driver

            util.kill_dhcp(conf, 'ns')

            # BUG FIX: was `import_object.called_once_with(...)` - a no-op
            # attribute access on a Mock that asserted nothing
            import_object.assert_called_once_with('driver', conf, mock.ANY,
                                                  'sudo', mock.ANY)

            if dhcp_active:
                # An active dhcp process must be disabled
                driver.assert_has_calls([mock.call.disable()])
            else:
                self.assertFalse(driver.called)

    def test_kill_dhcp_no_active(self):
        # Re-run the dhcp test with an inactive driver: no disable() call
        self.test_kill_dhcp(False)

    def test_eligible_for_deletion_ns_not_uuid(self):
        # Namespaces without a UUID suffix are never eligible
        ns = 'not_a_uuid'
        self.assertFalse(util.eligible_for_deletion(mock.Mock(), ns))

    def _test_eligible_for_deletion_helper(self, prefix, force, is_empty,
                                           expected):
        """Check eligible_for_deletion() for prefix/force/emptiness combo.

        :param prefix: namespace prefix, e.g. 'qrouter-' or 'qdhcp-'
        :param force: force flag passed through to the util
        :param is_empty: what IPWrapper reports for namespace emptiness
        :param expected: expected return value
        """
        ns = prefix + '6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
        conf = mock.Mock()
        conf.root_helper = 'sudo'

        with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.return_value.namespace_is_empty.return_value = is_empty
            self.assertEqual(util.eligible_for_deletion(conf, ns, force),
                             expected)

            expected_calls = [mock.call('sudo', ns)]
            if not force:
                # The emptiness check is skipped when force is set
                expected_calls.append(mock.call().namespace_is_empty())
            ip_wrap.assert_has_calls(expected_calls)

    def test_eligible_for_deletion_empty(self):
        self._test_eligible_for_deletion_helper('qrouter-', False, True, True)

    def test_eligible_for_deletion_not_empty(self):
        self._test_eligible_for_deletion_helper('qdhcp-', False, False, False)

    def test_eligible_for_deletion_not_empty_forced(self):
        self._test_eligible_for_deletion_helper('qdhcp-', True, False, True)

    def test_unplug_device_regular_device(self):
        conf = mock.Mock()
        device = mock.Mock()

        util.unplug_device(conf, device)
        device.assert_has_calls([mock.call.link.delete()])

    def test_unplug_device_ovs_port(self):
        conf = mock.Mock()
        conf.ovs_integration_bridge = 'br-int'
        conf.root_helper = 'sudo'
        device = mock.Mock()
        device.name = 'tap1'
        # Device link cannot be deleted directly - util should fall back
        # to removing the OVS port instead
        device.link.delete.side_effect = RuntimeError

        with mock.patch('quantum.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
            br_patch = mock.patch(
                'quantum.agent.linux.ovs_lib.get_bridge_for_iface')
            with br_patch as mock_get_bridge_for_iface:
                mock_get_bridge_for_iface.return_value = 'br-int'
                ovs_bridge = mock.Mock()
                ovs_br_cls.return_value = ovs_bridge

                util.unplug_device(conf, device)

                mock_get_bridge_for_iface.assert_called_once_with(
                    conf.root_helper, 'tap1')
                # BUG FIX: was `ovs_br_cls.called_once_with(...)` - a no-op
                # attribute access on a Mock that asserted nothing
                ovs_br_cls.assert_called_once_with('br-int', 'sudo')
                ovs_bridge.assert_has_calls(
                    [mock.call.delete_port(device.name)])

    def test_unplug_device_cannot_determine_bridge_port(self):
        conf = mock.Mock()
        conf.ovs_integration_bridge = 'br-int'
        conf.root_helper = 'sudo'
        device = mock.Mock()
        device.name = 'tap1'
        device.link.delete.side_effect = RuntimeError

        with mock.patch('quantum.agent.linux.ovs_lib.OVSBridge') as ovs_br_cls:
            br_patch = mock.patch(
                'quantum.agent.linux.ovs_lib.get_bridge_for_iface')
            with br_patch as mock_get_bridge_for_iface:
                with mock.patch.object(util.LOG, 'debug') as debug:
                    # No bridge found for the interface: only log a debug
                    # message, never touch OVS
                    mock_get_bridge_for_iface.return_value = None
                    ovs_bridge = mock.Mock()
                    ovs_br_cls.return_value = ovs_bridge

                    util.unplug_device(conf, device)

                    mock_get_bridge_for_iface.assert_called_once_with(
                        conf.root_helper, 'tap1')
                    # assertEquals is deprecated - use assertEqual
                    self.assertEqual(ovs_br_cls.mock_calls, [])
                    self.assertTrue(debug.called)

    def _test_destroy_namespace_helper(self, force, num_devices):
        """Run destroy_namespace() with ``num_devices`` fake tap devices.

        The loopback device is always present and must never be unplugged.
        """
        ns = 'qrouter-6e322ac7-ab50-4f53-9cdc-d1d3c1164b6d'
        conf = mock.Mock()
        conf.root_helper = 'sudo'

        lo_device = mock.Mock()
        lo_device.name = 'lo'

        devices = [lo_device]
        while num_devices:
            dev = mock.Mock()
            dev.name = 'tap%d' % num_devices
            devices.append(dev)
            num_devices -= 1

        with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.return_value.get_devices.return_value = devices
            ip_wrap.return_value.netns.exists.return_value = True

            with mock.patch.object(util, 'unplug_device') as unplug:
                with mock.patch.object(util, 'kill_dhcp') as kill_dhcp:
                    util.destroy_namespace(conf, ns, force)
                    expected = [mock.call('sudo', ns)]

                    if force:
                        expected.extend([
                            mock.call().netns.exists(ns),
                            mock.call().get_devices(exclude_loopback=True)])
                        self.assertTrue(kill_dhcp.called)
                        # Every device except loopback must be unplugged
                        unplug.assert_has_calls(
                            [mock.call(conf, d) for d in
                             devices[1:]])

                    expected.append(mock.call().garbage_collect_namespace())
                    ip_wrap.assert_has_calls(expected)

    # NOTE: the following method names keep the original 'destory' typo so
    # any external test-name selections keep working
    def test_destory_namespace_empty(self):
        self._test_destroy_namespace_helper(False, 0)

    def test_destory_namespace_not_empty(self):
        self._test_destroy_namespace_helper(False, 1)

    def test_destory_namespace_not_empty_forced(self):
        self._test_destroy_namespace_helper(True, 2)

    def test_main(self):
        namespaces = ['ns1', 'ns2']
        with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces

            with mock.patch('eventlet.sleep') as eventlet_sleep:
                conf = mock.Mock()
                conf.root_helper = 'sudo'
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)

                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    mocks['eligible_for_deletion'].return_value = True
                    mocks['setup_conf'].return_value = conf

                    util.main()

                    mocks['eligible_for_deletion'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])
                    mocks['destroy_namespace'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])
                    ip_wrap.assert_has_calls(
                        [mock.call.get_namespaces('sudo')])
                    # A grace period is given before destroying namespaces
                    eventlet_sleep.assert_called_once_with(2)

    def test_main_no_candidates(self):
        namespaces = ['ns1', 'ns2']
        with mock.patch('quantum.agent.linux.ip_lib.IPWrapper') as ip_wrap:
            ip_wrap.get_namespaces.return_value = namespaces

            with mock.patch('eventlet.sleep') as eventlet_sleep:
                conf = mock.Mock()
                conf.root_helper = 'sudo'
                conf.force = False
                methods_to_mock = dict(
                    eligible_for_deletion=mock.DEFAULT,
                    destroy_namespace=mock.DEFAULT,
                    setup_conf=mock.DEFAULT)

                with mock.patch.multiple(util, **methods_to_mock) as mocks:
                    mocks['eligible_for_deletion'].return_value = False
                    mocks['setup_conf'].return_value = conf

                    util.main()

                    ip_wrap.assert_has_calls(
                        [mock.call.get_namespaces('sudo')])
                    mocks['eligible_for_deletion'].assert_has_calls(
                        [mock.call(conf, 'ns1', False),
                         mock.call(conf, 'ns2', False)])

                    # Nothing eligible: no destruction and no sleep
                    self.assertFalse(mocks['destroy_namespace'].called)
                    self.assertFalse(eventlet_sleep.called)
| |
from collections import abc
from datetime import datetime, time
from functools import partial
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs import Timestamp, conversion, parsing
from pandas._libs.tslibs.parsing import ( # noqa
DateParseError, _format_is_iso, _guess_datetime_format, parse_time_string)
from pandas._libs.tslibs.strptime import array_strptime
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_float, is_integer, is_integer_dtype,
is_list_like, is_numeric_dtype, is_object_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import notna
from pandas.core import algorithms
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
def _maybe_cache(arg, format, cache, convert_listlike):
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series()
if cache:
# Perform a quicker unique check
from pandas import Index
unique_dates = Index(arg).unique()
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates.to_numpy(),
True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result, name=name)
else:
return DatetimeIndex(result, name=name)
return result.values
def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results, name=name)
return tz_results
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parsed
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime

    Returns
    -------
    ndarray of parsed dates
        Returns:

        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    from pandas.core.arrays import DatetimeArray
    from pandas.core.arrays.datetimes import (
        maybe_convert_dtype, objects_to_datetime64ns)

    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')

    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg
    elif is_datetime64_ns_dtype(arg):
        if box and not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg
    elif unit is not None:
        # Numeric input interpreted as offsets in the given unit
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result, tz_parsed = tslib.array_with_unit_to_datetime(arg, unit,
                                                              errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                result = Index(result, name=name)
            else:
                result = DatetimeIndex(result, name=name)
            # GH 23758: We may still need to localize the result with tz
            # GH 25546: Apply tz_parsed first (from arg), then tz (from caller)
            # result will be naive but in UTC
            try:
                result = result.tz_localize('UTC').tz_convert(tz_parsed)
            except AttributeError:
                # Regular Index from 'ignore' path
                return result
            if tz is not None:
                if result.tz is None:
                    result = result.tz_localize(tz)
                else:
                    result = result.tz_convert(tz)
        return result
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')

    # warn if passing timedelta64, raise for PeriodDtype
    # NB: this must come after unit transformation
    orig_arg = arg
    arg, _ = maybe_convert_dtype(arg, copy=False)

    arg = ensure_object(arg)
    require_iso8601 = False

    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)

    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None

    # tz_parsed holds any timezone extracted from the data itself
    tz_parsed = None
    result = None

    if format is not None:
        try:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    # pass orig_arg as float-dtype may have been converted to
                    # datetime64[ns]
                    orig_arg = ensure_object(orig_arg)
                    result = _attempt_YYYYMMDD(orig_arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")

            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz, name)
                except tslibs.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    elif errors == 'coerce':
                        # out-of-bounds values become NaT
                        result = np.empty(arg.shape, dtype='M8[ns]')
                        iresult = result.view('i8')
                        iresult.fill(tslibs.iNaT)
                    else:
                        result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        elif errors == 'coerce':
                            result = np.empty(arg.shape, dtype='M8[ns]')
                            iresult = result.view('i8')
                            iresult.fill(tslibs.iNaT)
                        else:
                            result = arg
        except ValueError as e:
            # Fallback to try to convert datetime objects if timezone-aware
            # datetime objects are found without passing `utc=True`
            # NOTE(review): on success this rebinds the ``tz`` parameter
            try:
                values, tz = conversion.datetime_to_datetime64(arg)
                return DatetimeIndex._simple_new(values, name=name, tz=tz)
            except (ValueError, TypeError):
                raise e

    if result is None:
        # No usable explicit format - generic object-array parse path
        assert format is None or infer_datetime_format
        utc = tz == 'utc'
        result, tz_parsed = objects_to_datetime64ns(
            arg, dayfirst=dayfirst, yearfirst=yearfirst,
            utc=utc, errors=errors, require_iso8601=require_iso8601,
            allow_object=True)

    if tz_parsed is not None:
        if box:
            # We can take a shortcut since the datetime64 numpy array
            # is in UTC
            return DatetimeIndex._simple_new(result, name=name,
                                             tz=tz_parsed)
        else:
            # Convert the datetime64 numpy array to an numpy array
            # of datetime objects
            result = [Timestamp(ts, tz=tz_parsed).to_pydatetime()
                      for ts in result]
            return np.array(result, dtype=object)

    if box:
        # Ensure we return an Index in all cases where box=True
        if is_datetime64_dtype(result):
            return DatetimeIndex(result, tz=tz, name=name)
        elif is_object_dtype(result):
            # e.g. an Index of datetime objects
            from pandas import Index
            return Index(result, name=name)

    return result
def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : string
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
"""
if origin == 'julian':
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != 'D':
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError:
raise ValueError("incompatible 'arg' type for given "
"'origin'='julian'")
# preemptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise tslibs.OutOfBoundsDatetime(
"{original} is Out of Bounds for "
"origin='julian'".format(original=original))
else:
# arg must be numeric
if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
is_numeric_dtype(np.asarray(arg))):
raise ValueError(
"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified ".format(
arg=arg,
origin=origin))
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except tslibs.OutOfBoundsDatetime:
raise tslibs.OutOfBoundsDatetime(
"origin {origin} is Out of Bounds".format(origin=origin))
except ValueError:
raise ValueError("origin {origin} cannot be converted "
"to a Timestamp".format(origin=origin))
if offset.tz is not None:
raise ValueError(
"origin offset {} must be tz-naive".format(offset))
offset -= Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
offset = offset // tslibs.Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(
arg, (ABCSeries, ABCIndexClass, np.ndarray)):
arg = np.asarray(arg)
arg = arg + offset
return arg
@deprecate_kwarg(old_arg_name='box', new_arg_name=None)
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True,
unit=None, infer_datetime_format=False, origin='unix',
cache=False):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
.. versionadded:: 0.18.1
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst is preceded (same
as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil behavior).
.. versionadded:: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex or Index-like object
- If False returns ndarray of values.
.. deprecated:: 0.25.0
Use :meth:`Series.to_numpy` or :meth:`Timestamp.to_datetime64`
instead to get an ndarray of values or numpy.datetime64,
respectively.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
See strftime documentation for more information on choices:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default is 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
.. versionadded:: 0.20.0
cache : boolean, default False
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.23.0
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
    Passing infer_datetime_format=True can often-times speed up parsing
    if it is not exactly an ISO8601 format, but is in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True) # doctest: +SKIP
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False) # doctest: +SKIP
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
... origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], \
dtype='datetime64[ns]', freq=None)
"""
if arg is None:
return None
if origin != 'unix':
arg = _adjust_to_origin(arg, origin, unit)
tz = 'utc' if utc else None
convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
dayfirst=dayfirst, yearfirst=yearfirst,
errors=errors, exact=exact,
infer_datetime_format=infer_datetime_format)
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = result.tz_convert(tz)
else:
result = result.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, True, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, box, tz)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors,
name=arg.name)
else:
convert_listlike = partial(convert_listlike, name=arg.name)
result = convert_listlike(arg, box, format)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = convert_listlike(arg, box, format)
else:
result = convert_listlike(np.array([arg]), box, format)[0]
return result
# mappings for assembling units
# Maps the column-name aliases accepted by to_datetime's unit-assembly path
# (singular/plural spellings such as 'years', 'milliseconds') onto the
# canonical unit codes used by to_timedelta ('h', 'm', 's', 'ms', ...).
# NOTE: lookup in _assemble_from_unit_mappings tries the exact key first and
# only then the lower-cased key, so 'M' and 'm' are not interchangeable.
_unit_map = {'year': 'year',
             'years': 'year',
             'month': 'month',
             'months': 'month',
             'day': 'day',
             'days': 'day',
             'hour': 'h',
             'hours': 'h',
             'minute': 'm',
             'minutes': 'm',
             'second': 's',
             'seconds': 's',
             'ms': 'ms',
             'millisecond': 'ms',
             'milliseconds': 'ms',
             'us': 'us',
             'microsecond': 'us',
             'microseconds': 'us',
             'ns': 'ns',
             'nanosecond': 'ns',
             'nanoseconds': 'ns'
             }
def _assemble_from_unit_mappings(arg, errors, box, tz):
    """
    Assemble the unit-specified fields from the arg (DataFrame) into
    a Series of datetimes.

    Parameters
    ----------
    arg : DataFrame
        Columns named after datetime units; year/month/day are mandatory,
        hour/minute/second/ms/us/ns are optional.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    box : boolean
        - If True, return a DatetimeIndex
        - If False, return an array
    tz : None or 'utc'

    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    def _normalize(key):
        # Exact match takes priority; 'm'/'M' style keys are case
        # significant, so only fall back to the lower-cased spelling.
        if key in _unit_map:
            return _unit_map[key]
        lowered = key.lower()
        if lowered in _unit_map:
            return _unit_map[lowered]
        return key

    unit = {col: _normalize(col) for col in arg.keys()}
    unit_rev = {mapped: col for col, mapped in unit.items()}

    # year/month/day are the minimum required fields
    required = ['year', 'month', 'day']
    req = sorted(set(required) - set(unit_rev.keys()))
    if req:
        raise ValueError("to assemble mappings requires at least that "
                         "[year, month, day] be specified: [{required}] "
                         "is missing".format(required=','.join(req)))

    # reject columns that do not name a recognized unit
    excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))
    if excess:
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{excess}]".format(excess=','.join(excess)))

    def _coerce(values):
        # numeric coercion honouring the caller's ``errors`` policy
        values = to_numeric(values, errors=errors)
        # widen narrow integer dtypes so the *10000 arithmetic below
        # cannot overflow (e.g. int8/int16 years)
        if is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        return values

    values = (_coerce(arg[unit_rev['year']]) * 10000 +
              _coerce(arg[unit_rev['month']]) * 100 +
              _coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {error}".format(error=e))

    # add in any sub-day components that were supplied
    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        col = unit_rev.get(u)
        if col is not None and col in arg:
            try:
                values += to_timedelta(_coerce(arg[col]),
                                       unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes [{value}]: "
                                 "{error}".format(value=col, error=e))

    return values.values if not box else values
def _attempt_YYYYMMDD(arg, errors):
    """
    Try to parse the YYYYMMDD/%Y%m%d format, trying to deal with NaT-like
    values as well.

    arg is passed in as an object dtype array, but could really be
    ints/strings with nan-like values or floats (e.g. with nan).

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    datetime64[ns] ndarray on success, or None if every strategy below
    failed (the caller then falls back to generic parsing).
    """
    def calc(carg):
        # calculate the actual result
        # Split each YYYYMMDD integer into year / month / day pieces.
        # NOTE(review): relies on division of the object-dtype elements
        # behaving like integer arithmetic -- assumes carg holds ints,
        # not true floats; confirm against callers.
        carg = carg.astype(object)
        parsed = parsing.try_parse_year_month_day(carg / 10000,
                                                  carg / 100 % 100,
                                                  carg % 100)
        return tslib.array_to_datetime(parsed, errors=errors)[0]
    def calc_with_mask(carg, mask):
        # Parse only the entries selected by mask; the rest become NaT.
        result = np.empty(carg.shape, dtype='M8[ns]')
        iresult = result.view('i8')
        iresult[~mask] = tslibs.iNaT
        masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
        result[mask] = masked_result.astype('M8[ns]')
        return result
    # Strategy 1: int-like values / strings that are ints
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError):
        pass
    # Strategy 2: floats with actual np.nan as the missing marker
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError):
        pass
    # Strategy 3: strings mixed with NaT-like strings ('nan', 'NaT', ...)
    try:
        mask = ~algorithms.isin(arg, list(tslib.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError):
        pass
    # nothing worked; signal the caller to fall back
    return None
# Fixed time formats for time parsing
# Tried in order by to_time / _guess_time_format_for_array: first the
# minute-resolution formats (24-hour, then 12-hour with AM/PM, each with
# and without ':' separators), then the same set with seconds.
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
                 "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
    """Guess a fixed time format from the first non-NaN element of *arr*.

    Returns the first entry of ``_time_formats`` that successfully parses
    that element, or None when the array is all-NaN or nothing matches.
    """
    non_nan_positions = notna(arr).nonzero()[0]
    if not len(non_nan_positions):
        return None
    sample = arr[non_nan_positions[0]]
    for fmt in _time_formats:
        try:
            datetime.strptime(sample, fmt)
        except ValueError:
            continue
        return fmt
    return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
    """
    Parse time strings to time objects using fixed strptime formats ("%H:%M",
    "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
    "%I%M%S%p")

    Use infer_time_format if all the strings are in the same format to speed
    up conversion.

    Parameters
    ----------
    arg : string in time format, datetime.time, list, tuple, 1-d array, Series
    format : str, default None
        Format used to convert arg into a time object. If None, fixed formats
        are used.
    infer_time_format: bool, default False
        Infer the time format based on the first non-NaN element. If all
        strings are in the same format, this will speed up conversion.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as None
        - If 'ignore', then invalid parsing will return the input

    Returns
    -------
    datetime.time
    """
    def _convert_listlike(arg, format):
        # Normalize list/tuple input to a 1-d object ndarray; reject >1-d.
        if isinstance(arg, (list, tuple)):
            arg = np.array(arg, dtype='O')
        elif getattr(arg, 'ndim', 1) > 1:
            raise TypeError('arg must be a string, datetime, list, tuple, '
                            '1-d array, or Series')
        arg = ensure_object(arg)
        # Guess a single format from the first non-NaN element so every
        # element can be parsed with one strptime pattern.
        if infer_time_format and format is None:
            format = _guess_time_format_for_array(arg)
        times = []
        if format is not None:
            # One known format: parse every element with it, applying the
            # ``errors`` policy on the first failure.
            for element in arg:
                try:
                    times.append(datetime.strptime(element, format).time())
                except (ValueError, TypeError):
                    if errors == 'raise':
                        msg = ("Cannot convert {element} to a time with given "
                               "format {format}").format(element=element,
                                                         format=format)
                        raise ValueError(msg)
                    elif errors == 'ignore':
                        # return the input unchanged
                        return arg
                    else:
                        # errors == 'coerce': invalid entries become None
                        times.append(None)
        else:
            # No known format: try each fixed format per element; once a
            # format succeeds it is moved to the front of the list so
            # subsequent elements hit it first (fast path for homogeneous
            # input).
            formats = _time_formats[:]
            format_found = False
            for element in arg:
                time_object = None
                for time_format in formats:
                    try:
                        time_object = datetime.strptime(element,
                                                        time_format).time()
                        if not format_found:
                            # Put the found format in front
                            fmt = formats.pop(formats.index(time_format))
                            formats.insert(0, fmt)
                            format_found = True
                        break
                    except (ValueError, TypeError):
                        continue
                if time_object is not None:
                    times.append(time_object)
                elif errors == 'raise':
                    raise ValueError("Cannot convert arg {arg} to "
                                     "a time".format(arg=arg))
                elif errors == 'ignore':
                    return arg
                else:
                    # errors == 'coerce'
                    times.append(None)
        return times
    # Dispatch on the input type, mirroring to_datetime's structure.
    if arg is None:
        return arg
    elif isinstance(arg, time):
        # already a datetime.time: nothing to parse
        return arg
    elif isinstance(arg, ABCSeries):
        values = _convert_listlike(arg._values, format)
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, ABCIndexClass):
        return _convert_listlike(arg, format)
    elif is_list_like(arg):
        return _convert_listlike(arg, format)
    # scalar: wrap, convert, unwrap
    return _convert_listlike(np.array([arg]), format)[0]
| |
from string import Template

import numpy as np
import pandas as pd
class DescriptionVars:
    """Class for generating, storing and retrieving the individual
    description of each one of the variables of the dataset.

    There are different types of description formats in this class:
    * Raw data: plots, images and artificial description kept separately.
    * tex: export to LaTeX format.
    """

    def __init__(self, variablename, vartype='', path_templates=''):
        """Initialize the raw-data containers of the description.

        If the dataframe is not given at construction time, the object
        waits for a later call to :meth:`calculate_description`.

        :param variablename: name of the column this object describes.
        :param vartype: '', 'Categorical', 'Ordinal' or 'Numerical'.
        :param path_templates: directory with the LaTeX template files.
        """
        # variable information.
        # TODO: probably the data-dict center should be used to generate
        # the initial information and the natural text.
        self.variablename = variablename
        self.vartype = vartype
        # Store plots, tables and text. RAW data.
        self.plots = {}
        self.tables = {}
        # Natural text and the artificially generated one.
        # TODO: 'Natural' has to be inserted in the initialization.
        self.text = {}
        # String in which the whole LaTeX page is saved.
        self.tex = ''
        # Measure of utility of this variable description.
        # TODO: implement a function to calculate this.
        # It could be a list of values.
        self.utility_value = 0
        # We suppose it is the path of the templates.
        self.path_templates = path_templates

    def calculate_description(self, dataframe, dataframe_y=''):
        """Fill the description tables (and eventually plots) for the
        variable, reading its column from *dataframe*.

        :param dataframe: pandas DataFrame containing the variable.
        :param dataframe_y: optional target Series for conversion rates;
            '' (the default) disables the target computation.
        """
        # Treat empty strings as missing values.
        # TODO: missing-value replacement may belong in the data-dict
        # center instead.  (Fix: the original referenced an undefined
        # bare ``NaN``.)
        column = dataframe.replace({self.variablename:
                                    {'': np.nan}})[self.variablename]
        # ----------------- GENERATE DESCRIPTION TABLE -----------------
        # proportion of missing values
        m = column.size
        missing = (m - column.count()) / m
        if self.vartype == '':
            # TODO: add "intelligence" (identify str or numeric).
            self.vartype = 'Categorical'
        table = None
        if self.vartype == 'Categorical':
            categories = list(column.unique())
            number_cats = len(categories)
            # ``len`` avoids the ambiguous-truth error a non-empty Series
            # would raise with a bare ``if dataframe_y:``.
            if len(dataframe_y):
                # mean target value per category.  TOTEST
                conversion = []
                for cat in categories:
                    conversion.append(float(dataframe_y[column == cat].mean()))
                # TODO: calculate Gini index or a similar inequality index.
            mode = column.mode()[0]
            vmode = column[(column == mode)].count()
            table = pd.DataFrame([str(number_cats), mode, str(vmode),
                                  "{0:.2f}".format(missing * 100) + ' %'])
            table = table.transpose()
            table.columns = ['# cats', 'mode', 'volumn_mode', '% missings']
        elif self.vartype == 'Ordinal':
            # TODO: could be ordinal but in string expression
            # (e.g. Bad, Regular, Good).  Search for solutions.
            pass
        elif self.vartype == 'Numerical':
            rang = [column.min(), column.max()]
            mean = column.mean()
            std = column.std()
            table = pd.DataFrame([str(rang), "{0:.2f}".format(mean),
                                  "{0:.2f}".format(std),
                                  "{0:.2f}".format(missing * 100) + ' %'])
            table = table.transpose()
            table.columns = ['range', 'mean', 'std', '% missings']
            # probably a histogram to calculate conversion?
        # Store only when a table was actually built (the Ordinal branch is
        # still a TODO).  Fixes both the NameError on that branch and the
        # ``self.table``/``self.tables`` typo of the original.
        if table is not None:
            self.tables['Description'] = table
        # --------------------------------------------------------------
        # TODO: generate more tables
        # TODO: generate plots

    def generate_latex_report(self, path_templates=''):
        """Generate a LaTeX report page showing the whole description of
        the selected variable and return it as a string."""
        # TODO: read these texts from files packaged with the library.
        with open(self.path_templates + 'page.txt', "r") as fl:
            page = fl.read()
        vardescription_str = self.text['Natural']
        # TODO: fill these sections from self.tables / self.plots.
        # (Fix: the original left these names undefined -> NameError.)
        tabledescriptor_str = ''
        plots_str = ''
        comments_str = ''
        artificialcomments_str = ''
        page = Template(page).\
            safe_substitute(vardescription=vardescription_str,
                            tabledescriptor=tabledescriptor_str,
                            plots=plots_str, comments=comments_str,
                            artificialcomments=artificialcomments_str)
        return page

    def generate_table_tex(self, nametable, table, title_table,
                           caption_bool=True):
        """Transform a table to tex code, rendering the dataframe inside a
        tabular tex environment, and return the resulting string."""
        with open(self.path_templates + 'table.txt', "r") as fl:
            table_str = fl.read()
        # TODO:
        # * types of tables
        # * deal with strange names of variables or spaces
        # TODO: derive a human caption from title_table; placeholder for
        # now (the original referenced an undefined description_caption).
        description_caption = title_table
        tablelabel_str = title_table + r'''_univariate''' + self.variablename
        if caption_bool:
            caption_str = Template(r'''\caption{$description}''').\
                safe_substitute(description=description_caption)
        else:
            caption_str = ''
        table = Template(table_str).\
            safe_substitute(tabular=table.to_latex(), caption=caption_str,
                            tablelabel=tablelabel_str)
        return table

    def generate_plots_tex(self, images, title_block_images,
                           caption_bool=True):
        """Generate and return the tex code to present images in the
        report.

        :raises Exception: when *images* is empty.
        """
        if not images:
            message = "This function needs to be passed the images. "
            message += "The variable " + self.variablename
            message += " needs to have an image."
            raise Exception(message)
        # figure environment template
        with open(self.path_templates + 'image.txt', "r") as fl:
            image_str = fl.read()
        # TODO: derive a human caption from title_block_images; placeholder
        # (the original referenced an undefined description_caption).
        description_caption = title_block_images
        imagelabel_str = \
            title_block_images + r'''_univariate''' + self.variablename
        # One \includegraphics per image: full width for a single image,
        # half width when several are shown side by side.  This generalizes
        # the original hard-coded 1..4 branches to any number of images.
        width = '0.9' if len(images) == 1 else '0.45'
        # TODO: save the image files and substitute their real folder/name
        # (the original substituted empty placeholders as well).
        graphics_lines = [r'\includegraphics[width=' + width +
                          r'\textwidth]{.png}' for _ in images]
        graphics_str = '\n' + '\n'.join(graphics_lines) + '\n'
        # caption generation
        if caption_bool:
            caption_str = Template(r'''\caption{$description}''').\
                safe_substitute(description=description_caption)
        else:
            caption_str = ''
        image = Template(image_str).safe_substitute(graphics=graphics_str,
                                                    caption=caption_str,
                                                    imagelabel=imagelabel_str)
        # Fix: the original built the figure code but never returned it.
        return image

    def show_terminal(self):
        """Interactive helper: print the tables and show the plots of the
        variable in the terminal, to make decisions easier."""
        for e in self.tables:
            print(e)
        for e in self.plots:
            e.show()
class DescriptionRelations:
    """Class containing the description of relationships between
    variables."""

    def __init__(self):
        # Plots describing each relationship, keyed by variable pair.
        self.plots = {}

    def generate_latex_report(self):
        """Generate a LaTeX report showing the whole description of the
        selected variables relationship.

        Still a stub: returns an empty page.  (Fix: the original returned
        an undefined name ``page`` -> NameError.)
        """
        # TODO: build the subsection (variables names) and its contents.
        page = ''
        return page
class TablonDescription:
    """Collection of DescriptionVars: holds the information of the whole
    dataset and the information of each one of its variables."""

    def __init__(self, dataframe):
        # Information from the client.  Probably this information should
        # be integrated in the data-dict center.
        self.clientname = ''
        self.codename = ''
        # Natural description of each one of the variables, in text.
        self.variabledescription_input = {}
        # Artificial description generated.
        self.variabledescription_output = {}
        # Text of the whole dataset description (a LaTeX string).
        self.tex = ''
        # Variable list obtained from the dataframe.
        self.variablelist = dataframe.columns
        # {variablename: DescriptionVars} -- filled by explore().
        # TODO: allow selecting a subset of the variables.
        self.univariate_dict = {}
        # TODO: list of tuples of variables, e.g. ranked by an easily
        # computable heuristic such as correlation.
        self.bivariate_list = []
        # Whole-dataset description object, not yet computed.
        # (Fix: the original line had an empty right-hand side, which is
        # a syntax error.)
        self.total_description = None

    def explore(self):
        """Explore the whole dataset: fill univariate_dict with one
        DescriptionVars per variable."""
        # TODO: fill variabledescription_input and the bivariate study.
        # TODO: the data-dict center is needed to know the variable type.
        for variablename in self.variablelist:
            self.univariate_dict[variablename] = \
                DescriptionVars(variablename, '')

    def show_report(self, variablename):
        """Show in the terminal all the information available of the
        desired variable."""
        description = self.variabledescription_output[variablename]
        # TODO: richer terminal report.
        description.show_terminal()
        return

    def export_results(self, filename='', pathfile='', documenttype=''):
        """Export the results of the whole dataset description: build the
        .tex content and (eventually) compile it to obtain a PDF.

        References
        ----------
        [1] .. http://stackoverflow.com/questions/8085520/generating-pdf-latex-
        with-python-script
        """
        # Build the pdf source as a concatenation of general sections.
        if not pathfile:
            pathfile = "tex_templates/"
        # header: the \usepackage preamble.
        with open(pathfile + 'header.txt', "r") as fl:
            header = fl.read()
        with open(pathfile + 'portada.txt', "r") as fl:
            portada = fl.read()
        # NOTE(review): the original passed the string positionally, which
        # is not a mapping; assumes the template placeholder is
        # ``$clientname`` -- confirm against portada.txt.
        portada = Template(portada).safe_substitute(
            clientname=self.clientname)
        # From an external file; could include more than \tableofcontents.
        with open(pathfile + 'indice.txt', "r") as fl:
            indice = fl.read()
        # Keep the generated section in a local: the original overwrote
        # self.univariate_dict (a dict of DescriptionVars) with a string.
        univariate_section = self.generate_univariate(pathfile)
        # TODO: include bivariate sections, conclusions and other ideas.
        content = header + portada + indice
        content += univariate_section + '\n\n\n\\end{document}'
        # content is already text (str); the original's .decode('utf-8')
        # fails on Python 3 and has been removed.
        # Use Texcaller to compile the *.tex:
        # import texcaller
        # pdf, info = texcaller.convert(content, 'LaTeX', 'PDF', 5)
        return

    def generate_univariate(self, pathfile):
        """Generate and return the univariate LaTeX section built from
        univariate_dict."""
        # section.txt should contain $contents and $list_vars_description.
        with open(pathfile + 'section.txt', "r") as fl:
            section = fl.read()
        # Probably more than one type of space will be needed.
        with open(pathfile + 'space.txt', "r") as fl:
            space = fl.read()
        contents_str = r''' '''
        # Fix: the original iterated a non-existent self.univariate_list.
        for description in self.univariate_dict.values():
            contents_str = contents_str + description.tex + space
        section = Template(section).safe_substitute(list_vars_description='',
                                                    contents=contents_str)
        return section

    def generate_bivariate(self):
        """Generate the self.bivariate_list."""
        # TODO: not implemented yet.
        return
| |
# for python 3
# You'll need to customize this according to your needs. Proper orientation of
# the kinect is vital; if participants are able to maintain their head or wrists
# continuously inside the word rects, they will repeatedly trigger the collision
# detection
from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import pygame
import random
import os
import sys
# Marker colors: tracked joints are drawn green, a wrong-word hit red,
# and the playfield background is white.
TRACKING_COLOR = pygame.color.Color("green")
HIGHLIGHT_COLOR = pygame.color.Color("red")
BG_COLOR = pygame.color.Color("white")
# Length of one game round, in seconds.
GAME_TIME = 60# seconds
class BodyGameRuntime(object):
    """Kinect vocabulary game: a sentence with a blank is shown together
    with two candidate words; the player scores by moving head or wrists
    into the rect of the correct word."""

    def __init__(self):
        """Initialize pygame, the Kinect runtime, the sounds, the drawing
        surface and the vocabulary (sentence -> [correct, distractor])."""
        pygame.init()
        pygame.mixer.init()
        self.beep_sound = pygame.mixer.Sound('audio\\beep.ogg')
        self.buzz_sound = pygame.mixer.Sound('audio\\buzz.ogg')
        self._screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN, 32)
        pygame.display.set_caption("Kinect Game Framework Test")
        self.finished = False
        self._clock = pygame.time.Clock()
        self._kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color |
                                                       PyKinectV2.FrameSourceTypes_Body)
        # Off-screen surface sized to the Kinect color frame; everything is
        # drawn here and then blitted to the display.
        self._frame_surface = pygame.Surface((self._kinect.color_frame_desc.Width,
                                              self._kinect.color_frame_desc.Height), 0, 32)
        self._bodies = None
        self.score = 0
        # NOTE: self.start_ticks is set in run() when SPACE is pressed.
        # {sentence: [correct_word, distractor]} -- the first entry is
        # always the correct answer; new_round() shuffles a copy.
        self.vocab_dict = {"People drive ____ these days.":["quickly", "quick"],
                           "She has an ____ dog.":["active", "actively"],
                           "He ____ opens the mail.":["carefully", "careful"],
                           "The man ____ greets his friends.":["cheerfully", "cheerful"],
                           "That is a ____ sofa!":["comfortable", "comfortably"],
                           "The alarm sounds ____.":["continuously", "continuous"],
                           "That woman is ____!":["crazy", "crazily"],
                           "The woman speaks ____.":["delightfully", "delightful"],
                           "Juan is a very ____ carpenter.":["creative", "creatively"],
                           "Wow! That is a ____ storm!":["destructive", "destructively"],
                           "The racecar drove ____ by the school.":["powerfully", "powerful"],
                           "Juana ____ said NO!":["firmly", "firm"],
                           "He ____ opened the door.":["forcefully", "forceful"],
                           "It was a ____ day.":["glorious", "gloriously"],
                           "Maria ____ observed her ex-boyfriend.":["hatefully", "hateful"],
                           "He had a ___ idea.":["hopeful", "hopefully"],
                           "It was an ____ phrase.":["insulting", "insultingly"],
                           "Jenny ____ ate the last cookie.":["intentionally", "intentional"],
                           "He likes ____ music.":["irritating", "irritatingly"],
                           "Careful! That is a ___ dog!":["bad", "badly"],
                           "The man reacted ___ to the good news.":["speedily", "speedy"],
                           "Susana has always been a ____ girl.":["nice", "nicely"],
                           "The boys plunged into the ____ water.":["deep", "deeply"],
                           "The girl ____ saved her cat from the fire.":["bravely", "brave"],
                           "The man ____ drank too much alcohol.":["foolishly", "foolish"],
                           "Mario is ____ and never does his homework.":["lazy", "lazily"],
                           "The teacher is very ____.":["rude", "rudely"],
                           "The girl plays soccer ____.":["perfectly", "perfect"],
                           "It was an ____ crash.":["accidental", "accidentally"],
                           "That is an ____ turtle!.":["angry", "angrily"],
                           "She ____ ate her beans.":["happily", "happy"],
                           "John spoke ____.":["seriously", "serious"],
                           "Firulais is a ____ dog.":["loyal", "loyally"],
                           "Margie yelled ____ into the night.":["blindly", "blind"],
                           "He ran ____ toward me.":["wildly", "wild"],
                           "Pedro is ____!":["innocent", "innocently"],
                           "The gross man winked at her ____.":["sexually", "sexual"],
                           "Concepcion is a ____ girlfriend.":["jealous", "jealously"],
                           "Luis ____ goes to the bar.":["frequently", "frequent"],
                           "We didn't go out because it was raining ____.":["heavily", "heavy"],
                           "Our team lost the game because we played ____.":["badly", "bad"],
                           "We waited ____.":["patiently", "patient"],
                           "Jimmy arrived ____.":["unexpectedly", "unexpected"],
                           "Mike stays fit by playing tennis ____.":["regularly", "regular"],
                           "The driver of the car was ____ injured.":["seriously", "serious"],
                           "The driver of the car had ____ injuries.":["serious", "seriously"],
                           "Ismael looked ____ at Eleazar.":["hungrily", "hungry"],
                           "She is a ____ driver.":["dangerous", "dangerously"]}
        self._frame_surface.fill((255, 255, 255))

    def text_objects(self, text, font):
        """Render *text* with *font*; return (surface, rect)."""
        text_surface = font.render(text, True, (0, 0, 0))
        return text_surface, text_surface.get_rect()

    def message_display(self, text, loc_tuple, loc_int):
        """Blit *text* anchored at *loc_tuple* and return its rect.

        loc_int selects which rect anchor is placed at loc_tuple:
        1 center, 2 top left, 3 bottom left, 4 bottom right, 5 top right.
        """
        text_surf, text_rect = self.text_objects(text, pygame.font.Font(None, 64))
        # Fix: set the rect attribute with setattr instead of exec() on a
        # built source string (safer, faster, debuggable).
        loc_attrs = {1: 'center', 2: 'topleft', 3: 'bottomleft',
                     4: 'bottomright', 5: 'topright'}
        setattr(text_rect, loc_attrs[loc_int], loc_tuple)
        self._frame_surface.blit(text_surf, text_rect)
        return text_rect

    def draw_ind_point(self, joints, jointPoints, color, highlight_color,
                       rect0, rect1, joint0, words, sentence, correct_word):
        """Draw one joint marker and run collision/score logic against the
        two word rects."""
        joint0State = joints[joint0].TrackingState
        # Ignore joints the sensor is not confident about.
        if (joint0State == PyKinectV2.TrackingState_NotTracked or
                joint0State == PyKinectV2.TrackingState_Inferred):
            return
        center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
        if ((rect0.collidepoint(center) and words[0] == correct_word) or
                (rect1.collidepoint(center) and words[1] == correct_word)):
            # Correct word touched: score up and start a new round.
            self.score += 1
            self.beep_sound.play()
            pygame.time.delay(500)
            self.new_round()
        elif rect0.collidepoint(center) or rect1.collidepoint(center):
            # Wrong word touched: highlight, penalize, start a new round.
            try:
                pygame.draw.circle(self._frame_surface, highlight_color, center, 20, 0)
                self.score -= 1
                self.buzz_sound.play()
                pygame.time.delay(500)
                self.new_round()
            except Exception:
                # Best effort: extreme joint coordinates can make the draw
                # fail; keep the game loop alive (as the original did).
                pass
        else:
            try:
                pygame.draw.circle(self._frame_surface, color, center, 20, 0)
            except Exception:
                pass

    def draw_ind_intro_point(self, joints, jointPoints, color, joint0):
        """Draw one joint marker on the intro screen (no collision logic)."""
        joint0State = joints[joint0].TrackingState
        if (joint0State == PyKinectV2.TrackingState_NotTracked or
                joint0State == PyKinectV2.TrackingState_Inferred):
            return
        center = (int(jointPoints[joint0].x), int(jointPoints[joint0].y))
        try:
            pygame.draw.circle(self._frame_surface, color, center, 20, 0)
        except Exception:
            pass

    def update_intro_screen(self, joints, jointPoints, color):
        """Redraw the intro screen with head and wrist markers."""
        self._frame_surface.fill(BG_COLOR)  # blank screen before drawing points
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_Head)
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristLeft)
        # may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
        self.draw_ind_intro_point(joints, jointPoints, color, PyKinectV2.JointType_WristRight)

    def update_screen(self, joints, jointPoints, color, highlight_color,
                      words, sentence, correct_word, seconds):
        """Redraw the game screen: sentence, word rects, score, timer and
        the three tracked joints."""
        self._frame_surface.fill(BG_COLOR)
        self.message_display(sentence, (300, 900), 2)
        rect0 = self.message_display(words[0], (400, 300), 1)
        rect1 = self.message_display(words[1], (self._frame_surface.get_width() - 400, 300), 1)
        self.message_display(str(self.score), (self._frame_surface.get_width() / 2, 800), 1)
        self.message_display(str(seconds), (self._frame_surface.get_width() - 300, 800), 1)
        self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
                            rect1, PyKinectV2.JointType_Head, words, sentence, correct_word)
        self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
                            rect1, PyKinectV2.JointType_WristRight, words, sentence, correct_word)
        # may change PyKinectV2.JointType_WristRight to PyKinectV2.JointType_ElbowRight
        self.draw_ind_point(joints, jointPoints, color, highlight_color, rect0,
                            rect1, PyKinectV2.JointType_WristLeft, words, sentence, correct_word)

    def end_game(self):
        """Show the final score for 3 seconds, then return to the intro
        screen via run()."""
        # NOTE(review): end_game -> run -> new_round -> end_game is mutual
        # recursion, so the call stack grows a little with every finished
        # game; acceptable for short sessions, but worth flattening into a
        # single state-machine loop eventually.
        self._frame_surface.fill(BG_COLOR)
        self.message_display("Score: {}".format(self.score), (self._frame_surface.get_width() / 2, self._frame_surface.get_height() / 2), 1)
        self._screen.blit(self._frame_surface, (0, 0))
        pygame.display.update()
        pygame.time.delay(3000)
        self.run()

    def new_round(self):
        """Pick a random sentence, shuffle its two candidate words, and run
        the per-round game loop until time runs out or the app quits."""
        sentence = random.sample(list(self.vocab_dict), 1)[0]
        # copy before shuffling so the stored pair keeps the correct word
        # in front
        words = self.vocab_dict[sentence][:]
        correct_word = words[0]
        random.shuffle(words)
        pygame.time.delay(500)
        while not self.finished:
            seconds = int(GAME_TIME - (pygame.time.get_ticks() - self.start_ticks) / 1000)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.finished = True
                if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
                    self.finished = True
            if seconds <= 0:
                self.end_game()
            if self._kinect.has_new_body_frame():
                self._bodies = self._kinect.get_last_body_frame()
            if self._bodies is not None:
                for i in range(0, self._kinect.max_body_count):
                    body = self._bodies.bodies[i]
                    if not body.is_tracked:
                        continue
                    joints = body.joints
                    joint_points = self._kinect.body_joints_to_color_space(joints)
                    self.update_screen(joints, joint_points, TRACKING_COLOR, HIGHLIGHT_COLOR, words, sentence, correct_word, seconds)
            self._screen.blit(self._frame_surface, (0, 0))
            pygame.display.update()
            self._clock.tick(60)
        self.end_game()

    def run(self):
        """Intro screen loop: show tracked joints, wait for SPACE to start
        a round or ESC/QUIT to exit; shuts the Kinect down on exit."""
        self.score = 0
        while not self.finished:
            if self._kinect.has_new_body_frame():
                self._bodies = self._kinect.get_last_body_frame()
            if self._bodies is not None:
                for i in range(0, self._kinect.max_body_count):
                    body = self._bodies.bodies[i]
                    if not body.is_tracked:
                        continue
                    joints = body.joints
                    joint_points = self._kinect.body_joints_to_color_space(joints)
                    self.update_intro_screen(joints, joint_points, TRACKING_COLOR)
            self._screen.blit(self._frame_surface, (0, 0))
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.finished = True
                if event.type == pygame.KEYUP and event.key == pygame.K_SPACE:
                    # start the round timer on SPACE
                    self.start_ticks = pygame.time.get_ticks()
                    self.new_round()
                if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
                    self.finished = True
            self._clock.tick(60)
        self._kinect.close()
        pygame.quit()
        sys.exit()
if __name__ == "__main__":
    # Build the runtime and enter the intro-screen loop.
    BodyGameRuntime().run()
| |
import os
import re
from bs4 import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Case, Count, Q, Value, When
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import mark_safe
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag, TaggedItemBase
from core import panels
from core.forms import SubmitFormBuilder
from core.utilities import has_recaptcha, validate_only_one_instance
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailsearch import index
from wagtailcaptcha.models import WagtailCaptchaEmailForm
class IndexPage(models.Model):
    """
    Abstract Index Page class. Declares a couple of abstract methods that
    must be implemented by any class implementing this 'interface'.
    """
    def clean(self):
        # Index pages are singletons: validation fails if another live
        # instance of the same page type already exists.
        validate_only_one_instance(self)

    def children(self):
        # Fix: the original message always said "aMethod()" regardless of
        # which method was missing.
        raise NotImplementedError("Class %s doesn't implement children()"
                                  % (self.__class__.__name__,))

    def get_context(self, request, *args, **kwargs):
        raise NotImplementedError("Class %s doesn't implement get_context()"
                                  % (self.__class__.__name__,))

    class Meta:
        abstract = True
class HomePage(Page, IndexPage):
    """
    Home page (site root). Lists live site pages (featured first),
    filterable by tag, and exposes an og:image built from `feed_image`.
    """
    # Page types that may be created directly under the home page.
    subpage_types = [
        'core.CompanyIndex',
        'core.SubmitFormPage',
    ]
    # Optional image used for feeds / social sharing (og:image).
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    search_fields = []
    body = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
    @property
    def og_image(self):
        """Return {'image': Image or None, 'type': extension or None} for og: meta tags."""
        image = {'image': None, 'type': None}
        if self.feed_image:
            image['image'] = self.feed_image
        # Robustness: only derive the file type when an image is actually
        # set, so pages without a feed image cannot raise AttributeError.
        if image['image']:
            name, extension = os.path.splitext(image['image'].file.url)
            image['type'] = extension[1:]
        return image
    def children(self):
        """Return the live child pages of the home page."""
        return self.get_children().live()
    def get_context(self, request, *args, **kwargs):
        """Add paginated site pages, the active tag and the tag cloud to the context."""
        # Get pages: featured first, then most recently edited.
        pages = WagtailSitePage.objects\
            .live()\
            .descendant_of(self)\
            .order_by('-is_featured', '-latest_revision_created_at')
        # Filter by tag
        tag = request.GET.get('tag')
        if tag:
            pages = pages.filter(tags__slug__iexact=tag)
        # Pagination
        page = request.GET.get('page')
        paginator = Paginator(pages, 12)  # Show 12 pages per page
        try:
            pages = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric ?page= parameter: fall back to the first page.
            pages = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= parameter: clamp to the last page.
            pages = paginator.page(paginator.num_pages)
        # Update template context
        context = super(HomePage, self).get_context(request, *args, **kwargs)
        context['pages'] = pages
        context['tag'] = tag
        # Only tags used by live pages
        context['tags'] = Tag.objects.filter(
            core_pagetag_items__isnull=False,
            core_pagetag_items__content_object__live=True
        ).annotate(count=Count('core_pagetag_items')).distinct().order_by('-count', 'name')
        return context
    class Meta:
        verbose_name = "Home Page"
    content_panels = panels.HOME_PAGE_CONTENT_PANELS
    promote_panels = panels.WAGTAIL_PAGE_PROMOTE_PANELS
class CompanyIndex(Page, IndexPage):
    """
    Index page listing company pages, inheriting from wagtailcore.Page.
    """
    parent_types = ['core.HomePage']
    subpage_types = ['core.WagtailCompanyPage']
    search_fields = []
    body = RichTextField(null=True, blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
    # Whether to render the world map of companies on this index.
    show_map = models.BooleanField(default=False, help_text='Show map of companies around the world.')
    def children(self):
        """Return the live child pages of this index."""
        return self.get_children().live()
    def get_context(self, request, *args, **kwargs):
        """Build the template context: paginated companies, optionally filtered by tag."""
        # Get pages.
        # Note: `numchild` includes draft/unpublished pages but does not create additional queries.
        pages = WagtailCompanyPage.objects\
            .live()\
            .descendant_of(self)\
            .distinct()\
            .order_by('-numchild', '-latest_revision_created_at')
        # Filter by tag.
        # NOTE(review): filters on tag *name* here, while HomePage filters on
        # tag *slug* — confirm whether that difference is intentional.
        tag = request.GET.get('tag')
        if tag:
            pages = pages.filter(tags__name__iexact=tag)
        # Pagination: 12 companies per page.
        page = request.GET.get('page')
        paginator = Paginator(pages, 12)
        try:
            pages = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric ?page= parameter: fall back to the first page.
            pages = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= parameter: clamp to the last page.
            pages = paginator.page(paginator.num_pages)
        # Update template context
        context = super(CompanyIndex, self).get_context(request, *args, **kwargs)
        context['pages'] = pages
        context['tag'] = tag
        return context
    class Meta:
        verbose_name = "Companies Index Page"
    content_panels = panels.WAGTAIL_COMPANY_INDEX_PAGE_CONTENT_PANELS
class PageTag(TaggedItemBase):
    """Through-model linking taggit tags to WagtailPage instances."""
    content_object = ParentalKey('core.WagtailPage', related_name='tagged_items')
# Main core Page model. All main content pages inherit from this class.
class WagtailPage(Page):
    """
    Our main custom Page class. All content pages should inherit from this one.
    """
    parent_types = ['core.HomePage']
    subpage_types = ['core.WagtailPage']
    # Base class only: not directly creatable in the Wagtail admin.
    is_creatable = False
    # Optional image used for feeds / social sharing (og:image).
    feed_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    body = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul', 'link', 'cleanhtml'])
    tags = ClusterTaggableManager(through=PageTag, blank=True)
    search_fields = []
    @property
    def parent(self):
        """Return the nearest ancestor page, or None for a root page."""
        try:
            return self.get_ancestors().reverse()[0]
        except IndexError:
            return None
    @property
    def child(self):
        """Return the most specific subclass instance related to this page, if any."""
        # NOTE(review): `_meta.get_all_related_objects()` was removed in
        # Django 1.10 — fine for the Django version this project pins
        # (old `wagtail.wagtailcore` imports), but should be migrated.
        for related_object in self._meta.get_all_related_objects():
            if not issubclass(related_object.model, self.__class__):
                continue
            try:
                return getattr(self, related_object.get_accessor_name())
            except ObjectDoesNotExist:
                pass
    @property
    def body_text(self):
        """Return the rich-text body stripped down to plain text."""
        return BeautifulSoup(self.body, "html5lib").get_text()
    @property
    def body_excerpt(self):
        """
        Return body text replacing end of lines (. ? ! chars) with a blank space
        """
        return re.sub(r'([\.?!])([a-zA-Z])', r'\1 \2', self.body_text)
    @property
    def og_image(self):
        """Return {'image': Image or None, 'type': extension or None} for og: meta tags."""
        image = {'image': None, 'type': None}
        if self.feed_image:
            image['image'] = self.feed_image
        # Robustness: only derive the file type when an image is actually
        # set, so pages without a feed image cannot raise AttributeError.
        if image['image']:
            name, extension = os.path.splitext(image['image'].file.url)
            image['type'] = extension[1:]
        return image
    class Meta:
        verbose_name = "Content Page"
    content_panels = panels.WAGTAIL_PAGE_CONTENT_PANELS
    promote_panels = panels.WAGTAIL_PAGE_PROMOTE_PANELS
class WagtailCompanyPage(WagtailPage):
    """
    Company page listing a bunch of site pages
    """
    parent_types = ['core.HomePage']
    subpage_types = ['core.WagtailSitePage']
    # Available orderings for the company's sites listing.
    SITES_ORDERING_ALPHABETICAL = 'alphabetical'
    SITES_ORDERING_CREATED = 'created'
    SITES_ORDERING_PATH = 'path'
    SITES_ORDERING = {
        SITES_ORDERING_PATH: {
            'name': 'Path (i.e. manual)',
            'ordering': ['-path'],
        },
        SITES_ORDERING_ALPHABETICAL: {
            'name': 'Alphabetical',
            'ordering': ['title'],
        },
        SITES_ORDERING_CREATED: {
            'name': 'Created',
            'ordering': ['-first_published_at'],
        },
    }
    # Choices sorted by their human-readable name for a stable select widget.
    SITES_ORDERING_CHOICES = [
        (key, opts['name'])
        for key, opts in sorted(SITES_ORDERING.items(), key=lambda k: k[1]['name'])
    ]
    company_url = models.URLField(
        blank=True,
        null=True,
        help_text='The URL of your site, something like "https://www.springload.co.nz"',
    )
    github_url = models.URLField(null=True, blank=True)
    twitter_url = models.URLField(null=True, blank=True)
    location = models.CharField(max_length=128, blank=True, null=True)
    show_map = models.BooleanField(default=True, help_text='Show company in the map of companies around the world.')
    # Free-form "lat,lon" pair; parsed by the `lat`/`lon` properties below.
    coords = models.CharField(max_length=255, blank=True, null=True)
    logo = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    sites_ordering = models.CharField(
        max_length=20,
        blank=False,
        choices=SITES_ORDERING_CHOICES,
        default=SITES_ORDERING_CREATED,
        help_text='The order the sites will be listed on the page',
    )
    search_fields = Page.search_fields + [
        index.SearchField('company_url', boost=1),
        index.SearchField('body_text', boost=1)
    ]
    @property
    def lat(self):
        """Latitude half of `coords`, or None when no coordinates are set."""
        if self.coords:
            return self.coords.split(",")[0].strip()
        else:
            return None
    @property
    def lon(self):
        """Longitude half of `coords`, or None when no coordinates are set."""
        if self.coords:
            return self.coords.split(",")[1].strip()
        else:
            return None
    @property
    def twitter_handler(self):
        """Return the '@handle' derived from the last path segment of `twitter_url`."""
        if self.twitter_url:
            return "@%s" % self.twitter_url.strip('/ ').split("/")[-1]
        else:
            return None
    @property
    def github_user(self):
        """Return the GitHub username derived from the last path segment of `github_url`."""
        if self.github_url:
            return self.github_url.strip('/ ').split("/")[-1]
        else:
            return None
    @property
    def children_count(self):
        """Number of live site pages listed for this company (incl. collaborations)."""
        return self.children().count()
    @property
    def og_image(self):
        """Return {'image': ..., 'type': ...}: the logo, or feed_image as fallback."""
        image = {'image': None, 'type': None}
        if self.logo:
            image['image'] = self.logo
        elif self.feed_image:
            image['image'] = self.feed_image
        # Robustness: only derive the file type when an image is actually
        # set, so companies without logo/feed image cannot raise.
        if image['image']:
            name, extension = os.path.splitext(image['image'].file.url)
            image['type'] = extension[1:]
        return image
    def children(self):
        """Return the company's live site pages (own + collaborations),
        ordered according to `sites_ordering`."""
        user_ordering = self.SITES_ORDERING[self.sites_ordering]['ordering']
        pages = WagtailSitePage.objects.live().filter(Q(path__startswith=self.path) | Q(in_cooperation_with=self))
        # When ordering by `path`, the collaborations would either all be listed first or last
        # depending on whether the collaborator(s) page(s) was created before or after this page.
        # Adding an overwrite here so collaborations always appear last.
        if self.sites_ordering == self.SITES_ORDERING_PATH:
            pages = pages.annotate(
                is_own=Case(
                    When(path__startswith=self.path, then=Value(True)),
                    # Bug fix: `Case` takes `default=`, not `default_value=`.
                    # The old keyword was silently swallowed, leaving the
                    # annotation NULL for collaborations (DB-dependent order).
                    default=Value(False),
                    output_field=models.BooleanField(),
                )
            # Descending on `is_own` puts the company's own pages (True)
            # first and collaborations (False) last, as intended above.
            ).order_by('-is_own', *user_ordering)
        # When ordering alphabetically or by creation date,
        # own sites and collaboration sites will be sorted together.
        else:
            pages = pages.order_by(*user_ordering)
        return pages
    def get_context(self, request, *args, **kwargs):
        """Add the company's paginated site pages to the template context."""
        # Get pages
        pages = self.children()
        # Pagination
        page = request.GET.get('page')
        paginator = Paginator(pages, 12)  # Show 12 pages per page
        try:
            pages = paginator.page(page)
        except PageNotAnInteger:
            # Non-numeric ?page= parameter: fall back to the first page.
            pages = paginator.page(1)
        except EmptyPage:
            # Out-of-range ?page= parameter: clamp to the last page.
            pages = paginator.page(paginator.num_pages)
        # Update template context
        context = super(WagtailCompanyPage, self).get_context(request, *args, **kwargs)
        context['pages'] = pages
        return context
    @property
    def sites_count(self):
        # Note: It uses `self.numchild` which counts draft/unpublished pages but does not create additional queries.
        return self.get_children_count()
    class Meta:
        verbose_name = "Company Page"
    content_panels = panels.WAGTAIL_COMPANY_PAGE_CONTENT_PANELS
    settings_panels = panels.WAGTAIL_COMPANY_PAGE_SETTINGS_PANELS
@python_2_unicode_compatible
class WagtailSitePage(WagtailPage):
    """
    Site page
    """
    parent_types = ['core.WagtailCompanyPage']
    subpage_types = []
    is_featured = models.BooleanField(
        "Featured",
        default=False,
        blank=False,
        help_text='If enabled, this site will appear on top of the sites list of the homepage.'
    )
    site_screenshot = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text=mark_safe(
            'Use a <b>ratio</b> of <i>16:13.28</i> '
            'and a <b>size</b> of at least <i>1200x996 pixels</i> '
            'for an optimal display.'
        ),
    )
    site_url = models.URLField(
        blank=True,
        null=True,
        help_text='The URL of your site, something like "https://www.springload.co.nz"',
    )
    # Optional collaborating company; the site then also appears in that
    # company's sites listing (see WagtailCompanyPage.children).
    in_cooperation_with = models.ForeignKey(
        'core.WagtailCompanyPage',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    search_fields = Page.search_fields + [
        index.SearchField('site_url'),
        index.SearchField('body_text')
    ]
    @property
    def og_image(self):
        """Return {'image': ..., 'type': ...}: feed_image, or the screenshot as fallback."""
        image = {'image': None, 'type': None}
        if self.feed_image:
            image['image'] = self.feed_image
        elif self.site_screenshot:
            image['image'] = self.site_screenshot
        # Robustness: only derive the file type when an image is actually
        # set, so pages with neither image cannot raise AttributeError.
        if image['image']:
            name, extension = os.path.splitext(image['image'].file.url)
            image['type'] = extension[1:]
        return image
    def __str__(self):
        # Include the URL when present so admin listings disambiguate sites.
        if self.site_url:
            return '%s - %s' % (self.title, self.site_url)
        return self.title
    class Meta:
        verbose_name = "Site Page"
    content_panels = panels.WAGTAIL_SITE_PAGE_CONTENT_PANELS
    promote_panels = panels.WAGTAIL_SITE_PAGE_PROMOTE_PANELS
class SubmitFormField(AbstractFormField):
    """A single configurable form field belonging to a SubmitFormPage."""
    page = ParentalKey('SubmitFormPage', related_name='form_fields')
class SubmitFormPage(WagtailCaptchaEmailForm if has_recaptcha() else AbstractEmailForm):
    """
    Form page, inherits from WagtailCaptchaEmailForm if available, otherwise fallback to AbstractEmailForm
    """
    def __init__(self, *args, **kwargs):
        super(SubmitFormPage, self).__init__(*args, **kwargs)
        # WagtailCaptcha does not respect cls.form_builder and overwrite with its own.
        # See https://github.com/springload/wagtail-django-recaptcha/issues/7 for more info.
        self.form_builder = SubmitFormBuilder
    parent_types = ['core.HomePage']
    subpage_types = []
    search_fields = []
    # Rich text shown before the form and after successful submission.
    body = RichTextField(blank=True, help_text='Edit the content you want to see before the form.')
    thank_you_text = RichTextField(blank=True, help_text='Set the message users will see after submitting the form.')
    class Meta:
        verbose_name = "Form Page"
    content_panels = panels.SUBMIT_FORM_PAGE_CONTENT_PANELS
| |
"""Undocumented Module"""
__all__ = ['FourState']
from direct.directnotify import DirectNotifyGlobal
#import DistributedObject
import ClassicFSM
import State
class FourState:
    """
    Generic four state ClassicFSM base class.

    This is a mix-in class that expects that your derived class
    is a DistributedObject.

    Inherit from FourStateFSM and pass in your states.  Two of
    the states should be opposites of each other and the other
    two should be the transition states between the first two.
    E.g.

                 +--------+
              -->| closed | --
             |   +--------+   |
             |                |
             |                v
        +---------+      +---------+
        | closing |<---->| opening |
        +---------+      +---------+
             ^                |
             |                |
             |    +------+    |
              ----| open |<---
                  +------+

    There is a fifth off state, but that is an implementation
    detail (and that's why it's not called a five state ClassicFSM).

    I found that this pattern repeated in several things I was
    working on, so this base class was created.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('FourState')

    def __init__(self, names, durations=(0, 1, None, 1, 1)):
        """
        names is a list of state names
            E.g.
                ['off', 'opening', 'open', 'closing', 'closed',]
            e.g. 2:
                ['off', 'locking', 'locked', 'unlocking', 'unlocked',]
            e.g. 3:
                ['off', 'deactivating', 'deactive', 'activating', 'activated',]

        durations is a sequence of time values (floats) or None values;
        it must have five entries.  (The default is a tuple rather than a
        list so that the shared default cannot be mutated across
        instances -- the old list default was a mutable-default pitfall.)

        More Details

        Here is a diagram showing the where the names from the list
        are used:

            +---------+
            | 0 (off) |----> (any other state and vice versa).
            +---------+

                 +--------+
              -->| 4 (on) |---
             |   +--------+   |
             |                |
             |                v
        +----------+     +----------+
        | 3 (off)  |<--->| 1 (off)  |
        +----------+     +----------+
             ^                |
             |                |
             |   +---------+  |
              ---| 2 (off) |<-
                 +---------+

        Each state also has an associated on or off value.  The only
        state that is 'on' is state 4.  So, the transition states
        between off and on (states 1 and 3) are also considered
        off (and so is state 2 which is opposite of 4 and therefore
        opposite of 'on').
        """
        assert self.debugPrint("FourState(names=%s)"%(names))
        self.track = None
        self.stateTime = 0.0
        self.names = names
        self.durations = durations
        self.states = {
            0: State.State(names[0],
                           self.enterState0,
                           self.exitState0,
                           [names[1],
                            names[2],
                            names[3],
                            names[4]]),
            1: State.State(names[1],
                           self.enterState1,
                           self.exitState1,
                           [names[2], names[3]]),
            2: State.State(names[2],
                           self.enterState2,
                           self.exitState2,
                           [names[3]]),
            3: State.State(names[3],
                           self.enterState3,
                           self.exitState3,
                           [names[4], names[1]]),
            4: State.State(names[4],
                           self.enterState4,
                           self.exitState4,
                           [names[1]]),
        }
        self.stateIndex = 0
        self.fsm = ClassicFSM.ClassicFSM('FourState',
                                         self.states.values(),
                                         # Initial State
                                         names[0],
                                         # Final State
                                         names[0],
                                         )
        self.fsm.enterInitialState()

    def setTrack(self, track):
        """Replace the currently playing track (pausing any previous one)
        and start the new one at the current state time."""
        assert self.debugPrint("setTrack(track=%s)"%(track,))
        if self.track is not None:
            self.track.pause()
            self.track = None
        if track is not None:
            track.start(self.stateTime)
            self.track = track

    def enterStateN(self, stateIndex):
        # Common bookkeeping for every enterState* handler below.
        self.stateIndex = stateIndex
        self.duration = self.durations[stateIndex] or 0.0

    # The AI is the authority on setting the On value.
    # If the client wants the state changed it needs to
    # send a request to the AI.
    #def setIsOn(self, isOn):
    #    assert self.debugPrint("setIsOn(isOn=%s)"%(isOn,))
    #    pass

    def isOn(self):
        """Return True only while in state 4, the single 'on' state."""
        assert self.debugPrint("isOn() returning %s (stateIndex=%s)"%(self.stateIndex==4, self.stateIndex))
        return self.stateIndex==4

    def changedOnState(self, isOn):
        """
        Allow derived classes to overide this.
        """
        assert self.debugPrint("changedOnState(isOn=%s)"%(isOn,))

    ##### state 0 #####

    def enterState0(self):
        assert self.debugPrint("enter0()")
        self.enterStateN(0)

    def exitState0(self):
        assert self.debugPrint("exit0()")
        # It's important for FourStates to broadcast their state
        # when they are generated on the client. Before I put this in,
        # if a door was generated and went directly to an 'open' state,
        # it would not broadcast its state until it closed.
        self.changedOnState(0)

    ##### state 1 #####

    def enterState1(self):
        assert self.debugPrint("enterState1()")
        self.enterStateN(1)

    def exitState1(self):
        assert self.debugPrint("exitState1()")

    ##### state 2 #####

    def enterState2(self):
        assert self.debugPrint("enterState2()")
        self.enterStateN(2)

    def exitState2(self):
        assert self.debugPrint("exitState2()")

    ##### state 3 #####

    def enterState3(self):
        assert self.debugPrint("enterState3()")
        self.enterStateN(3)

    def exitState3(self):
        assert self.debugPrint("exitState3()")

    ##### state 4 #####

    def enterState4(self):
        assert self.debugPrint("enterState4()")
        self.enterStateN(4)
        # State 4 is the only 'on' state; tell derived classes about it.
        self.changedOnState(1)

    def exitState4(self):
        assert self.debugPrint("exitState4()")
        self.changedOnState(0)

    if __debug__:
        def debugPrint(self, message):
            """for debugging"""
            return self.notify.debug("%d (%d) %s"%(
                    id(self), self.stateIndex==4, message))
| |
from datetime import datetime, timedelta
from django.test.utils import override_settings
from mock import Mock, patch
from nose.tools import eq_, ok_
import mkt
import mkt.site.tests
from mkt.constants.payments import PROVIDER_BANGO, PROVIDER_REFERENCE
from mkt.developers.models import (ActivityLog, AddonPaymentAccount,
CantCancel, PaymentAccount, PreloadTestPlan,
SolitudeSeller)
from mkt.developers.providers import get_provider
from mkt.site.fixtures import fixture
from mkt.site.utils import app_factory
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from .test_providers import Patcher
class TestActivityLogCount(mkt.site.tests.TestCase):
    """Tests for the ActivityLog manager's review-count aggregations."""
    fixtures = fixture('webapp_337141', 'user_2519')
    def setUp(self):
        now = datetime.now()
        # `bom` is the first day of the current month; `self.lm` is the last
        # day of the previous month, used to backdate entries out of range.
        bom = datetime(now.year, now.month, 1)
        self.lm = bom - timedelta(days=1)
        self.user = UserProfile.objects.filter()[0]
        mkt.set_user(self.user)
    def test_not_review_count(self):
        # A non-review event must not appear in the monthly review counts.
        mkt.log(mkt.LOG['EDIT_VERSION'], Webapp.objects.get())
        eq_(len(ActivityLog.objects.monthly_reviews()), 0)
    def test_review_count(self):
        mkt.log(mkt.LOG['APPROVE_VERSION'], Webapp.objects.get())
        result = ActivityLog.objects.monthly_reviews()
        eq_(len(result), 1)
        eq_(result[0]['approval_count'], 1)
        eq_(result[0]['user'], self.user.pk)
    def test_review_count_few(self):
        # Several approvals by the same user aggregate into one row.
        for x in range(0, 5):
            mkt.log(mkt.LOG['APPROVE_VERSION'], Webapp.objects.get())
        result = ActivityLog.objects.monthly_reviews()
        eq_(len(result), 1)
        eq_(result[0]['approval_count'], 5)
    def test_review_last_month(self):
        # Reviews dated in the previous month are excluded from the monthly count.
        log = mkt.log(mkt.LOG['APPROVE_VERSION'], Webapp.objects.get())
        log.update(created=self.lm)
        eq_(len(ActivityLog.objects.monthly_reviews()), 0)
    def test_not_total(self):
        mkt.log(mkt.LOG['EDIT_VERSION'], Webapp.objects.get())
        eq_(len(ActivityLog.objects.total_reviews()), 0)
    def test_total_few(self):
        for x in range(0, 5):
            mkt.log(mkt.LOG['APPROVE_VERSION'], Webapp.objects.get())
        result = ActivityLog.objects.total_reviews()
        eq_(len(result), 1)
        eq_(result[0]['approval_count'], 5)
    def test_total_last_month(self):
        # Unlike monthly_reviews, total_reviews keeps last month's entries.
        log = mkt.log(mkt.LOG['APPROVE_VERSION'], Webapp.objects.get())
        log.update(created=self.lm)
        result = ActivityLog.objects.total_reviews()
        eq_(len(result), 1)
        eq_(result[0]['approval_count'], 1)
        eq_(result[0]['user'], self.user.pk)
    def test_log_admin(self):
        # Admin-only events show up in admin_events but not for_developer.
        mkt.log(mkt.LOG['OBJECT_EDITED'], Webapp.objects.get())
        eq_(len(ActivityLog.objects.admin_events()), 1)
        eq_(len(ActivityLog.objects.for_developer()), 0)
    def test_log_not_admin(self):
        mkt.log(mkt.LOG['EDIT_VERSION'], Webapp.objects.get())
        eq_(len(ActivityLog.objects.admin_events()), 0)
        eq_(len(ActivityLog.objects.for_developer()), 1)
@override_settings(DEFAULT_PAYMENT_PROVIDER='bango',
                   PAYMENT_PROVIDERS=['bango'])
class TestPaymentAccount(Patcher, mkt.site.tests.TestCase):
    """Tests for PaymentAccount create/cancel/details against a mocked
    Bango backend (the mocks come from test_providers.Patcher)."""
    fixtures = fixture('webapp_337141', 'user_999')
    def setUp(self):
        self.user = UserProfile.objects.filter()[0]
        # Patch SolitudeSeller.create before Patcher's setUp kicks in.
        self.seller, self.solsel = self.create_solitude_seller()
        super(TestPaymentAccount, self).setUp()
    def create_solitude_seller(self, **kwargs):
        """Create a real SolitudeSeller row and patch the factory to return it."""
        solsel_patcher = patch('mkt.developers.models.SolitudeSeller.create')
        solsel = solsel_patcher.start()
        seller_params = {'resource_uri': 'selleruri', 'user': self.user}
        seller_params.update(kwargs)
        seller = SolitudeSeller.objects.create(**seller_params)
        solsel.return_value = seller
        # Keep the patcher around so tearDown can stop it.
        solsel.patcher = solsel_patcher
        return seller, solsel
    def tearDown(self):
        self.solsel.patcher.stop()
        super(TestPaymentAccount, self).tearDown()
    def test_create_bango(self):
        # Return a seller object without hitting Bango.
        self.bango_patcher.package.post.return_value = {
            'resource_uri': 'zipzap',
            'package_id': 123,
        }
        res = get_provider().account_create(
            self.user, {'account_name': 'Test Account'})
        eq_(res.name, 'Test Account')
        eq_(res.user, self.user)
        eq_(res.seller_uri, 'selleruri')
        eq_(res.account_id, 123)
        eq_(res.uri, 'zipzap')
        self.bango_patcher.package.post.assert_called_with(
            data={'paypalEmailAddress': 'nobody@example.com',
                  'seller': 'selleruri'})
        self.bango_patcher.bank.post.assert_called_with(
            data={'seller_bango': 'zipzap'})
    def test_cancel(self):
        # Cancelling a non-shared account disables dependent apps/refs.
        res = PaymentAccount.objects.create(
            name='asdf', user=self.user, uri='foo', seller_uri='uri1',
            solitude_seller=self.seller)
        addon = Webapp.objects.get()
        AddonPaymentAccount.objects.create(
            addon=addon, account_uri='foo',
            payment_account=res, product_uri='bpruri')
        assert addon.reload().status != mkt.STATUS_NULL
        res.cancel(disable_refs=True)
        assert res.inactive
        assert addon.reload().status == mkt.STATUS_NULL
        assert not AddonPaymentAccount.objects.exists()
    def test_cancel_shared(self):
        # Shared accounts cannot be cancelled while apps still use them.
        res = PaymentAccount.objects.create(
            name='asdf', user=self.user, uri='foo',
            solitude_seller=self.seller, shared=True)
        addon = Webapp.objects.get()
        AddonPaymentAccount.objects.create(
            addon=addon, account_uri='foo',
            payment_account=res, product_uri='bpruri')
        with self.assertRaises(CantCancel):
            res.cancel()
    def test_cancel_multiple_accounts(self):
        # Cancelling one account must not touch links to other accounts.
        acct1 = PaymentAccount.objects.create(
            name='asdf', user=self.user, uri='foo', seller_uri='uri1',
            solitude_seller=self.seller, provider=PROVIDER_BANGO)
        acct2 = PaymentAccount.objects.create(
            name='fdsa', user=self.user, uri='bar', seller_uri='uri2',
            solitude_seller=self.seller, provider=PROVIDER_REFERENCE)
        addon = Webapp.objects.get(pk=337141)
        AddonPaymentAccount.objects.create(
            addon=addon, account_uri='foo',
            payment_account=acct1, product_uri='bpruri')
        still_around = AddonPaymentAccount.objects.create(
            addon=addon, account_uri='bar',
            payment_account=acct2, product_uri='asiuri')
        ok_(addon.reload().status != mkt.STATUS_NULL)
        acct1.cancel(disable_refs=True)
        ok_(acct1.inactive)
        ok_(addon.reload().status != mkt.STATUS_NULL)
        pks = AddonPaymentAccount.objects.values_list('pk', flat=True)
        eq_(len(pks), 1)
        eq_(pks[0], still_around.pk)
    def test_get_details(self):
        # Provider detail retrieval filters the response to known keys.
        package = Mock()
        package.get.return_value = {'full': {'vendorName': 'a',
                                             'some_other_value': 'b'}}
        self.bango_patcher.package.return_value = package
        res = PaymentAccount.objects.create(
            name='asdf', user=self.user, uri='/foo/bar/123',
            solitude_seller=self.seller)
        deets = res.get_provider().account_retrieve(res)
        eq_(deets['account_name'], res.name)
        eq_(deets['vendorName'], 'a')
        assert 'some_other_value' not in deets
        self.bango_patcher.package.assert_called_with('123')
        package.get.assert_called_with(data={'full': True})
    def test_update_account_details(self):
        # Only recognised package keys are forwarded to the provider patch.
        res = PaymentAccount.objects.create(
            name='asdf', user=self.user, uri='foo',
            solitude_seller=self.seller)
        res.get_provider().account_update(res, {
            'account_name': 'new name',
            'vendorName': 'new vendor name',
            'something_other_value': 'not a package key'
        })
        eq_(res.name, 'new name')
        self.bango_patcher.api.by_url(res.uri).patch.assert_called_with(
            data={'vendorName': 'new vendor name'})
class TestPreloadTestPlan(mkt.site.tests.TestCase):
    """Tests that PreloadTestPlan rows are deleted along with their app."""
    def setUp(self):
        self.app = app_factory()
        self.preload = self.app.preloadtestplan_set.create(filename='test.pdf')
    def test_delete_cascade(self):
        eq_(self.preload.addon, self.app)
        self.app.delete()
        # Deleting the app must cascade to its preload test plans.
        eq_(PreloadTestPlan.objects.count(), 0)
| |
#!/usr/bin/env python
import argparse
import base64
import re
import logging
import time
from hashlib import sha1
from urlparse import parse_qs
from Cookie import SimpleCookie
import subprocess
import os
from saml2 import server
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_URI
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import time_util
from saml2.authn_context import AuthnBroker
from saml2.authn_context import PASSWORD
from saml2.authn_context import UNSPECIFIED
from saml2.authn_context import authn_context_class_ref
from saml2.httputil import Response
from saml2.httputil import NotFound
from saml2.httputil import geturl
from saml2.httputil import get_post
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.ident import Unknown
from saml2.metadata import create_metadata_string
from saml2.s_utils import rndstr, exception_trace
from saml2.s_utils import UnknownPrincipal
from saml2.s_utils import UnsupportedBinding
from saml2.s_utils import PolicyError
from saml2.sigver import verify_redirect_signature
logger = logging.getLogger("saml2.idp")
class Cache(object):
    """In-memory bidirectional lookup between users and their UIDs."""
    def __init__(self):
        # Two independent, initially empty mappings:
        # user -> uid and uid -> user.
        self.user2uid, self.uid2user = {}, {}
def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"):
    """Translate a timeout specifier into a formatted expiration timestamp.

    :param timeout: "now", "dawn", or a number of minutes from now.
    :param tformat: strftime-style format of the returned timestamp.
    :return: Formatted time string.
    """
    if timeout == "now":
        return time_util.instant(tformat)
    if timeout == "dawn":
        # "dawn" means the epoch, i.e. the earliest representable moment.
        return time.strftime(tformat, time.gmtime(0))
    # validity time should match lifetime of assertions
    return time_util.in_a_while(minutes=timeout, format=tformat)
def get_eptid(idp, req_info, session):
    """Return the eduPersonTargetedID for the current user, scoped to this
    IdP (entityid) and the requesting SP (the sender of `req_info`)."""
    return idp.eptid.get(idp.config.entityid,
                         req_info.sender(), session["permanent_id"],
                         session["authn_auth"])
# -----------------------------------------------------------------------------
def dict2list_of_tuples(d):
    """Return the (key, value) pairs of *d* as a list of 2-tuples."""
    return list(d.items())
# -----------------------------------------------------------------------------
class Service(object):
    """Base class for IdP WSGI endpoints: unpacks requests arriving over
    the various SAML2 bindings and dispatches them to ``do()``."""
    def __init__(self, environ, start_response, user=None):
        # Standard WSGI callables plus the locally authenticated user id, if any.
        self.environ = environ
        logger.debug("ENVIRON: %s" % environ)
        self.start_response = start_response
        self.user = user
    def unpack_redirect(self):
        """Return the query string of a HTTP-Redirect request as a flat
        dict (first value per key), or None when there is no query."""
        if "QUERY_STRING" in self.environ:
            _qs = self.environ["QUERY_STRING"]
            return dict([(k, v[0]) for k, v in parse_qs(_qs).items()])
        else:
            return None
    def unpack_post(self):
        """Return the urlencoded POST body as a flat dict, or None on error."""
        _dict = parse_qs(get_post(self.environ))
        logger.debug("unpack_post:: %s" % _dict)
        try:
            return dict([(k, v[0]) for k, v in _dict.items()])
        except Exception:
            return None
    def unpack_soap(self):
        """Return the raw SOAP body wrapped in the standard request dict,
        or None when the body could not be read."""
        try:
            query = get_post(self.environ)
            return {"SAMLRequest": query, "RelayState": ""}
        except Exception:
            return None
    def unpack_either(self):
        """Unpack a request that may arrive by either GET or POST."""
        if self.environ["REQUEST_METHOD"] == "GET":
            _dict = self.unpack_redirect()
        elif self.environ["REQUEST_METHOD"] == "POST":
            _dict = self.unpack_post()
        else:
            # Unsupported HTTP method: treated as a bad request downstream.
            _dict = None
        logger.debug("_dict: %s" % _dict)
        return _dict
    def operation(self, _dict, binding):
        """Run ``do()`` on an unpacked request, or answer 400 when empty."""
        logger.debug("_operation: %s" % _dict)
        if not _dict:
            resp = BadRequest('Error parsing request or no request')
            return resp(self.environ, self.start_response)
        else:
            return self.do(_dict["SAMLRequest"], binding, _dict["RelayState"])
    def artifact_operation(self, _dict):
        """Resolve a SAML artifact into the original message, then run ``do()``."""
        if not _dict:
            resp = BadRequest("Missing query")
            return resp(self.environ, self.start_response)
        else:
            # exchange artifact for request
            request = IDP.artifact2message(_dict["SAMLart"], "spsso")
            return self.do(request, BINDING_HTTP_ARTIFACT, _dict["RelayState"])
    def response(self, binding, http_args):
        """Turn binding-specific ``http_args`` into a WSGI response."""
        if binding == BINDING_HTTP_ARTIFACT:
            resp = Redirect()
        else:
            resp = Response(http_args["data"], headers=http_args["headers"])
        return resp(self.environ, self.start_response)
    def do(self, query, binding, relay_state=""):
        # Abstract hook: subclasses implement the actual SAML operation.
        pass
    def redirect(self):
        """ Expects a HTTP-redirect request """
        _dict = self.unpack_redirect()
        return self.operation(_dict, BINDING_HTTP_REDIRECT)
    def post(self):
        """ Expects a HTTP-POST request """
        _dict = self.unpack_post()
        return self.operation(_dict, BINDING_HTTP_POST)
    def artifact(self):
        # Can be either by HTTP_Redirect or HTTP_POST
        _dict = self.unpack_either()
        return self.artifact_operation(_dict)
    def soap(self):
        """
        Single log out using HTTP_SOAP binding
        """
        logger.debug("- SOAP -")
        _dict = self.unpack_soap()
        logger.debug("_dict: %s" % _dict)
        return self.operation(_dict, BINDING_SOAP)
    def uri(self):
        # BINDING_URI requests carry the query in the body, as SOAP does.
        _dict = self.unpack_either()
        return self.operation(_dict, BINDING_SOAP)
    # def not_authn(self, key):
    #     """
    #
    #
    #     :return:
    #     """
    #     loc = "http://%s/login" % (self.environ["HTTP_HOST"])
    #     loc += "?%s" % urllib.urlencode({"came_from": self.environ[
    #         "PATH_INFO"], "key": key})
    #     headers = [('Content-Type', 'text/plain')]
    #
    #     logger.debug("location: %s" % loc)
    #     logger.debug("headers: %s" % headers)
    #
    #     resp = Redirect(loc, headers=headers)
    #
    #     return resp(self.environ, self.start_response)
    def not_authn(self, key, requested_authn_context):
        """Send the user agent to the login service, remembering the
        pending request under ``key`` so it can be resumed afterwards."""
        ruri = geturl(self.environ, query=False)
        return do_authentication(self.environ, self.start_response,
                                 authn_context=requested_authn_context,
                                 key=key, redirect_uri=ruri)
# -----------------------------------------------------------------------------
REPOZE_ID_EQUIVALENT = "uid"
FORM_SPEC = """<form name="myform" method="post" action="%s">
<input type="hidden" name="SAMLResponse" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>"""
# -----------------------------------------------------------------------------
# === Single log in ====
# -----------------------------------------------------------------------------
class AuthenticationNeeded(Exception):
    """Raised when a request requires (re-)authentication of the user.

    :param authn_context: The requested authentication context, if any.
    Remaining positional/keyword arguments are passed on to ``Exception``.
    """
    def __init__(self, authn_context=None, *args, **kwargs):
        # Bug fix: the superclass initializer must receive `self`.
        # `Exception.__init__(*args, **kwargs)` raised a TypeError on
        # every instantiation of this exception.
        Exception.__init__(self, *args, **kwargs)
        self.authn_context = authn_context
class SSO(Service):
    def __init__(self, environ, start_response, user=None):
        """Single Sign-On endpoint handler."""
        Service.__init__(self, environ, start_response, user)
        self.binding = ""
        self.response_bindings = None
        self.resp_args = {}
        # Outgoing binding and ACS destination, set by verify_request().
        self.binding_out = None
        self.destination = None
        # Parsed AuthnRequest, set lazily by verify_request()/redirect()/post().
        self.req_info = None
    def verify_request(self, query, binding):
        """Parse and sanity-check an incoming AuthnRequest.

        :param query: The SAML query, transport encoded
        :param binding: Which binding the query came in over
        :return: (response-argument dict, error response or None)
        """
        resp_args = {}
        if not query:
            logger.info("Missing QUERY")
            resp = Unauthorized('Unknown user')
            return resp_args, resp(self.environ, self.start_response)
        if not self.req_info:
            # Not pre-parsed by redirect()/post(): parse it here.
            self.req_info = IDP.parse_authn_request(query, binding)
        logger.info("parsed OK")
        _authn_req = self.req_info.message
        logger.debug("%s" % _authn_req)
        # Pick the binding/ACS endpoint of the requesting SP for the response.
        self.binding_out, self.destination = IDP.pick_binding(
            "assertion_consumer_service",
            bindings=self.response_bindings,
            entity_id=_authn_req.issuer.text)
        logger.debug("Binding: %s, destination: %s" % (self.binding_out,
                                                       self.destination))
        resp_args = {}
        try:
            resp_args = IDP.response_args(_authn_req)
            _resp = None
        except UnknownPrincipal, excp:
            # Unknown SP: answer with a SAML error response instead of 500.
            _resp = IDP.create_error_response(_authn_req.id,
                                              self.destination, excp)
        except UnsupportedBinding, excp:
            _resp = IDP.create_error_response(_authn_req.id,
                                              self.destination, excp)
        return resp_args, _resp
    def do(self, query, binding_in, relay_state=""):
        """Handle an AuthnRequest and send back an AuthnResponse over the
        outgoing binding chosen by verify_request()."""
        try:
            resp_args, _resp = self.verify_request(query, binding_in)
        except UnknownPrincipal, excp:
            logger.error("UnknownPrincipal: %s" % (excp,))
            resp = ServiceError("UnknownPrincipal: %s" % (excp,))
            return resp(self.environ, self.start_response)
        except UnsupportedBinding, excp:
            logger.error("UnsupportedBinding: %s" % (excp,))
            resp = ServiceError("UnsupportedBinding: %s" % (excp,))
            return resp(self.environ, self.start_response)
        if not _resp:
            # No error response yet: build the user's attribute identity.
            identity = USERS[self.user].copy()
            #identity["eduPersonTargetedID"] = get_eptid(IDP, query, session)
            logger.info("Identity: %s" % (identity,))
            if REPOZE_ID_EQUIVALENT:
                identity[REPOZE_ID_EQUIVALENT] = self.user
            try:
                _resp = IDP.create_authn_response(
                    identity, userid=self.user,
                    authn=AUTHN_BROKER[self.environ["idp.authn_ref"]],
                    **resp_args)
            except Exception, excp:
                logging.error(exception_trace(excp))
                resp = ServiceError("Exception: %s" % (excp,))
                return resp(self.environ, self.start_response)
        logger.info("AuthNResponse: %s" % _resp)
        # Encode the (success or error) response for the outgoing binding.
        http_args = IDP.apply_binding(self.binding_out,
                                      "%s" % _resp, self.destination,
                                      relay_state, response=True)
        logger.debug("HTTPargs: %s" % http_args)
        return self.response(self.binding_out, http_args)
def _store_request(self, _dict):
logger.debug("_store_request: %s" % _dict)
key = sha1(_dict["SAMLRequest"]).hexdigest()
# store the AuthnRequest
IDP.ticket[key] = _dict
return key
    def redirect(self):
        """ This is the HTTP-redirect endpoint.

        Handles both a fresh AuthnRequest from an SP and the return leg
        after local authentication (identified by a "key" parameter
        referring to a previously stored request).
        """
        logger.info("--- In SSO Redirect ---")
        _info = self.unpack_redirect()

        try:
            # Return from the login form: look up the parked request by
            # its ticket key and drop the ticket (single use).
            _key = _info["key"]
            _info = IDP.ticket[_key]
            self.req_info = _info["req_info"]
            del IDP.ticket[_key]
        except KeyError:
            # Fresh request straight from the SP.
            self.req_info = IDP.parse_authn_request(_info["SAMLRequest"],
                                                    BINDING_HTTP_REDIRECT)
            _req = self.req_info.message

            if "SigAlg" in _info and "Signature" in _info:  # Signed request
                issuer = _req.issuer.text
                _certs = IDP.metadata.certs(issuer, "any", "signing")
                verified_ok = False
                # Accept if any of the SP's registered signing certs
                # verifies the redirect signature.
                for cert in _certs:
                    if verify_redirect_signature(_info, cert):
                        verified_ok = True
                        break
                if not verified_ok:
                    resp = BadRequest("Message signature verification failure")
                    return resp(self.environ, self.start_response)

            if self.user:
                if _req.force_authn:
                    # SP demands re-authentication despite an existing
                    # session: park the request and show the login page.
                    _info["req_info"] = self.req_info
                    key = self._store_request(_info)
                    return self.not_authn(key, _req.requested_authn_context)
                else:
                    return self.operation(_info, BINDING_HTTP_REDIRECT)
            else:
                # No session yet: park the request and authenticate.
                _info["req_info"] = self.req_info
                key = self._store_request(_info)
                return self.not_authn(key, _req.requested_authn_context)
        else:
            # Ticket found (try succeeded): user just authenticated.
            return self.operation(_info, BINDING_HTTP_REDIRECT)
def post(self):
"""
The HTTP-Post endpoint
"""
logger.info("--- In SSO POST ---")
_info = self.unpack_either()
self.req_info = IDP.parse_authn_request(
_info["SAMLRequest"], BINDING_HTTP_POST)
_req = self.req_info.message
if self.user:
if _req.force_authn:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(_info, BINDING_HTTP_POST)
else:
_info["req_info"] = self.req_info
key = self._store_request(_info)
return self.not_authn(key, _req.requested_authn_context)
# def artifact(self):
# # Can be either by HTTP_Redirect or HTTP_POST
# _req = self._store_request(self.unpack_either())
# if isinstance(_req, basestring):
# return self.not_authn(_req)
# return self.artifact_operation(_req)
def ecp(self):
# The ECP interface
logger.info("--- ECP SSO ---")
resp = None
try:
authz_info = self.environ["HTTP_AUTHORIZATION"]
if authz_info.startswith("Basic "):
_info = base64.b64decode(authz_info[6:])
logger.debug("Authz_info: %s" % _info)
try:
(user, passwd) = _info.split(":")
if PASSWD[user] != passwd:
resp = Unauthorized()
self.user = user
except ValueError:
resp = Unauthorized()
else:
resp = Unauthorized()
except KeyError:
resp = Unauthorized()
if resp:
return resp(self.environ, self.start_response)
_dict = self.unpack_soap()
self.response_bindings = [BINDING_PAOS]
# Basic auth ?!
return self.operation(_dict, BINDING_SOAP)
# -----------------------------------------------------------------------------
# === Authentication ====
# -----------------------------------------------------------------------------
def do_authentication(environ, start_response, authn_context, key,
                      redirect_uri):
    """
    Pick the best authentication method for the requested context and
    delegate to it (which displays the login form).
    """
    logger.debug("Do authentication")
    auth_info = AUTHN_BROKER.pick(authn_context)

    if not auth_info:
        # Nothing in the broker satisfies the requested authn context.
        resp = Unauthorized("No usable authentication method")
        return resp(environ, start_response)

    method, reference = auth_info[0]
    logger.debug("Authn chosen: %s (ref=%s)" % (method, reference))
    return method(environ, start_response, reference, key, redirect_uri)
# -----------------------------------------------------------------------------
# Demo username -> password table used by both Basic auth (ECP) and the
# login form; replace with a real credential backend in production.
PASSWD = {"haho0032": "qwerty",
          "roland": "dianakra",
          "babs": "howes",
          "upper": "crust"}
def username_password_authn(environ, start_response, reference, key,
                            redirect_uri):
    """
    Render the username/password login form.
    """
    logger.info("The login page")
    resp = Response(mako_template="login.mako", template_lookup=LOOKUP,
                    headers=[])
    # Values interpolated into the login template; key/reference let the
    # verify endpoint resume the parked SSO request afterwards.
    template_args = {
        "action": "/verify",
        "login": "",
        "password": "",
        "key": key,
        "authn_reference": reference,
        "redirect_uri": redirect_uri,
    }
    logger.info("do_authentication argv: %s" % template_args)
    return resp(environ, start_response, **template_args)
def verify_username_and_password(dic):
    """Check the submitted form fields against the PASSWD table.

    :param dic: Parsed form data; each value is a list of strings.
    :return: (True, username) on success, (False, "") otherwise.
    """
    username = dic["login"][0]
    password = dic["password"][0]
    if PASSWD[username] == password:
        return True, username
    return False, ""
def do_verify(environ, start_response, _):
    """Handle the login-form POST: check credentials, register the user
    in the IdP cache, set the session cookie, and redirect back to the
    original SSO endpoint.
    """
    query = parse_qs(get_post(environ))
    logger.debug("do_verify: %s" % query)
    try:
        _ok, user = verify_username_and_password(query)
    except KeyError:
        # Missing form fields or unknown username.
        _ok = False
        user = None

    if not _ok:
        resp = Unauthorized("Unknown user or wrong password")
    else:
        # Map a fresh random uid to the user in both directions so later
        # requests (and logout) can find the session.
        uid = rndstr(24)
        IDP.cache.uid2user[uid] = user
        IDP.cache.user2uid[user] = uid
        logger.debug("Register %s under '%s'" % (user, uid))
        kaka = set_cookie("idpauthn", "/", uid, query["authn_reference"][0])
        # Send the browser back to the parked SSO request.
        lox = "%s?id=%s&key=%s" % (query["redirect_uri"][0], uid,
                                   query["key"][0])
        logger.debug("Redirect => %s" % lox)
        resp = Redirect(lox, headers=[kaka], content="text/html")

    return resp(environ, start_response)
def not_found(environ, start_response):
    """Fallback handler when no URL pattern matches."""
    return NotFound()(environ, start_response)
# -----------------------------------------------------------------------------
# === Single log out ===
# -----------------------------------------------------------------------------
#def _subject_sp_info(req_info):
# # look for the subject
# subject = req_info.subject_id()
# subject = subject.text.strip()
# sp_entity_id = req_info.message.issuer.text.strip()
# return subject, sp_entity_id
class SLO(Service):
    """Single Logout Service: tears down the local session and answers
    with a LogoutResponse over the same binding."""

    def do(self, request, binding, relay_state=""):
        logger.info("--- Single Log Out Service ---")
        try:
            # NOTE(review): assumes the logout payload is exactly two
            # lines with the request body on the second -- confirm this
            # holds for every binding that reaches here.
            _, body = request.split("\n")
            logger.debug("req: '%s'" % body)
            req_info = IDP.parse_logout_request(body, binding)
        except Exception, exc:
            logger.error("Bad request: %s" % exc)
            resp = BadRequest("%s" % exc)
            return resp(self.environ, self.start_response)

        msg = req_info.message
        if msg.name_id:
            lid = IDP.ident.find_local_id(msg.name_id)
            logger.info("local identifier: %s" % lid)
            # Drop the uid<->user session mappings in both directions.
            del IDP.cache.uid2user[IDP.cache.user2uid[lid]]
            del IDP.cache.user2uid[lid]
            # remove the authentication
            try:
                IDP.session_db.remove_authn_statements(msg.name_id)
            except KeyError, exc:
                logger.error("ServiceError: %s" % exc)
                resp = ServiceError("%s" % exc)
                return resp(self.environ, self.start_response)

        resp = IDP.create_logout_response(msg, [binding])

        try:
            hinfo = IDP.apply_binding(binding, "%s" % resp, "", relay_state)
        except Exception, exc:
            logger.error("ServiceError: %s" % exc)
            resp = ServiceError("%s" % exc)
            return resp(self.environ, self.start_response)

        #_tlh = dict2list_of_tuples(hinfo["headers"])
        # Expire the authn cookie so the browser session ends as well.
        delco = delete_cookie(self.environ, "idpauthn")
        if delco:
            hinfo["headers"].append(delco)
        logger.info("Header: %s" % (hinfo["headers"],))
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Manage Name ID service
# ----------------------------------------------------------------------------
class NMI(Service):
    """Manage Name ID service (SOAP binding only)."""

    def do(self, query, binding, relay_state=""):
        logger.info("--- Manage Name ID Service ---")
        request = IDP.parse_manage_name_id_request(query, binding).message

        # Let the identity backend apply the new/terminated identifier.
        name_id = IDP.ident.handle_manage_name_id_request(
            request.name_id, request.new_id, request.new_encrypted_id,
            request.terminate)
        logger.debug("New NameID: %s" % name_id)

        _resp = IDP.create_manage_name_id_response(request)

        # It's using SOAP binding
        hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "",
                                  relay_state, response=True)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Assertion ID request ===
# ----------------------------------------------------------------------------
# Only URI binding
class AIDR(Service):
    """Assertion ID request service -- URI binding only."""

    def do(self, aid, binding, relay_state=""):
        logger.info("--- Assertion ID Service ---")

        try:
            assertion = IDP.create_assertion_id_request_response(aid)
        except Unknown:
            # No assertion stored under that ID.
            resp = NotFound(aid)
            return resp(self.environ, self.start_response)

        hinfo = IDP.apply_binding(BINDING_URI, "%s" % assertion, response=True)
        logger.debug("HINFO: %s" % hinfo)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)

    def operation(self, _dict, binding, **kwargs):
        logger.debug("_operation: %s" % _dict)
        # The assertion ID arrives as the "ID" key of the unpacked query.
        if _dict and "ID" in _dict:
            return self.do(_dict["ID"], binding, **kwargs)
        resp = BadRequest('Error parsing request or no request')
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Artifact resolve service ===
# ----------------------------------------------------------------------------
class ARS(Service):
    """Artifact resolution service (SOAP binding)."""

    def do(self, request, binding, relay_state=""):
        resolve_req = IDP.parse_artifact_resolve(request, binding)
        # Answer with the message the artifact refers to.
        reply = IDP.create_artifact_response(resolve_req,
                                             resolve_req.artifact.text)
        hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % reply, "", "",
                                  response=True)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Authn query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class AQS(Service):
    """Authentication query service (SOAP binding only)."""

    def do(self, request, binding, relay_state=""):
        logger.info("--- Authn Query Service ---")
        _query = IDP.parse_authn_query(request, binding).message

        answer = IDP.create_authn_query_response(
            _query.subject, _query.requested_authn_context,
            _query.session_index)
        logger.debug("response: %s" % answer)

        hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % answer, "", "",
                                  response=True)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Attribute query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class ATTR(Service):
    """Attribute query service (SOAP binding only)."""

    def do(self, request, binding, relay_state=""):
        logger.info("--- Attribute Query Service ---")
        _query = IDP.parse_attribute_query(request, binding).message

        name_id = _query.subject.name_id
        logger.debug("Local uid: %s" % name_id.text)
        # Attribute values for the subject come from the EXTRA table.
        identity = EXTRA[name_id.text]

        # Comes in over SOAP so only need to construct the response
        args = IDP.response_args(_query, [BINDING_SOAP])
        msg = IDP.create_attribute_response(identity,
                                            name_id=name_id, **args)
        logger.debug("response: %s" % msg)

        hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
                                  response=True)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Name ID Mapping service
# When an entity that shares an identifier for a principal with an identity
# provider wishes to obtain a name identifier for the same principal in a
# particular format or federation namespace, it can send a request to
# the identity provider using this protocol.
# ----------------------------------------------------------------------------
class NIM(Service):
    """Name ID mapping service (SOAP binding only)."""

    def do(self, query, binding, relay_state=""):
        request = IDP.parse_name_id_mapping_request(query, binding).message
        try:
            name_id = IDP.ident.handle_name_id_mapping_request(
                request.name_id, request.name_id_policy)
        except (Unknown, PolicyError):
            # Both failure modes are reported identically to the caller.
            resp = BadRequest("Unknown entity")
            return resp(self.environ, self.start_response)

        info = IDP.response_args(request)
        _resp = IDP.create_name_id_mapping_response(name_id, **info)

        # Only SOAP
        hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", "",
                                  response=True)
        resp = Response(hinfo["data"], headers=hinfo["headers"])
        return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Cookie handling
# ----------------------------------------------------------------------------
def info_from_cookie(kaka):
    """ Extract (user, authn_ref) from the "idpauthn" session cookie.

    :param kaka: Raw HTTP Cookie header value, or None.
    :return: (user, authn_ref) on success, (None, None) when there is
        no valid cookie or the uid is unknown.
    """
    logger.debug("KAKA: %s" % kaka)
    if kaka:
        cookie_obj = SimpleCookie(kaka)
        morsel = cookie_obj.get("idpauthn", None)
        if morsel:
            try:
                # BUG FIX: split on the first ":" only so an authn
                # reference containing a colon survives the round trip,
                # and treat a malformed/undecodable cookie value as
                # "no session" instead of raising an uncaught error.
                key, ref = base64.b64decode(morsel.value).split(":", 1)
                return IDP.cache.uid2user[key], ref
            except (KeyError, ValueError, TypeError):
                return None, None
        else:
            logger.debug("No idpauthn cookie")
    return None, None
def delete_cookie(environ, name):
    """Build a Set-Cookie header tuple that expires the named cookie,
    or return None when the request carried no cookies at all."""
    kaka = environ.get("HTTP_COOKIE", '')
    logger.debug("delete KAKA: %s" % kaka)
    if not kaka:
        return None

    morsel = SimpleCookie(kaka).get(name, None)
    logger.debug("Expire: %s" % morsel)

    # An empty value with an expiry in the past makes browsers drop it.
    expired = SimpleCookie()
    expired[name] = ""
    expired[name]['path'] = "/"
    expired[name]["expires"] = _expiration("dawn")
    return tuple(expired.output().split(": ", 1))
def set_cookie(name, _, *args):
    """Build a Set-Cookie header tuple carrying base64(":".join(args)),
    valid for five minutes. The second positional argument (the path at
    the call site) is ignored; the path is always "/"."""
    payload = base64.b64encode(":".join(args))
    cookie = SimpleCookie()
    cookie[name] = payload
    cookie[name]['path'] = "/"
    cookie[name]["expires"] = _expiration(5)  # 5 minutes from now
    logger.debug("Cookie expires: %s" % cookie[name]["expires"])
    return tuple(cookie.output().split(": ", 1))
# ----------------------------------------------------------------------------
# map urls to functions
# Each entry maps a URL regex to its handler: either a (ServiceClass,
# method-name) pair that application() instantiates per request, or a
# plain callable taking (environ, start_response, user).
AUTHN_URLS = [
    # sso
    (r'sso/post$', (SSO, "post")),
    (r'sso/post/(.*)$', (SSO, "post")),
    (r'sso/redirect$', (SSO, "redirect")),
    (r'sso/redirect/(.*)$', (SSO, "redirect")),
    (r'sso/art$', (SSO, "artifact")),
    (r'sso/art/(.*)$', (SSO, "artifact")),
    # slo
    (r'slo/redirect$', (SLO, "redirect")),
    (r'slo/redirect/(.*)$', (SLO, "redirect")),
    (r'slo/post$', (SLO, "post")),
    (r'slo/post/(.*)$', (SLO, "post")),
    (r'slo/soap$', (SLO, "soap")),
    (r'slo/soap/(.*)$', (SLO, "soap")),
    #
    (r'airs$', (AIDR, "uri")),
    (r'ars$', (ARS, "soap")),
    # mni
    (r'mni/post$', (NMI, "post")),
    (r'mni/post/(.*)$', (NMI, "post")),
    (r'mni/redirect$', (NMI, "redirect")),
    (r'mni/redirect/(.*)$', (NMI, "redirect")),
    (r'mni/art$', (NMI, "artifact")),
    (r'mni/art/(.*)$', (NMI, "artifact")),
    (r'mni/soap$', (NMI, "soap")),
    (r'mni/soap/(.*)$', (NMI, "soap")),
    # nim
    (r'nim$', (NIM, "soap")),
    (r'nim/(.*)$', (NIM, "soap")),
    #
    (r'aqs$', (AQS, "soap")),
    (r'attr$', (ATTR, "soap"))
]

# Patterns reachable without an authenticated session; tried first when
# no user could be recovered from cookie or query string.
NON_AUTHN_URLS = [
    #(r'login?(.*)$', do_authentication),
    (r'verify?(.*)$', do_verify),
    (r'sso/ecp$', (SSO, "ecp")),
]
# ----------------------------------------------------------------------------
def metadata(environ, start_response):
    """Serve this IdP's SAML metadata as XML.

    Built on the fly from the command-line arguments (config, cert,
    key, validity, ...) parsed at start-up.
    """
    try:
        path = args.path
        if path is None or len(path) == 0:
            path = os.path.dirname(os.path.abspath(__file__))
        if path[-1] != "/":
            path += "/"
        metadata = create_metadata_string(path + args.config, IDP.config,
                                          args.valid, args.cert, args.keyfile,
                                          args.id, args.name, args.sign)
        start_response('200 OK', [('Content-Type', "text/xml")])
        return metadata
    except Exception as ex:
        # BUG FIX: `ex.message` is not set on all exception types (and is
        # gone in Python 3); format the exception object itself.
        logger.error("An error occured while creating metadata: %s" % ex)
        return not_found(environ, start_response)
def application(environ, start_response):
    """
    The main WSGI application. Dispatch the current request to
    the functions from above and store the regular expression
    captures in the WSGI environment as `myapp.url_args` so that
    the functions from above can access the url placeholders.
    If nothing matches call the `not_found` function.

    :param environ: The HTTP application environment
    :param start_response: The application to run when the handling of the
        request is done
    :return: The response as a list of lines
    """
    path = environ.get('PATH_INFO', '').lstrip('/')
    if path == "metadata":
        # Metadata is always served, no session needed.
        return metadata(environ, start_response)

    kaka = environ.get("HTTP_COOKIE", None)
    logger.info("<application> PATH: %s" % path)

    if kaka:
        # Session cookie present: recover user and authn reference.
        logger.info("= KAKA =")
        user, authn_ref = info_from_cookie(kaka)
        environ["idp.authn_ref"] = authn_ref
    else:
        # No cookie; do_verify() passes the uid back as the "id" query
        # parameter right after authentication.
        try:
            query = parse_qs(environ["QUERY_STRING"])
            logger.debug("QUERY: %s" % query)
            user = IDP.cache.uid2user[query["id"][0]]
        except KeyError:
            user = None

    url_patterns = AUTHN_URLS
    if not user:
        logger.info("-- No USER --")
        # insert NON_AUTHN_URLS first in case there is no user
        url_patterns = NON_AUTHN_URLS + url_patterns

    for regex, callback in url_patterns:
        match = re.search(regex, path)
        if match is not None:
            try:
                environ['myapp.url_args'] = match.groups()[0]
            except IndexError:
                # Pattern has no capture group; expose the whole path.
                environ['myapp.url_args'] = path

            logger.debug("Callback: %s" % (callback,))
            if isinstance(callback, tuple):
                # (ServiceClass, method name): instantiate per request
                # and invoke the named endpoint method.
                cls = callback[0](environ, start_response, user)
                func = getattr(cls, callback[1])
                return func()
            return callback(environ, start_response, user)

    return not_found(environ, start_response)
# ----------------------------------------------------------------------------
from mako.lookup import TemplateLookup

# Mako template setup for the login page; templates live next to the
# server under ./templates and ./htdocs.
ROOT = './'
LOOKUP = TemplateLookup(directories=[ROOT + 'templates', ROOT + 'htdocs'],
                        module_directory=ROOT + 'modules',
                        input_encoding='utf-8', output_encoding='utf-8')
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    import socket
    from idp_user import USERS
    from idp_user import EXTRA
    from wsgiref.simple_server import make_server

    # CLI arguments feed both server start-up and on-demand metadata
    # generation (see metadata()).
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', dest='path', help='Path to configuration file.')
    parser.add_argument('-v', dest='valid',
                        help="How long, in days, the metadata is valid from the time of creation")
    parser.add_argument('-c', dest='cert', help='certificate')
    parser.add_argument('-i', dest='id',
                        help="The ID of the entities descriptor")
    parser.add_argument('-k', dest='keyfile',
                        help="A file with a key to sign the metadata with")
    parser.add_argument('-n', dest='name')
    parser.add_argument('-s', dest='sign', action='store_true',
                        help="sign the metadata")
    parser.add_argument(dest="config")
    args = parser.parse_args()

    PORT = 8088

    # Register the available authentication methods; password login is
    # preferred (weight 10) over the unspecified fallback (weight 0).
    AUTHN_BROKER = AuthnBroker()
    AUTHN_BROKER.add(authn_context_class_ref(PASSWORD),
                     username_password_authn, 10,
                     "http://%s" % socket.gethostname())
    AUTHN_BROKER.add(authn_context_class_ref(UNSPECIFIED),
                     "", 0, "http://%s" % socket.gethostname())

    # The pysaml2 IdP server instance plus an in-memory ticket store for
    # requests parked while the user authenticates.
    IDP = server.Server(args.config, cache=Cache())
    IDP.ticket = {}

    SRV = make_server('', PORT, application)
    print "IdP listening on port: %s" % PORT
    SRV.serve_forever()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response-transform callback accepted
# by every operation in this module.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations:
"""PublicIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # msrest (de)serializers for URL/query/header/body handling.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        public_ip_prefix_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified public IP prefix.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_prefix_name: The name of the PublicIpPrefix.
        :type public_ip_prefix_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial DELETE. `cls` passes the
            # raw pipeline response through so the poller can inspect it.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                public_ip_prefix_name=public_ip_prefix_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final deserialization step: delete has no response body.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
async def get(
self,
resource_group_name: str,
public_ip_prefix_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PublicIPPrefix":
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.PublicIPPrefix",
**kwargs: Any
) -> "_models.PublicIPPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        public_ip_prefix_name: str,
        parameters: "_models.PublicIPPrefix",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PublicIPPrefix"]:
        """Creates or updates a static or dynamic public IP prefix.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_prefix_name: The name of the public IP prefix.
        :type public_ip_prefix_name: str
        :param parameters: Parameters supplied to the create or update public IP prefix operation.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PublicIPPrefix"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial PUT. `cls` passes the raw
            # pipeline response through so the poller can inspect it.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                public_ip_prefix_name=public_ip_prefix_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response into the resource model.
            deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        public_ip_prefix_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.PublicIPPrefix":
        """Initial PATCH request for the update-tags long-running operation.

        Sends the tags payload and returns the deserialized PublicIPPrefix from
        the immediate (200) response; ``begin_update_tags`` wraps this in a poller.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the TagsObject body and send the PATCH.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        public_ip_prefix_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PublicIPPrefix"]:
        """Updates public IP prefix tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_prefix_name: The name of the public IP prefix.
        :type public_ip_prefix_name: str
        :param parameters: Parameters supplied to update public IP prefix tags.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # polling may be True (default AsyncARMPolling), False (no polling) or a
        # caller-supplied AsyncPollingMethod instance.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PATCH; keep the raw
            # PipelineResponse (cls=lambda) so the poller can drive the LRO.
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                public_ip_prefix_name=public_ip_prefix_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; the poller must not see them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into PublicIPPrefix (or hand the
            # raw response to a caller-provided cls).
            deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
        """Gets all the public IP prefixes in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefixListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: first page uses the metadata URL template,
            # subsequent pages follow the server-provided nextLink verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Turn one page into (next_link, iterable-of-items) for AsyncItemPaged.
            deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
        """Gets all public IP prefixes in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefixListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: first page uses the metadata URL template,
            # subsequent pages follow the server-provided nextLink verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Turn one page into (next_link, iterable-of-items) for AsyncItemPaged.
            deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
| |
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import unittest
from tvcm import parse_html_deps
from tvcm import html_generation_controller
class ParseTests(unittest.TestCase):
    """Unit tests for parse_html_deps.HTMLModuleParser.

    Covers extraction of external scripts, inline scripts, stylesheets and
    HTML imports, plus the HTML that remains once links and scripts are
    stripped out.

    Fixes over the previous revision:
      * ``assertEquals`` (deprecated alias, removed in Python 3.12) replaced
        with ``assertEqual`` throughout.
      * ``test_parse_style_import`` previously called
        ``self.assertRaises(lambda: parser.Parse(html))``, which passes the
        lambda as the *exception class* and never invokes it — the test
        asserted nothing.  It now actually calls Parse and checks it raises.
      * Typo'd test names corrected (sripping -> stripping, raises -> raise).
    """
    def test_parse_empty(self):
        """An empty document yields no scripts, stylesheets or imports."""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse('')
        self.assertEqual([], module.scripts_external)
        self.assertEqual([], module.inline_scripts)
        self.assertEqual([], module.stylesheets)
        self.assertEqual([], module.imports)
    def test_parse_none(self):
        """Parsing None behaves like parsing an empty document."""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(None)
        self.assertEqual([], module.scripts_external)
        self.assertEqual([], module.inline_scripts)
        self.assertEqual([], module.stylesheets)
        self.assertEqual([], module.imports)
    def test_parse_script_src_basic(self):
        """<script src=...> tags are collected in document order and stripped."""
        html = """<!DOCTYPE html>
    <html>
    <head>
    <script src="polymer.min.js"></script>
    <script src="foo.js"></script>
    </head>
    <body>
    </body>
    </html>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual(['polymer.min.js', 'foo.js'], module.scripts_external)
        self.assertEqual([], module.inline_scripts)
        self.assertEqual([], module.stylesheets)
        self.assertEqual([], module.imports)
        self.assertTrue(module.has_decl)
        self.assertNotIn(
            'DOCTYPE html',
            module.html_contents_without_links_and_script)
    def test_parse_link_rel_import(self):
        """<link rel="import"> hrefs are collected as imports."""
        html = """<!DOCTYPE html>
    <html>
    <head>
    <link rel="import" href="x-foo.html">
    </head>
    <body>
    </body>
    </html>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual([], module.scripts_external)
        self.assertEqual([], module.inline_scripts)
        self.assertEqual([], module.stylesheets)
        self.assertEqual(['x-foo.html'], module.imports)
        self.assertTrue(module.has_decl)
    def test_parse_script_inline(self):
        """Inline <script> bodies are captured with their enclosing tag stack."""
        html = """<polymer-element name="tk-element-proto">
    <template>
    </template>
    <script>
    tvcm.require("foo");
    tvcm.require('bar');
    </script>
    </polymer-element>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual([], module.scripts_external)
        self.assertEqual(1, len(module.inline_scripts))
        self.assertEqual([], module.stylesheets)
        self.assertEqual([], module.imports)
        self.assertFalse(module.has_decl)
        script0 = module.inline_scripts[0]
        # Compare whitespace-insensitively; the parser preserves formatting.
        val = re.sub(r'\s+', '', script0.contents)
        inner_script = """tvcm.require("foo");tvcm.require('bar');"""
        self.assertEqual(inner_script, val)
        self.assertEqual(3, len(script0.open_tags))
        self.assertEqual('polymer-element', script0.open_tags[2].tag)
        self.assertNotIn(
            'tvcm.require("foo");',
            module.html_contents_without_links_and_script)
    def test_parse_script_src_stripping(self):
        """External script tags leave no residue in the stripped HTML."""
        html = """
    <script src="blah.js"></script>
    """
        module = parse_html_deps.HTMLModuleParser().Parse(html)
        self.assertEqual('',
                         module.html_contents_without_links_and_script)
    def test_parse_link_rel_stylesheet(self):
        """Stylesheet links are collected and re-expanded by the controller."""
        html = """<polymer-element name="hi">
    <template>
    <link rel="stylesheet" href="frameworkstyles.css">
    </template>
    </polymer-element>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual([], module.scripts_external)
        self.assertEqual([], module.inline_scripts)
        self.assertEqual(['frameworkstyles.css'], module.stylesheets)
        self.assertEqual([], module.imports)
        self.assertFalse(module.has_decl)
        class Ctl(html_generation_controller.HTMLGenerationController):
            def GetHTMLForStylesheetHRef(self, href):
                if href == 'frameworkstyles.css':
                    return '<style>FRAMEWORK</style>'
                return None
        gen_html = module.GenerateHTML(Ctl())
        ghtm = """<polymer-element name="hi">
    <template>
    <style>FRAMEWORK</style>
    </template>
    </polymer-element>"""
        self.assertEqual(ghtm, gen_html)
    def test_parse_inline_style(self):
        """Inline <style> blocks survive parsing and can be rewritten."""
        html = """<style>
    hello
    </style>"""
        module = parse_html_deps.HTMLModuleParser().Parse(html)
        self.assertEqual(html, module.html_contents_without_links_and_script)
        class Ctl(html_generation_controller.HTMLGenerationController):
            def GetHTMLForInlineStylesheet(self, contents):
                if contents == '\n    hello\n    ':
                    return '\n    HELLO\n    '
                return None
        gen_html = module.GenerateHTML(Ctl())
        ghtm = """<style>
    HELLO
    </style>"""
        self.assertEqual(ghtm, gen_html)
    def test_parse_style_import(self):
        """@import inside a <style> block is unsupported and must raise."""
        html = """<polymer-element name="x-blink">
    <template>
    <style>
    @import url(awesome.css);
    </style>
    </template>
    </polymer-element>"""
        parser = parse_html_deps.HTMLModuleParser()
        # Previously: assertRaises(lambda: ...) — which never called Parse at
        # all.  Invoke Parse through assertRaises so a failure to raise fails
        # the test.
        self.assertRaises(Exception, parser.Parse, html)
    def test_nested_templates(self):
        """Nested <template> elements round-trip unchanged."""
        orig_html = """<template>
    <template>
    <div id="foo"></div>
    </template>
    </template>"""
        parser = parse_html_deps.HTMLModuleParser()
        res = parser.Parse(orig_html)
        html = res.html_contents_without_links_and_script
        self.assertEqual(html, orig_html)
    def test_html_contents_basic(self):
        """Plain markup round-trips unchanged."""
        html = """<a b="c">d</a>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual(html, module.html_contents_without_links_and_script)
    def test_html_contents_with_entity(self):
        """Named entities are decoded to their unicode characters."""
        html = """<a>&rarr;</a>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual(u'<a>\u2192</a>',
                         module.html_contents_without_links_and_script)
    def test_html_content_with_charref(self):
        """Character references are decoded."""
        html = """<a>&#62;</a>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual('<a>></a>',
                         module.html_contents_without_links_and_script)
    def test_html_content_start_end_br(self):
        """Self-closing <br /> is normalized to <br/>."""
        html = """<a><br /></a>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual('<a><br/></a>',
                         module.html_contents_without_links_and_script)
    def test_html_content_start_end_img(self):
        """Self-closing <img> is normalized; attributes are re-ordered."""
        html = """<a><img src="foo.png" id="bar" /></a>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual('<a><img id="bar" src="foo.png"/></a>',
                         module.html_contents_without_links_and_script)
    def test_html_contents_with_link_stripping(self):
        """rel="import" links are removed from the stripped HTML."""
        html = """<a b="c">d</a>
    <link rel="import" href="x-foo.html">"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual("""<a b="c">d</a>""",
                         module.html_contents_without_links_and_script.strip())
    def test_html_contents_with_style_link_stripping(self):
        """rel="stylesheet" links are removed from the stripped HTML."""
        html = """<a b="c">d</a>
    <link rel="stylesheet" href="frameworkstyles.css">"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual("""<a b="c">d</a>""",
                         module.html_contents_without_links_and_script.strip())
    def test_br_does_not_raise(self):
        html = """<div><br/></div>"""
        parser = parse_html_deps.HTMLModuleParser()
        parser.Parse(html)
    def test_p_does_not_raise(self):
        html = """<div></p></div>"""
        parser = parse_html_deps.HTMLModuleParser()
        parser.Parse(html)
    def test_link_endlink_does_not_raise(self):
        html = """<link rel="stylesheet" href="foo.css"></link>"""
        parser = parse_html_deps.HTMLModuleParser()
        parser.Parse(html)
    def test_link_script_does_not_raise(self):
        html = """<link rel="stylesheet" href="foo.css">
    <script>
    </script>"""
        parser = parse_html_deps.HTMLModuleParser()
        parser.Parse(html)
    def test_script_with_script_inside_as_js(self):
        """A properly escaped <\\/script> inside JS must parse cleanly."""
        html = """<script>
    var html_lines = [
    '<script>',
    '<\/script>',
    ];
    </script>"""
        parser = parse_html_deps.HTMLModuleParser()
        parser.Parse(html)
    def test_invalid_script_escaping_raises(self):
        """A malformed close-tag escape inside JS must raise."""
        html = """<script>
    var html_lines = [
    '<script>',
    '< /script>',
    ];
    </script>"""
        parser = parse_html_deps.HTMLModuleParser()
        def DoIt():
            parser.Parse(html)
        self.assertRaises(Exception, DoIt)
    def test_script_with_cdata(self):
        """Stray close tags inside <script> are treated as script text."""
        html = """<script></h2></script>"""
        parser = parse_html_deps.HTMLModuleParser()
        module = parser.Parse(html)
        self.assertEqual(1, len(module.inline_scripts))
        self.assertEqual('</h2>', module.inline_scripts[0].contents)
| |
'''More Goodness of fit tests
contains
GOF : 1 sample gof tests based on Stephens 1970, plus AD A^2
bootstrap : vectorized bootstrap p-values for gof test with fitted parameters
Created : 2011-05-21
Author : Josef Perktold
parts based on ks_2samp and kstest from scipy.stats
(license: Scipy BSD, but were completely rewritten by Josef Perktold)
References
----------
'''
from __future__ import print_function
from statsmodels.compat.python import range, lmap, string_types, callable
import numpy as np
from scipy.stats import distributions
from statsmodels.tools.decorators import cache_readonly
from scipy.special import kolmogorov as ksprob
#from scipy.stats unchanged
#from scipy.stats unchanged
def ks_2samp(data1, data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent samples
    are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        two arrays of sample observations assumed to be drawn from a continuous
        distribution, sample sizes can be different

    Returns
    -------
    D : float
        KS statistic
    p-value : float
        two-tailed p-value (asymptotic Kolmogorov-Smirnov distribution with
        the Stephens small-sample correction applied to the argument)

    Notes
    -----
    This tests whether 2 samples are drawn from the same distribution. Note
    that, like in the case of the one-sample K-S test, the distribution is
    assumed to be continuous.  This is the two-sided test; one-sided tests
    are not implemented.  If the K-S statistic is small or the p-value is
    high, then we cannot reject the hypothesis that the distributions of the
    two samples are the same.

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345678)
    >>> from scipy import stats
    >>> rvs1 = stats.norm.rvs(size=200, loc=0., scale=1)
    >>> rvs2 = stats.norm.rvs(size=300, loc=0.5, scale=1.5)
    >>> d, p = ks_2samp(rvs1, rvs2)  # small p: reject equality
    """
    # Sort once; the ECDFs below are evaluated on the pooled sample.
    data1 = np.sort(np.asarray(data1))
    data2 = np.sort(np.asarray(data2))
    n1 = len(data1)
    n2 = len(data2)
    data_all = np.concatenate([data1, data2])
    #reminder: searchsorted inserts 2nd into 1st array
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0 * n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0 * n2)
    #Note: d absolute not signed distance
    d = np.max(np.absolute(cdf1 - cdf2))
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        prob = ksprob((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # best-effort: fall back to the conservative p-value of 1
        # (was a bare ``except:``, which also swallowed KeyboardInterrupt)
        prob = 1.0
    return d, prob
#from scipy.stats unchanged
def kstest(rvs, cdf, args=(), N=20, alternative = 'two_sided', mode='approx',**kwds):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two_sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : string or array or callable
string: name of a distribution in scipy.stats
array: 1-D observations of random variables
callable: function to generate random variables, requires keyword
argument `size`
cdf : string or callable
string: name of a distribution in scipy.stats, if rvs is a string then
cdf can evaluate to `False` or be the same as rvs
callable: function to evaluate cdf
args : tuple, sequence
distribution parameters, used if rvs or cdf are strings
N : int
sample size if rvs is string or callable
alternative : 'two_sided' (default), 'less' or 'greater'
defines the alternative hypothesis (see explanation)
mode : 'approx' (default) or 'asymp'
defines the distribution used for calculating p-value
'approx' : use approximation to exact distribution of test statistic
'asymp' : use asymptotic distribution of test statistic
Returns
-------
D : float
KS test statistic, either D, D+ or D-
p-value : float
one-tailed or two-tailed p-value
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, G(x)<=F(x), resp. G(x)>=F(x).
Examples
--------
>>> from scipy import stats
>>> import numpy as np
>>> from scipy.stats import kstest
>>> x = np.linspace(-15,15,9)
>>> kstest(x,'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> kstest('norm','',N=100)
(0.058352892479417884, 0.88531190944151261)
is equivalent to this
>>> np.random.seed(987654321)
>>> kstest(stats.norm.rvs(size=100),'norm')
(0.058352892479417884, 0.88531190944151261)
Test against one-sided alternative hypothesis:
>>> np.random.seed(987654321)
Shift distribution to larger values, so that cdf_dgp(x)< norm.cdf(x):
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
Testing t distributed random variables against normal distribution:
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the kstest does not reject the hypothesis that the sample
came from the normal distribution
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at a alpha=10% level
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
#cdf = getattr(stats, rvs).cdf
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError('if rvs is string, cdf has to be the same distribution')
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size':N}
vals = np.sort(rvs(*args,**kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
if alternative in ['two_sided', 'greater']:
Dplus = (np.arange(1.0, N+1)/N - cdfvals).max()
if alternative == 'greater':
return Dplus, distributions.ksone.sf(Dplus,N)
if alternative in ['two_sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return Dmin, distributions.ksone.sf(Dmin,N)
if alternative == 'two_sided':
D = np.max([Dplus,Dmin])
if mode == 'asymp':
return D, distributions.kstwobign.sf(D*np.sqrt(N))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :
return D, distributions.kstwobign.sf(D*np.sqrt(N))
else:
return D, distributions.ksone.sf(D,N)*2
#TODO: split into modification and pvalue functions separately ?
# for separate testing and combining different pieces
def dplus_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for the one-sided KS D+ (or D-).

    Returns the finite-sample modified statistic, an approximate upper-tail
    p-value exp(-2*z**2), and the count of upper critical values
    (0.82, 0.82, 1.00) exceeded; the low value repeats so the count is in
    {0, 2, 3}.
    '''
    root_n = np.sqrt(nobs)
    stat_modified = stat * (root_n + 0.12 + 0.11 / root_n)
    pval = np.exp(-2 * stat_modified**2)
    digits = (stat > np.array([0.82, 0.82, 1.00])).sum()
    return stat_modified, pval, digits
dminus_st70_upp = dplus_st70_upp
def d_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for the two-sided KS D.

    Returns the finite-sample modified statistic, an approximate upper-tail
    p-value 2*exp(-2*z**2), and the count of upper critical values
    (0.91, 0.91, 1.08) exceeded; the low value repeats so the count is in
    {0, 2, 3}.
    '''
    root_n = np.sqrt(nobs)
    stat_modified = stat * (root_n + 0.12 + 0.11 / root_n)
    pval = 2 * np.exp(-2 * stat_modified**2)
    digits = (stat > np.array([0.91, 0.91, 1.08])).sum()
    return stat_modified, pval, digits
def v_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for the Kuiper V statistic.

    Returns the finite-sample modified statistic z, an approximate upper-tail
    p-value (8*z**2 - 2)*exp(-2*z**2), and the count of upper critical values
    (1.06, 1.06, 1.26) exceeded; the low value repeats so the count is in
    {0, 2, 3}.
    '''
    root_n = np.sqrt(nobs)
    stat_modified = stat * (root_n + 0.155 + 0.24 / root_n)
    zsqu = stat_modified**2
    pval = (8 * zsqu - 2) * np.exp(-2 * zsqu)
    digits = (stat > np.array([1.06, 1.06, 1.26])).sum()
    return stat_modified, pval, digits
def wsqu_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for Cramer-von Mises W^2.

    Returns the finite-sample modified statistic, an approximate upper-tail
    p-value 0.05*exp(2.79 - 6*z), and nan for the critical-value digit count
    (no table entry; see the accompanying text).
    '''
    n_inv = 1. / nobs
    bias_adjusted = stat - 0.4 * n_inv + 0.6 * n_inv**2
    stat_modified = bias_adjusted * (1 + n_inv)
    pval = 0.05 * np.exp(2.79 - 6 * stat_modified)
    digits = np.nan  # no critical-value digits tabulated for W^2
    return stat_modified, pval, digits
def usqu_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for Watson's U^2.

    Returns the finite-sample modified statistic z, an approximate upper-tail
    p-value 2*exp(-2*z*pi**2), and the count of upper critical values
    (0.29, 0.29, 0.34) exceeded; the low value repeats so the count is in
    {0, 2, 3}.
    '''
    n_inv = 1. / nobs
    bias_adjusted = stat - 0.1 * n_inv + 0.1 * n_inv**2
    stat_modified = bias_adjusted * (1 + 0.8 * n_inv)
    pval = 2 * np.exp(-2 * stat_modified * np.pi**2)
    digits = (stat > np.array([0.29, 0.29, 0.34])).sum()
    return stat_modified, pval, digits
def a_st70_upp(stat, nobs):
    '''Stephens (1970) upper-tail approximation for the Anderson-Darling A.

    Returns the finite-sample modified statistic z, an approximate upper-tail
    p-value 1.273*exp(-z*pi**2), and the count of upper critical values
    (0.11, 0.11, 0.452) exceeded; the low value repeats so the count is in
    {0, 2, 3}.
    '''
    n_inv = 1. / nobs
    bias_adjusted = stat - 0.7 * n_inv + 0.9 * n_inv**2
    stat_modified = bias_adjusted * (1 + 1.23 * n_inv)
    pval = 1.273 * np.exp(-2 * stat_modified / 2. * np.pi**2)
    digits = (stat > np.array([0.11, 0.11, 0.452])).sum()
    return stat_modified, pval, digits
# Registry of p-value back-ends: maps back-end name -> {test id -> callable}.
# Each callable takes (stat, nobs) and returns (stat_modified, pval, digits).
gof_pvals = {}
# Stephens (1970) finite-sample modifications with upper-tail approximations.
gof_pvals['stephens70upp'] = {
    'd_plus' : dplus_st70_upp,
    'd_minus' : dplus_st70_upp,
    'd' : d_st70_upp,
    'v' : v_st70_upp,
    'wsqu' : wsqu_st70_upp,
    'usqu' : usqu_st70_upp,
    'a' : a_st70_upp }
def pval_kstest_approx(D, N):
    '''p-value for the two-sided KS D as in scipy.stats.kstest mode='approx'.

    Uses the asymptotic kstwobign distribution for large N or large p-values
    and the exact one-sided ksone distribution (doubled) otherwise.
    Returns (D, pval, nan) to match the registry's 3-tuple convention.
    '''
    pval_two = distributions.kstwobign.sf(D*np.sqrt(N))
    if N > 2666 or pval_two > 0.80 - N*0.3/1000.0 :
        return D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan
    else:
        return D, distributions.ksone.sf(D,N)*2, np.nan
# KS p-values exactly as computed by scipy.stats (exact one-sided / asymptotic).
gof_pvals['scipy'] = {
    'd_plus' : lambda Dplus, N: (Dplus, distributions.ksone.sf(Dplus, N), np.nan),
    'd_minus' : lambda Dmin, N: (Dmin, distributions.ksone.sf(Dmin,N), np.nan),
    'd' : lambda D, N: (D, distributions.kstwobign.sf(D*np.sqrt(N)), np.nan)
    }
gof_pvals['scipy_approx'] = {
    'd' : pval_kstest_approx }
class GOF(object):
    '''One Sample Goodness of Fit tests

    includes Kolmogorov-Smirnov D, D+, D-, Kuiper V, Cramer-von Mises W^2, U^2 and
    Anderson-Darling A, A^2. The p-values for all tests except for A^2 are based on
    the approximation given in Stephens 1970. A^2 has currently no p-values. For
    the Kolmogorov-Smirnov test the tests as given in scipy.stats are also available
    as options.

    Parameters (same convention as scipy.stats.kstest): ``rvs`` is either the
    data, a scipy.stats distribution name, or a callable generating a sample;
    ``cdf`` is the hypothesized distribution (name or callable); ``args`` are
    distribution parameters; ``N`` the sample size when ``rvs`` is generated.

    design: I might want to retest with different distributions, to calculate
    data summary statistics only once, or add separate class that holds
    summary statistics and data (sounds good).
    '''
    def __init__(self, rvs, cdf, args=(), N=20):
        # Resolve string names to scipy.stats distribution methods.
        if isinstance(rvs, string_types):
            #cdf = getattr(stats, rvs).cdf
            if (not cdf) or (cdf == rvs):
                cdf = getattr(distributions, rvs).cdf
                rvs = getattr(distributions, rvs).rvs
            else:
                raise AttributeError('if rvs is string, cdf has to be the same distribution')
        if isinstance(cdf, string_types):
            cdf = getattr(distributions, cdf).cdf
        if callable(rvs):
            # Draw a fresh sample of size N from the generator.
            kwds = {'size':N}
            vals = np.sort(rvs(*args,**kwds))
        else:
            # rvs is the data itself.
            vals = np.sort(rvs)
            N = len(vals)
        # Summary statistics cached for all test statistics below.
        cdfvals = cdf(vals, *args)
        self.nobs = N               # sample size
        self.vals_sorted = vals     # sorted sample
        self.cdfvals = cdfvals      # hypothesized CDF evaluated at the sample
    @cache_readonly
    def d_plus(self):
        '''Kolmogorov-Smirnov D+ : max of ECDF above the hypothesized CDF'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        return (np.arange(1.0, nobs+1)/nobs - cdfvals).max()
    @cache_readonly
    def d_minus(self):
        '''Kolmogorov-Smirnov D- : max of hypothesized CDF above the ECDF'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        return (cdfvals - np.arange(0.0, nobs)/nobs).max()
    @cache_readonly
    def d(self):
        '''Kolmogorov-Smirnov two-sided D : max(D+, D-)'''
        return np.max([self.d_plus, self.d_minus])
    @cache_readonly
    def v(self):
        '''Kuiper'''
        return self.d_plus + self.d_minus
    @cache_readonly
    def wsqu(self):
        '''Cramer von Mises'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        #use literal formula, TODO: simplify with arange(,,2)
        wsqu = ((cdfvals - (2. * np.arange(1., nobs+1) - 1)/nobs/2.)**2).sum() \
               + 1./nobs/12.
        return wsqu
    @cache_readonly
    def usqu(self):
        '''Watson U^2 : W^2 corrected by the mean deviation of the CDF values'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        #use literal formula, TODO: simplify with arange(,,2)
        usqu = self.wsqu - nobs * (cdfvals.mean() - 0.5)**2
        return usqu
    @cache_readonly
    def a(self):
        '''Anderson-Darling A'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        #one loop instead of large array
        msum = 0
        for j in range(1,nobs):
            mj = cdfvals[j] - cdfvals[:j]
            mask = (mj > 0.5)
            mj[mask] = 1 - mj[mask]
            msum += mj.sum()
        a = nobs / 4. - 2. / nobs * msum
        return a
    @cache_readonly
    def asqu(self):
        '''Stephens 1974, doesn't have p-value formula for A^2'''
        nobs = self.nobs
        cdfvals = self.cdfvals
        asqu = -((2. * np.arange(1., nobs+1) - 1) *
                 (np.log(cdfvals) + np.log(1-cdfvals[::-1]) )).sum()/nobs - nobs
        return asqu
    def get_test(self, testid='d', pvals='stephens70upp'):
        '''Return the test statistic and its approximate p-value.

        testid selects the statistic ('d', 'd_plus', 'd_minus', 'v', 'wsqu',
        'usqu', 'a'); pvals selects the back-end in the module-level
        ``gof_pvals`` registry.

        NOTE(review): the return shape differs by back-end — for
        'stephens70upp' it is ((stat_modified, pval, digits), stat), otherwise
        just (stat_modified, pval, digits).  Confirm callers expect this.
        '''
        #print gof_pvals[pvals][testid]
        stat = getattr(self, testid)
        if pvals == 'stephens70upp':
            return gof_pvals[pvals][testid](stat, self.nobs), stat
        else:
            return gof_pvals[pvals][testid](stat, self.nobs)
def gof_mc(randfn, distr, nobs=100):
    '''Monte Carlo size check: rejection rates of all GOF tests.

    Draws 1000 samples of size ``nobs`` from ``randfn``, tests each against
    ``distr`` and prints the empirical rejection frequencies at the 1%, 5%
    and 10% levels.
    '''
    from collections import defaultdict
    outcomes = defaultdict(list)
    for _ in range(1000):
        sample = randfn(nobs)
        tester = GOF(sample, distr)
        for name in all_gofs:
            outcomes[name].append(tester.get_test(name, 'stephens70upp')[0][1])
    pvals = np.array([outcomes[name] for name in all_gofs])
    print(' ', ' '.join(all_gofs))
    print('at 0.01:', (pvals < 0.01).mean(1))
    print('at 0.05:', (pvals < 0.05).mean(1))
    print('at 0.10:', (pvals < 0.1).mean(1))
def asquare(cdfvals, axis=0):
'''vectorized Anderson Darling A^2, Stephens 1974'''
ndim = len(cdfvals.shape)
nobs = cdfvals.shape[axis]
slice_reverse = [slice(None)] * ndim #might make copy if not specific axis???
islice = [None] * ndim
islice[axis] = slice(None)
slice_reverse[axis] = slice(None, None, -1)
asqu = -((2. * np.arange(1., nobs+1)[islice] - 1) *
(np.log(cdfvals) + np.log(1-cdfvals[slice_reverse]))/nobs).sum(axis) \
- nobs
return asqu
#class OneSGOFFittedVec(object):
# '''for vectorized fitting'''
# currently I use the bootstrap as function instead of full class
#note: kwds loc and scale are a pain
# I would need to overwrite rvs, fit and cdf depending on fixed parameters
#def bootstrap(self, distr, args=(), kwds={}, nobs=200, nrep=1000,
def bootstrap(distr, args=(), nobs=200, nrep=100, value=None, batch_size=None):
    '''Monte Carlo (or parametric bootstrap) p-values for gof

    currently hardcoded for A^2 only

    Assumes ``distr`` has a vectorized ``fit_vec`` method; an (nrep, nobs)
    sample is built and analysed in one step. Also works with nrep=1.
    When ``batch_size`` is given, replications are generated in batches of
    that size (requires ``value``).
    '''
    #signature similar to kstest ?
    #delegate to fn ?

    def _sorted_cdfvals(shape):
        # draw, refit per replication, and evaluate the fitted cdf, sorted
        # along the observation axis
        sample = distr.rvs(args, **{'size': shape})
        fitted = distr.fit_vec(sample, axis=1)
        fitted = lmap(lambda p: np.expand_dims(p, 1), fitted)
        return np.sort(distr.cdf(sample, fitted), axis=1)

    if batch_size is not None:
        if value is None:
            raise ValueError('using batching requires a value')
        n_batch = int(np.ceil(nrep/float(batch_size)))
        exceed = 0
        for _ in range(n_batch):
            stat = asquare(_sorted_cdfvals((batch_size, nobs)), axis=1)
            exceed += (stat >= value).sum()
        # denominator is the number of replications actually drawn
        return exceed / float(n_batch * batch_size)

    stat = asquare(_sorted_cdfvals((nrep, nobs)), axis=1)
    if value is None:
        # return all bootstrap statistics, sorted
        return np.sort(stat)
    # specific p-value
    return (stat >= value).mean()
def bootstrap2(value, distr, args=(), nobs=200, nrep=100):
    '''Monte Carlo (or parametric bootstrap) p-values for gof

    currently hardcoded for A^2 only

    Non-vectorized variant: loops over the parametric bootstrap
    replications one at a time and returns the specific p-value.
    '''
    #signature similar to kstest ?
    #delegate to fn ?
    exceed = 0
    for _ in range(nrep):
        sample = distr.rvs(args, **{'size':nobs})
        fitted = distr.fit_vec(sample)
        sorted_cdf = np.sort(distr.cdf(sample, fitted))
        exceed += (asquare(sorted_cdf, axis=0) >= value)
    return exceed * 1. / nrep
class NewNorm(object):
    '''Holder for a modified normal distribution.

    Exposes a vectorized ``fit_vec`` plus ``cdf``/``rvs`` that take the
    (loc, scale) parameters as a single ``args`` sequence.
    '''

    def fit_vec(self, x, axis=0):
        # ML estimates of loc and scale along ``axis``
        return x.mean(axis), x.std(axis)

    def cdf(self, x, args):
        loc, scale = args[0], args[1]
        return distributions.norm.cdf(x, loc=loc, scale=scale)

    def rvs(self, args, size):
        # sample standard normals, then shift and rescale
        loc, scale = args[0], args[1]
        return loc + scale * distributions.norm.rvs(size=size)
if __name__ == '__main__':
    # Smoke tests / Monte Carlo checks for the GOF machinery above.
    from scipy import stats
    #rvs = np.random.randn(1000)
    rvs = stats.t.rvs(3, size=200)
    print('scipy kstest')
    print(kstest(rvs, 'norm'))
    goft = GOF(rvs, 'norm')
    print(goft.get_test())

    all_gofs = ['d', 'd_plus', 'd_minus', 'v', 'wsqu', 'usqu', 'a']
    for ti in all_gofs:
        print(ti, goft.get_test(ti, 'stephens70upp'))

    # empirical size check under the null (standard normal data)
    print('\nIs it correctly sized?')
    from collections import defaultdict
    results = defaultdict(list)
    nobs = 200
    for i in range(100):
        rvs = np.random.randn(nobs)
        goft = GOF(rvs, 'norm')
        for ti in all_gofs:
            results[ti].append(goft.get_test(ti, 'stephens70upp')[0][1])

    resarr = np.array([results[ti] for ti in all_gofs])
    print(' ', ' '.join(all_gofs))
    print('at 0.01:', (resarr < 0.01).mean(1))
    print('at 0.05:', (resarr < 0.05).mean(1))
    print('at 0.10:', (resarr < 0.1).mean(1))

    gof_mc(lambda nobs: stats.t.rvs(3, size=nobs), 'norm', nobs=200)

    nobs = 200
    nrep = 100
    bt = bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=nrep, value=None)
    quantindex = np.floor(nrep * np.array([0.99, 0.95, 0.9])).astype(int)
    print(bt[quantindex])

    #the bootstrap results match Stephens pretty well for nobs=100, but not so well for
    #large (1000) or small (20) nobs
    '''
    >>> np.array([15.0, 10.0, 5.0, 2.5, 1.0])/100. #Stephens
    array([ 0.15 ,  0.1  ,  0.05 ,  0.025,  0.01 ])
    >>> nobs = 100
    >>> [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=10000, value=c/ (1 + 4./nobs - 25./nobs**2)) for c in [0.576, 0.656, 0.787, 0.918, 1.092]]
    [0.1545, 0.10009999999999999, 0.049000000000000002, 0.023, 0.0104]
    >>>
    '''

    #test equality of loop, vectorized, batch-vectorized
    np.random.seed(8765679)
    resu1 = bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=100,
                      value=0.576/(1 + 4./nobs - 25./nobs**2))
    np.random.seed(8765679)
    tmp = [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=1) for _ in range(100)]
    resu2 = (np.array(tmp) > 0.576/(1 + 4./nobs - 25./nobs**2)).mean()
    np.random.seed(8765679)
    tmp = [bootstrap(NewNorm(), args=(0,1), nobs=nobs, nrep=1,
                     value=0.576/ (1 + 4./nobs - 25./nobs**2),
                     batch_size=10) for _ in range(10)]
    # Bug fix: the original referenced an undefined name ``resu`` here
    # (NameError); the batched p-values are collected in ``tmp``.
    resu3 = np.array(tmp).mean()

    from numpy.testing import assert_almost_equal, assert_array_almost_equal
    assert_array_almost_equal(resu1, resu2, 15)
    assert_array_almost_equal(resu2, resu3, 15)
| |
# -*- coding: utf-8 -*-
'''ordering test mixins'''
# Test fixtures: tiny attribute-only classes that traverse() tests walk over.
# NOTE(review): FilterMixin.test_traverse expects stoog4's data to appear
# nested under stoog3 — confirm whether stoog4 should be an inner class of
# stoog3 in the canonical fixture layout.
class stooges: #@IgnorePep8
    name = 'moe'
    age = 40
class stoog2: #@IgnorePep8
    name = 'larry'
    age = 50
class stoog3: #@IgnorePep8
    name = 'curly'
    age = 60
class stoog4: #@IgnorePep8
    name = 'beastly'
    age = 969
class MathMixin(object):
    '''Tests for math/statistics knife operations (self.mclass supplied by
    the concrete test case).'''

    def test_pipe(self):
        primary = self.mclass(10, 5, 100, 2, 1000)
        secondary = self.pipe()
        outcome = primary.minmax().pipe(secondary).merge().back().min().get()
        self.assertEqual(outcome, 2, outcome)
        outcome = primary.original().minmax().pipe(secondary).merge().back().max().get()
        self.assertEqual(outcome, 1000, outcome)
        outcome = primary.original().minmax().pipe(secondary).merge().back().sum().get()
        self.assertEqual(outcome, 1002, outcome)

    def test_average(self):
        result = self.mclass(10, 40, 45).average().get()
        self.assertEqual(result, 31.666666666666668)

    def test_count(self):
        tally = self.mclass(11, 3, 5, 11, 7, 3, 11).count().get()
        self.assertEqual(tally.overall, [(11, 3), (3, 2), (5, 1), (7, 1)])
        # most common
        self.assertEqual(tally.most, 11)
        # least common
        self.assertEqual(tally.least, 7)

    def test_max(self):
        from stuf import stuf
        trio = [
            stuf(name='moe', age=40),
            stuf(name='larry', age=50),
            stuf(name='curly', age=60),
        ]
        self.assertEqual(self.mclass(1, 2, 4).max().get(), 4)
        oldest = stuf(self.mclass(*trio).worker(lambda x: x.age).max().get())
        self.assertEqual(oldest, stuf(name='curly', age=60))

    def test_median(self):
        self.assertEqual(self.mclass(4, 5, 7, 2, 1).median().get(), 4)
        self.assertEqual(self.mclass(4, 5, 7, 2, 1, 8).median().get(), 4.5)

    def test_min(self):
        self.assertEqual(self.mclass(10, 5, 100, 2, 1000).min().get(), 2)
        keyed = self.mclass(10, 5, 100, 2, 1000).worker(
            lambda x: x % 100 == 0
        ).min().get()
        self.assertEqual(keyed, 10)

    def test_minmax(self):
        self.assertEqual(self.mclass(1, 2, 4).minmax().get(), (1, 4))
        self.assertEqual(
            self.mclass(10, 5, 100, 2, 1000).minmax().get(), (2, 1000))

    def test_range(self):
        self.assertEqual(self.mclass(3, 5, 7, 3, 11).range().get(), 8)

    def test_sum(self):
        self.assertEqual(self.mclass(1, 2, 3).sum().get(), 6)
        self.assertEqual(self.mclass(1, 2, 3).sum(1).get(), 7)
        # with precision, ten 0.1s sum exactly to 1.0
        result = self.mclass(
            .1, .1, .1, .1, .1, .1, .1, .1, .1, .1
        ).sum(precision=True).get()
        self.assertEqual(result, 1.0)
class CmpMixin(object):
    '''Tests for boolean/set-style knife comparisons.'''

    def test_all(self):
        from operator import truth
        # None is falsy, so `all` fails over the first sequence
        self.assertFalse(
            self.mclass(True, 1, None, 'yes').worker(truth).all().get())
        self.assertTrue(
            self.mclass(2, 4, 6, 8).worker(lambda x: x % 2 == 0).all().get())

    def test_any(self):
        self.assertTrue(
            self.mclass(None, 0, 'yes', False).worker(bool).any().get())
        self.assertTrue(
            self.mclass(1, 4, 5, 9).worker(lambda x: x % 2 == 0).any().get())

    def test_difference(self):
        self.assertEqual(
            self.mclass(
                [1, 2, 3, 4, 5], [5, 2, 10], [10, 11, 2]
            ).difference().get(),
            [1, 3, 4])
        # symmetric difference variant
        self.assertEqual(
            self.mclass(
                [1, 3, 4, 5], [5, 2, 10], [10, 11, 2]
            ).difference(True).get(),
            [1, 3, 4, 11])

    def test_intersection(self):
        self.assertEqual(
            self.mclass(
                [1, 2, 3], [101, 2, 1, 10], [2, 1]
            ).intersection().get(),
            [1, 2])

    def test_union(self):
        self.assertEqual(
            self.mclass([1, 2, 3], [101, 2, 1, 10], [2, 1]).union().get(),
            [1, 10, 3, 2, 101])

    def test_unique(self):
        self.assertEqual(
            self.mclass(1, 2, 1, 3, 1, 4).unique().get(), [1, 2, 3, 4])
        self.assertEqual(
            self.mclass(1, 2, 1, 3, 1, 4).worker(round).unique().get(),
            [1, 2, 3, 4])
class OrderMixin(object):
    '''Tests for ordering-related knife operations.'''

    def test_shuffle(self):
        # order is random; only the length is stable
        shuffled = self.mclass(1, 2, 3, 4, 5, 6).shuffle()
        self.assertEqual(len(shuffled), len([5, 4, 6, 3, 1, 2]))

    def test_group(self):
        self.assertEqual(
            self.mclass(1.3, 2.1, 2.4).group().get(),
            [(1.3, (1.3,)), (2.1, (2.1,)), (2.4, (2.4,))])
        from math import floor
        # grouping key supplied by the worker
        self.assertEqual(
            self.mclass(1.3, 2.1, 2.4).worker(floor).group().get(),
            [(1.0, (1.3,)), (2.0, (2.1, 2.4))])

    def test_combo(self):
        self.assertEqual(
            self.mclass(5, 4, 3, 2, 1).reverse().sort().get(),
            [1, 2, 3, 4, 5])

    def test_reverse(self):
        self.assertEqual(
            self.mclass(5, 4, 3, 2, 1).reverse().get(), [1, 2, 3, 4, 5])

    def test_sort(self):
        from math import sin
        # sort keyed on sin(x)
        self.assertEqual(
            self.mclass(1, 2, 3, 4, 5, 6).worker(sin).sort().get(),
            [5, 4, 6, 3, 1, 2])
        self.assertEqual(
            self.mclass(4, 6, 65, 3, 63, 2, 4).sort().get(),
            [2, 3, 4, 4, 6, 63, 65])
class FilterMixin(object):
    # Tests for pattern matching, traversal and predicate filtering.
    # Expected traverse() values depend on exact ChainMap/OrderedDict
    # insertion order, so the literals below are order-sensitive.
    def test_pattern(self):
        # default pattern type appears to be shell-format style; regex and
        # glob are selected explicitly via ``type=``
        test = self.mclass(
            'This is the first test',
            'This is the second test',
            'This is the third test',
        ).pattern('{} first {}')
        self.assertEqual(
            test.filter().get(), 'This is the first test'
        )
        self.assertEqual(
            test.original().pattern(
                '. third .', type='regex'
            ).filter().get(), 'This is the third test'
        )
        self.assertEqual(
            test.original().pattern(
                '* second *', type='glob'
            ).filter().get(), 'This is the second test'
        )
    def test_traverse(self):
        # walks the class attributes of the stooge fixtures; note that
        # stoog4's members show up chained behind stoog3's
        from knife._compat import ChainMap, OrderedDict
        get = self.mclass(stooges, stoog2, stoog3).traverse().get()
        self.assertEqual(
            get,
            [ChainMap(OrderedDict([
                ('classname', 'stooges'), ('age', 40), ('name', 'moe'),
            ])),
            ChainMap(OrderedDict([
                ('classname', 'stoog2'), ('age', 50), ('name', 'larry'),
            ])),
            ChainMap(
                OrderedDict([
                    ('classname', 'stoog3'), ('age', 60), ('name', 'curly'),
                ]),
                OrderedDict([
                    ('age', 969), ('name', 'beastly'), ('classname', 'stoog4'),
                ])
            )],
        )
        # predicate used with traverse(True): drop 'name' and dunders
        def test(x): #@IgnorePep8
            if x[0] == 'name':
                return True
            elif x[0].startswith('__'):
                return True
            return False
        self.assertEqual(
            self.mclass(
                stooges, stoog2, stoog3
            ).worker(test).traverse(True).get(),
            [ChainMap(OrderedDict([('classname', 'stooges'), ('age', 40)])),
            ChainMap(OrderedDict([('classname', 'stoog2'), ('age', 50)])),
            ChainMap(
                OrderedDict([('classname', 'stoog3'), ('age', 60)]),
                OrderedDict([('classname', 'stoog4'), ('age', 969)])
            )],
        )
    def test_attributes(self):
        # attrs(): pluck object attributes; missing attribute -> empty result
        from stuf import stuf
        stooge = [
            stuf(name='moe', age=40),
            stuf(name='larry', age=50),
            stuf(name='curly', age=60)
        ]
        self.assertEqual(
            self.mclass(*stooge).attrs('name').get(),
            ['moe', 'larry', 'curly'],
        )
        self.assertEqual(
            self.mclass(*stooge).attrs('name', 'age').get(),
            [('moe', 40), ('larry', 50), ('curly', 60)],
        )
        self.assertEqual(
            self.mclass(*stooge).attrs('place').get(), [],
        )
    def test_items(self):
        # items(): pluck by mapping key or sequence index
        from stuf import stuf
        stooge = [
            stuf(name='moe', age=40),
            stuf(name='larry', age=50),
            stuf(name='curly', age=60)
        ]
        self.assertEqual(
            self.mclass(*stooge).items('name').get(),
            ['moe', 'larry', 'curly'],
        )
        self.assertEqual(
            self.mclass(*stooge).items('name', 'age').get(),
            [('moe', 40), ('larry', 50), ('curly', 60)],
        )
        stooge = [['moe', 40], ['larry', 50], ['curly', 60]]
        self.assertEqual(
            self.mclass(*stooge).items(0).get(), ['moe', 'larry', 'curly'],
        )
        self.assertEqual(self.mclass(*stooge).items(1).get(), [40, 50, 60])
        self.assertEqual(self.mclass(*stooge).items('place').get(), [])
    def test_filter(self):
        # invert=True keeps items the worker rejects
        self.assertEqual(
            self.mclass(1, 2, 3, 4, 5, 6).worker(
                lambda x: x % 2 == 0
            ).filter(invert=True).get(), [1, 3, 5]
        )
        self.assertEqual(
            self.mclass(1, 2, 3, 4, 5, 6).worker(
                lambda x: x % 2 == 0
            ).filter().get(), [2, 4, 6]
        )
    def test_duality(self):
        # duality(): (matching, non-matching) partition in one pass
        self.assertEqual(
            self.mclass(1, 2, 3, 4, 5, 6).worker(
                lambda x: x % 2 == 0
            ).duality().get(),
            ((2, 4, 6), (1, 3, 5))
        )
class SliceMixin(object):
    '''Tests for slicing/selection knife operations.'''

    def test_dice(self):
        chunks = self.mclass(
            'moe', 'larry', 'curly', 30, 40, 50, True
        ).dice(2, 'x').get()
        # 'x' pads the final short chunk
        self.assertEqual(
            chunks,
            [('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')])

    def test_first(self):
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).first().get(), 5)
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).first(2).get(), [5, 4])

    def test_combo(self):
        self.assertEqual(
            self.mclass(5, 4, 3, 2, 1).initial().rest().slice(1, 2).last().get(),
            3)

    def test_index(self):
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).at(2).get(), 3)
        # out-of-range index falls back to the supplied default
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).at(10, 11).get(), 11)

    def test_slice(self):
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).slice(2).get(), [5, 4])
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).slice(2, 4).get(), [3, 2])
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).slice(2, 4, 2).get(), 3)

    def test_last(self):
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).last().get(), 1)
        self.assertEqual(self.mclass(5, 4, 3, 2, 1).last(2).get(), [2, 1])

    def test_initial(self):
        self.assertEqual(
            self.mclass(5, 4, 3, 2, 1).initial().get(), [5, 4, 3, 2])

    def test_rest(self):
        self.assertEqual(
            self.mclass(5, 4, 3, 2, 1).rest().get(), [4, 3, 2, 1])

    def test_choice(self):
        self.assertEqual(
            len(list(self.mclass(1, 2, 3, 4, 5, 6).choice())), 1)

    def test_sample(self):
        self.assertEqual(
            len(self.mclass(1, 2, 3, 4, 5, 6).sample(3).get()), 3)
class ReduceMixin(object):
    '''Tests for reducing/combining knife operations.'''

    def test_flatten(self):
        self.assertEqual(
            self.mclass([[1, [2], [3, [[4]]]], 'here']).flatten().get(),
            [1, 2, 3, 4, 'here'])

    def test_merge(self):
        merged = self.mclass(
            ['moe', 'larry', 'curly'], [30, 40, 50], [True, False, False]
        ).merge().get()
        self.assertEqual(
            merged,
            ['moe', 'larry', 'curly', 30, 40, 50, True, False, False])

    def test_reduce(self):
        add = lambda x, y: x + y
        self.assertEqual(self.mclass(1, 2, 3).worker(add).reduce().get(), 6)
        # with initial value 1
        self.assertEqual(self.mclass(1, 2, 3).worker(add).reduce(1).get(), 7)
        self.assertEqual(
            self.mclass([0, 1], [2, 3], [4, 5]).worker(add).reduce(
                reverse=True).get(),
            [4, 5, 2, 3, 0, 1])
        self.assertEqual(
            self.mclass([0, 1], [2, 3], [4, 5]).worker(add).reduce(
                [0, 0], True).get(),
            [4, 5, 2, 3, 0, 1, 0, 0])

    def test_zip(self):
        # auto
        zipped = self.mclass(
            ['moe', 'larry', 'curly'], [30, 40, 50], [True, False, False]
        ).zip().get()
        self.assertEqual(
            zipped,
            [('moe', 30, True), ('larry', 40, False), ('curly', 50, False)])
class RepeatMixin(object):
    '''Tests for repetition/copy/combinatoric knife operations.'''

    def test_repeat(self):
        def collect(*args): #@IgnorePep8
            return list(args)
        self.assertEqual(
            self.mclass(40, 50, 60).repeat(3).get(),
            [(40, 50, 60), (40, 50, 60), (40, 50, 60)])
        self.assertEqual(
            self.mclass(40, 50, 60).worker(collect).repeat(3, True).get(),
            [[40, 50, 60], [40, 50, 60], [40, 50, 60]])

    def test_copy(self):
        source = [[1, [2, 3]], [4, [5, 6]]]
        cloned = self.mclass(source).copy().get()
        # deep copy: equal values but brand-new outer and nested objects
        self.assertFalse(cloned is source)
        self.assertListEqual(cloned, source)
        self.assertFalse(cloned[0] is source[0])
        self.assertListEqual(cloned[0], source[0])
        self.assertFalse(cloned[1] is source[1])
        self.assertListEqual(cloned[1], source[1])

    def test_permutations(self):
        self.assertEqual(
            self.mclass(40, 50, 60).permutate(2).get(),
            [(40, 50), (40, 60), (50, 40), (50, 60), (60, 40), (60, 50)])

    def test_combination(self):
        self.assertEqual(
            self.mclass(40, 50, 60).combinate(2).get(),
            [(40, 50), (40, 60), (50, 60)])
class MapMixin(object):
    # Tests for mapping-style knife operations; the exact positional/keyword
    # argument-merging behavior of params()+kwargmap/argmap is pinned by the
    # literal expected values below.
    def test_factory(self):
        # worker acts as a factory turning item pairs into stuf mappings
        from stuf import stuf
        thing = self.mclass(
            [('a', 1), ('b', 2), ('c', 3)], [('a', 1), ('b', 2), ('c', 3)]
        ).worker(stuf).map().get()
        self.assertEqual(
            thing, [stuf(a=1, b=2, c=3), stuf(a=1, b=2, c=3)]
        )
    def test_kwargmap(self):
        # each incoming item is an ((args...), {kwargs...}) pair
        def test(*args, **kw):
            return sum(args) * sum(kw.values())
        self.assertEqual(
            self.mclass(
                ((1, 2), {'a': 2}), ((2, 3), {'a': 2}), ((3, 4), {'a': 2})
            ).worker(test).kwargmap().get(),
            [6, 10, 14],
        )
        # kwargmap(True) merges params(...) into each call
        self.assertEqual(
            self.mclass(
                ((1, 2), {'a': 2}), ((2, 3), {'a': 2}), ((3, 4), {'a': 2})
            ).worker(test).params(
                1, 2, 3, b=5, w=10, y=13
            ).kwargmap(True).get(),
            [270, 330, 390],
        )
    def test_argmap(self):
        self.assertEqual(
            self.mclass(
                (1, 2), (2, 3), (3, 4)
            ).worker(lambda x, y: x * y).argmap().get(),
            [2, 6, 12],
        )
        # argmap(True) appends params(...) to each item's positional args
        self.assertEqual(
            self.mclass((1, 2), (2, 3), (3, 4)).worker(
                lambda x, y, z, a, b: x * y * z * a * b
            ).params(7, 8, 9).argmap(True).get(),
            [1008, 3024, 6048],
        )
    def test_map(self):
        self.assertEqual(
            self.mclass(1, 2, 3).worker(lambda x: x * 3).map().get(),
            [3, 6, 9],
        )
    def test_invoke(self):
        # invoke() calls the named method on each item; params(1) is the
        # argument passed to index()
        self.assertEqual(
            self.mclass(
                [5, 1, 7], [3, 2, 1]
            ).params(1).invoke('index').get(),
            [1, 2],
        )
        # sort() mutates in place and returns None, so the items themselves
        # are kept (now sorted)
        self.assertEqual(
            self.mclass([5, 1, 7], [3, 2, 1]).invoke('sort').get(),
            [[1, 5, 7], [1, 2, 3]],
        )
    def test_mapping(self):
        # mapping(True) -> keys, mapping(values=True) -> values,
        # with a worker -> applied to (key, value) pairs
        self.assertEqual(
            self.mclass(
                dict([(1, 2), (2, 3), (3, 4)]), dict([(1, 2), (2, 3), (3, 4)])
            ).mapping(True).get(), [1, 2, 3, 1, 2, 3],
        )
        self.assertEqual(
            self.mclass(
                dict([(1, 2), (2, 3), (3, 4)]), dict([(1, 2), (2, 3), (3, 4)])
            ).mapping(values=True).get(),
            [2, 3, 4, 2, 3, 4],
        )
        self.assertEqual(
            self.mclass(
                dict([(1, 2), (2, 3), (3, 4)]), dict([(1, 2), (2, 3), (3, 4)])
            ).worker(lambda x, y: x * y).mapping().get(),
            [2, 6, 12, 2, 6, 12],
        )
class Mixin(object):
    # Core knife behavior: construction, append/prepend, the undo/snapshot
    # state machine, wrapping, and string coercions. The undo() expectations
    # encode an exact call-history sequence, so statement order matters.
    def test_repr(self):
        from stuf.six import strings
        self.assertIsInstance(
            self.mclass([1, 2, 3, 4, 5, 6]).__repr__(), strings,
        )
    def test_append(self):
        self.assertEqual(self.mclass().append('foo').peek(), 'foo')
        self.assertListEqual(
            self.mclass().append(1, 2, 3, 4, 5, 6).peek(),
            [1, 2, 3, 4, 5, 6],
        )
    def test_prepend(self):
        self.assertEqual(self.mclass().prepend('foo').peek(), 'foo')
        self.assertListEqual(
            self.mclass().prepend(1, 2, 3, 4, 5, 6).peek(), [1, 2, 3, 4, 5, 6]
        )
    def test_undo(self):
        queue = self.mclass(1, 2, 3).prepend(1, 2, 3, 4, 5, 6)
        self.assertEqual(queue.peek(), [1, 2, 3, 4, 5, 6, 1, 2, 3])
        # undo() reverts the single preceding append
        queue.append(1).undo()
        self.assertEqual(queue.peek(), [1, 2, 3, 4, 5, 6, 1, 2, 3])
        queue.append(1).append(2).undo()
        self.assertEqual(queue.peek(), [1, 2, 3, 4, 5, 6, 1, 2, 3, 1])
        # undo(2) reverts the two preceding appends
        queue.append(1).append(2).undo(2)
        self.assertEqual(queue.peek(), [1, 2, 3, 4, 5, 6, 1, 2, 3, 1])
        # baseline() rolls back to the snapshot taken above
        queue.snapshot().append(1).append(2).baseline()
        self.assertEqual(queue.peek(), [1, 2, 3, 4, 5, 6, 1, 2, 3, 1])
        # original() rolls back to the initial contents
        queue.original()
        self.assertEqual(queue.peek(), [1, 2, 3])
    def test_wrap(self):
        # wrap() changes the container type produced by peek()/get()
        self.assertIsInstance(
            self.mclass(1, 2, 3, 4, 5, 6).wrap(tuple).peek(), tuple,
        )
        self.assertTupleEqual(
            self.mclass(1, 2, 3, 4, 5, 6).wrap(tuple).peek(),
            (1, 2, 3, 4, 5, 6),
        )
    def test_ascii(self):
        from stuf.six import u, b
        self.assertEqual(
            self.mclass(
                [1], True, r't', b('i'), u('g'), None, (1,)
            ).ascii().oneach().peek(),
            [b('[1]'), b('True'), b('t'), b('i'), b('g'), b('None'), b('(1,)')]
        )
    def test_bytes(self):
        from stuf.six import u, b
        self.assertEqual(
            self.mclass(
                [1], True, r't', b('i'), u('g'), None, (1,)
            ).bytes().oneach().peek(),
            [b('[1]'), b('True'), b('t'), b('i'), b('g'), b('None'), b('(1,)')]
        )
    def test_unicode(self):
        from stuf.six import u, b
        self.assertEqual(
            self.mclass(
                [1], True, r't', b('i'), u('g'), None, (1,)
            ).unicode().oneach().peek(),
            [u('[1]'), u('True'), u('t'), u('i'), u('g'), u('None'), u('(1,)')]
        )
| |
"""
String Constants used in Hops-Util: Environment variables, Kafka Config, SSL Config etc.
"""
class HTTP_CONFIG:
    """
    HTTP String constants
    """
    # header names / values (note: "Content-type" casing is kept as-is;
    # HTTP header names are case-insensitive)
    HTTP_CONTENT_TYPE = "Content-type"
    HTTP_APPLICATION_JSON = "application/json"
    HTTP_AUTHORIZATION = "Authorization"
    # request methods
    HTTP_POST = "POST"
    HTTP_PUT = "PUT"
    HTTP_GET = "GET"
    HTTP_DELETE = "DELETE"
    # status codes
    HTTP_UNAUTHORIZED = 401
class ENV_VARIABLES:
    """
    Environment variable names (accessible in os.environ)
    """
    KAFKA_BROKERS_ENV_VAR = "KAFKA_BROKERS"
    ELASTIC_ENDPOINT_ENV_VAR = "ELASTIC_ENDPOINT"
    PWD_ENV_VAR = "PWD"
    KAFKA_VERSION_ENV_VAR = "KAFKA_VERSION"
    LIVY_VERSION_ENV_VAR = "LIVY_VERSION"
    SPARK_VERSION_ENV_VAR = "SPARK_VERSION"
    # NOTE(review): attribute name says "END_VAR" (likely a typo for
    # "ENV_VAR") — kept for backward compatibility with existing callers.
    REST_ENDPOINT_END_VAR = "REST_ENDPOINT"
    TENSORFLOW_VERSION_ENV_VAR = "TENSORFLOW_VERSION"
    CUDA_VERSION_ENV_VAR = "CUDA_VERSION"
    HOPSWORKS_VERSION_ENV_VAR = "HOPSWORKS_VERSION"
    HADOOP_VERSION_ENV_VAR = "HADOOP_VERSION"
    HADOOP_USER_NAME_ENV_VAR = "HADOOP_USER_NAME"
    HADOOP_HOME = "HADOOP_HOME"
    HADOOP_CLASSPATH_GLOB = "HADOOP_CLASSPATH_GLOB"
    HDFS_USER_ENV_VAR = "HDFS_USER"
    HOPSWORKS_USER_ENV_VAR = "HOPSWORKS_USER"
    PATH_ENV_VAR = "PATH"
    PYTHONPATH_ENV_VAR = "PYTHONPATH"
    JOB_NAME_ENV_VAR = "HOPSWORKS_JOB_NAME"
    KERNEL_ID_ENV_VAR = "HOPSWORKS_KERNEL_ID"
    HOPSWORKS_PROJECT_ID_ENV_VAR = "HOPSWORKS_PROJECT_ID"
    HOPSWORKS_PROJECT_NAME_ENV_VAR = "HOPSWORKS_PROJECT_NAME"
    API_KEY_ENV_VAR = "API_KEY"
    REGION_NAME_ENV_VAR = "REGION_NAME"
    FLINK_CONF_DIR = "FLINK_CONF_DIR"
    FLINK_LIB_DIR = "FLINK_LIB_DIR"
    # two attribute names intentionally map to the same variable; both are
    # kept so neither spelling breaks callers
    REQUESTS_VERIFY = "REQUESTS_VERIFY"
    REQUESTS_VERIFY_ENV_VAR = "REQUESTS_VERIFY"
    DOMAIN_CA_TRUSTSTORE_ENV_VAR = "DOMAIN_CA_TRUSTSTORE"
    DOMAIN_CA_TRUSTSTORE_PEM_ENV_VAR = "DOMAIN_CA_TRUSTSTORE_PEM"
    SECRETS_DIR_ENV_VAR = "SECRETS_DIR"
    SPARK_IS_DRIVER = "IS_HOPS_DRIVER"
class KAFKA_SSL_CONFIG:
    """
    Kafka SSL constant strings for configuration
    """
    SSL = "SSL"
    # truststore / keystore properties (Java-client style names)
    SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"
    SSL_TRUSTSTORE_LOCATION_DOC = "The location of the trust store file. "
    SSL_TRUSTSTORE_PASSWORD_CONFIG = "ssl.truststore.password"
    SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the trust store file. If a password is not set access to the truststore is still available, but integrity checking is disabled."
    SSL_KEYSTORE_LOCATION_CONFIG = "ssl.keystore.location"
    SSL_KEYSTORE_PASSWORD_CONFIG = "ssl.keystore.password"
    SSL_KEY_PASSWORD_CONFIG = "ssl.key.password"
    SECURITY_PROTOCOL_CONFIG = "security.protocol"
    # PEM-file properties (librdkafka-style names)
    SSL_CERTIFICATE_LOCATION_CONFIG = "ssl.certificate.location"
    SSL_CA_LOCATION_CONFIG = "ssl.ca.location"
    SSL_PRIVATE_KEY_LOCATION_CONFIG = "ssl.key.location"
    SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG = "ssl.endpoint.identification.algorithm"
# General SSL config properties
class SSL_CONFIG:
    """
    General SSL configuration constants for Hops-TLS
    """
    # per-user certificate material file suffixes
    KEYSTORE_SUFFIX = "__kstore.jks"
    TRUSTSTORE_SUFFIX = "__tstore.jks"
    PASSWORD_SUFFIX = "__cert.key"
    K_CERTIFICATE_CONFIG = "k_certificate"
    T_CERTIFICATE_CONFIG = "t_certificate"
    # PEM material file names
    PEM_CLIENT_CERTIFICATE_CONFIG = "client.pem"
    PEM_CLIENT_KEY_CONFIG = "client_key.pem"
    PEM_CA_CHAIN_CERTIFICATE_CONFIG = "ca_chain.pem"
    DOMAIN_CA_TRUSTSTORE = "domain_ca_truststore"
    CRYPTO_MATERIAL_PASSWORD = "material_passwd"
    PEM_CA_ROOT_CERT = "/srv/hops/kagent/host-certs/hops_root_ca.pem"
    SSL_ENABLED = "ipc.server.ssl.enabled"
class KAFKA_PRODUCER_CONFIG:
    """
    Constant strings for Kafka producers
    """
    BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"
    KEY_SERIALIZER_CLASS_CONFIG = "key.serializer"
    VALUE_SERIALIZER_CLASS_CONFIG = "value.serializer"
class KAFKA_CONSUMER_CONFIG:
    """
    Constant strings for Kafka consumers
    """
    GROUP_ID_CONFIG = "group.id"
    ENABLE_AUTO_COMMIT_CONFIG = "enable.auto.commit"
    AUTO_COMMIT_INTERVAL_MS_CONFIG = "auto.commit.interval.ms"
    SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms"
    KEY_DESERIALIZER_CLASS_CONFIG = "key.deserializer"
    VALUE_DESERIALIZER_CLASS_CONFIG = "value.deserializer"
    AUTO_OFFSET_RESET_CONFIG = "auto.offset.reset"
    # Cleanup: ENABLE_AUTO_COMMIT_CONFIG, KEY_DESERIALIZER_CLASS_CONFIG and
    # VALUE_DESERIALIZER_CLASS_CONFIG were each assigned twice with identical
    # values; the redundant re-assignments have been removed (no behavior
    # change).
class SPARK_CONFIG:
    """
    Spark string constants
    """
    # schema JSON field names
    SPARK_SCHEMA_FIELD_METADATA = "metadata"
    SPARK_SCHEMA_FIELDS = "fields"
    SPARK_SCHEMA_FIELD_NAME = "name"
    SPARK_SCHEMA_FIELD_TYPE = "type"
    SPARK_SCHEMA_ELEMENT_TYPE = "elementType"
    # write modes / reader-writer options
    SPARK_OVERWRITE_MODE = "overwrite"
    SPARK_APPEND_MODE = "append"
    SPARK_WRITE_DELIMITER = "delimiter"
    SPARK_INFER_SCHEMA = "inferSchema"
    SPARK_WRITE_HEADER = "header"
    SPARK_TF_CONNECTOR_RECORD_TYPE = "recordType"
    SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE = "Example"
    # scalar type names
    SPARK_LONG_TYPE = "long"
    SPARK_SHORT_TYPE = "short"
    SPARK_BYTE_TYPE = "byte"
    SPARK_INTEGER_TYPE = "integer"
    SPARK_INT_TYPE = "int"
    SPARK_FLOAT_TYPE = "float"
    SPARK_DOUBLE_TYPE = 'double'
    SPARK_DECIMAL_TYPE = "decimal"
    SPARK_BIGINT_TYPE = "bigint"
    SPARK_SMALLINT_TYPE = "smallint"
    SPARK_STRING_TYPE = "string"
    SPARK_BINARY_TYPE = "binary"
    SPARK_NUMERIC_TYPES = [SPARK_BIGINT_TYPE,
                           SPARK_DECIMAL_TYPE,
                           SPARK_INTEGER_TYPE,
                           SPARK_INT_TYPE,
                           SPARK_DOUBLE_TYPE,
                           SPARK_LONG_TYPE,
                           SPARK_FLOAT_TYPE,
                           SPARK_SHORT_TYPE]
    # complex type names
    SPARK_STRUCT = "struct"
    SPARK_ARRAY = "array"
    SPARK_ARRAY_DOUBLE = "array<double>"
    SPARK_ARRAY_INTEGER = "array<integer>"
    SPARK_ARRAY_INT = "array<int>"
    SPARK_ARRAY_BIGINT = "array<bigint>"
    SPARK_ARRAY_FLOAT = "array<float>"
    SPARK_ARRAY_DECIMAL = "array<decimal>"
    SPARK_ARRAY_STRING = "array<string>"
    SPARK_ARRAY_LONG = "array<long>"
    SPARK_ARRAY_BINARY = "array<binary>"
    SPARK_VECTOR = "vector"
    # SQL catalog / JDBC options
    SPARK_SQL_CATALOG_IMPLEMENTATION = "spark.sql.catalogImplementation"
    SPARK_SQL_CATALOG_HIVE = "hive"
    SPARK_JDBC_FORMAT = "jdbc"
    SPARK_JDBC_URL = "url"
    SPARK_JDBC_DBTABLE = "dbtable"
    SPARK_JDBC_USER = "user"
    SPARK_JDBC_PW = "password"
class MODEL_SERVING:
    """
    Constants for model serving (model servers, serving tools, inference
    logging modes and serving lifecycle actions).
    """
    MODELS_DATASET = "Models"
    MODEL_SERVER_TENSORFLOW_SERVING = "TENSORFLOW_SERVING"
    MODEL_SERVER_FLASK = "FLASK"
    MODEL_SERVERS = [MODEL_SERVER_TENSORFLOW_SERVING, MODEL_SERVER_FLASK]
    SERVING_TOOL_DEFAULT = "DEFAULT"
    SERVING_TOOL_KFSERVING = "KFSERVING"
    SERVING_TOOLS = [SERVING_TOOL_DEFAULT, SERVING_TOOL_KFSERVING]
    INFERENCE_LOGGING_MODEL_INPUTS = "MODEL_INPUTS"
    INFERENCE_LOGGING_PREDICTIONS = "PREDICTIONS"
    INFERENCE_LOGGING_ALL = "ALL"
    INFERENCE_LOGGING_MODES = [INFERENCE_LOGGING_MODEL_INPUTS, INFERENCE_LOGGING_PREDICTIONS, INFERENCE_LOGGING_ALL]
    SERVING_ACTION_START = "START"
    SERVING_ACTION_STOP = "STOP"
    # Bug fix: was [SERVING_ACTION_STOP, SERVING_ACTION_STOP], which
    # duplicated STOP and omitted START from the list of valid actions.
    SERVING_ACTIONS = [SERVING_ACTION_START, SERVING_ACTION_STOP]
    SERVING_START_OR_STOP_PATH_PARAM = "?action="
class FEATURE_STORE:
    """
    Featurestore constants
    """
    TRAINING_DATASET_PROVENANCE_FEATUREGROUP = "featuregroup"
    TRAINING_DATASET_PROVENANCE_VERSION = "version"
    MAX_CORRELATION_MATRIX_COLUMNS = 50
    # supported training-dataset data formats and their file suffixes
    TRAINING_DATASET_CSV_FORMAT = "csv"
    TRAINING_DATASET_TSV_FORMAT = "tsv"
    TRAINING_DATASET_PARQUET_FORMAT = "parquet"
    TRAINING_DATASET_TFRECORDS_FORMAT = "tfrecords"
    TRAINING_DATASET_TFRECORD_FORMAT = "tfrecord"
    TRAINING_DATASET_AVRO_FORMAT = "avro"
    TRAINING_DATASET_ORC_FORMAT = "orc"
    TRAINING_DATASET_NPY_FORMAT = "npy"
    TRAINING_DATASET_IMAGE_FORMAT = "image"
    TRAINING_DATASET_HDF5_FORMAT = "hdf5"
    TRAINING_DATASET_PETASTORM_FORMAT = "petastorm"
    TRAINING_DATASET_NPY_SUFFIX = ".npy"
    TRAINING_DATASET_HDF5_SUFFIX = ".hdf5"
    TRAINING_DATASET_CSV_SUFFIX = ".csv"
    TRAINING_DATASET_TSV_SUFFIX = ".tsv"
    TRAINING_DATASET_PARQUET_SUFFIX = ".parquet"
    TRAINING_DATASET_AVRO_SUFFIX = ".avro"
    TRAINING_DATASET_ORC_SUFFIX = ".orc"
    TRAINING_DATASET_IMAGE_SUFFIX = ".image"
    TRAINING_DATASET_TFRECORDS_SUFFIX = ".tfrecords"
    TRAINING_DATASET_PETASTORM_SUFFIX = ".petastorm"
    TRAINING_DATASET_SUPPORTED_FORMATS = [
        TRAINING_DATASET_TSV_FORMAT,
        TRAINING_DATASET_CSV_FORMAT,
        TRAINING_DATASET_PARQUET_FORMAT,
        TRAINING_DATASET_TFRECORDS_FORMAT,
        TRAINING_DATASET_TFRECORD_FORMAT,
        TRAINING_DATASET_NPY_FORMAT,
        TRAINING_DATASET_HDF5_FORMAT,
        TRAINING_DATASET_AVRO_FORMAT,
        TRAINING_DATASET_ORC_FORMAT,
        TRAINING_DATASET_IMAGE_FORMAT,
        TRAINING_DATASET_PETASTORM_FORMAT
    ]
    FEATURE_GROUP_INSERT_APPEND_MODE = "append"
    FEATURE_GROUP_INSERT_OVERWRITE_MODE = "overwrite"
    FEATURESTORE_SUFFIX = "_featurestore"
    TRAINING_DATASETS_SUFFIX = "_Training_Datasets"
    # tf.train.Example record schema keys and type names
    TRAINING_DATASET_TF_RECORD_SCHEMA_FILE_NAME = "tf_record_schema.txt"
    TF_RECORD_SCHEMA_FEATURE = "feature"
    TF_RECORD_SCHEMA_FEATURE_FIXED = "fixed_len"
    TF_RECORD_SCHEMA_FEATURE_VAR = "var_len"
    TF_RECORD_SCHEMA_TYPE = "type"
    TF_RECORD_SCHEMA_SHAPE = "shape"
    TF_RECORD_INT_TYPE = "int"
    TF_RECORD_FLOAT_TYPE = "float"
    TF_RECORD_STRING_TYPE = "string"
    # Spark-type -> tfrecord-type groupings (reference SPARK_CONFIG above)
    TF_RECORD_INT_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_INTEGER, SPARK_CONFIG.SPARK_ARRAY_BIGINT,
                                       SPARK_CONFIG.SPARK_ARRAY_INT, SPARK_CONFIG.SPARK_ARRAY_LONG]
    TF_RECORD_INT_SPARK_TYPES = [SPARK_CONFIG.SPARK_INTEGER_TYPE, SPARK_CONFIG.SPARK_BIGINT_TYPE,
                                 SPARK_CONFIG.SPARK_INT_TYPE, SPARK_CONFIG.SPARK_LONG_TYPE]
    TF_RECORD_STRING_SPARK_TYPES = [SPARK_CONFIG.SPARK_STRING_TYPE, SPARK_CONFIG.SPARK_BINARY_TYPE]
    TF_RECORD_STRING_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_STRING, SPARK_CONFIG.SPARK_ARRAY_BINARY]
    TF_RECORD_FLOAT_SPARK_TYPES = [SPARK_CONFIG.SPARK_FLOAT_TYPE, SPARK_CONFIG.SPARK_DECIMAL_TYPE,
                                   SPARK_CONFIG.SPARK_DOUBLE_TYPE]
    TF_RECORD_FLOAT_ARRAY_SPARK_TYPES = [SPARK_CONFIG.SPARK_ARRAY_FLOAT, SPARK_CONFIG.SPARK_ARRAY_DECIMAL,
                                         SPARK_CONFIG.SPARK_ARRAY_DOUBLE, SPARK_CONFIG.SPARK_VECTOR]
    RECOGNIZED_TF_RECORD_TYPES = [SPARK_CONFIG.SPARK_VECTOR, SPARK_CONFIG.SPARK_ARRAY_BINARY,
                                  SPARK_CONFIG.SPARK_ARRAY_STRING, SPARK_CONFIG.SPARK_ARRAY_DECIMAL,
                                  SPARK_CONFIG.SPARK_ARRAY_DOUBLE, SPARK_CONFIG.SPARK_ARRAY_FLOAT,
                                  SPARK_CONFIG.SPARK_ARRAY_LONG, SPARK_CONFIG.SPARK_ARRAY_INTEGER,
                                  SPARK_CONFIG.SPARK_BINARY_TYPE, SPARK_CONFIG.SPARK_STRING_TYPE,
                                  SPARK_CONFIG.SPARK_DECIMAL_TYPE, SPARK_CONFIG.SPARK_DOUBLE_TYPE,
                                  SPARK_CONFIG.SPARK_FLOAT_TYPE, SPARK_CONFIG.SPARK_LONG_TYPE,
                                  SPARK_CONFIG.SPARK_INT_TYPE, SPARK_CONFIG.SPARK_INTEGER_TYPE,
                                  SPARK_CONFIG.SPARK_ARRAY_BIGINT, SPARK_CONFIG.SPARK_BIGINT_TYPE,
                                  SPARK_CONFIG.SPARK_ARRAY_INT]
    # dataframe flavor identifiers
    DATAFRAME_TYPE_SPARK = "spark"
    DATAFRAME_TYPE_NUMPY = "numpy"
    DATAFRAME_TYPE_PYTHON = "python"
    DATAFRAME_TYPE_PANDAS = "pandas"
    # JDBC TLS connection arguments
    JDBC_TRUSTSTORE_ARG = "sslTrustStore"
    JDBC_TRUSTSTORE_PW_ARG = "trustStorePassword"
    JDBC_KEYSTORE_ARG = "sslKeyStore"
    JDBC_KEYSTORE_PW_ARG = "keyStorePassword"
    IMPORT_HOPS_UTIL_FEATURESTORE_HELPER = "import io.hops.util.featurestore.FeaturestoreHelper"
class PETASTORM_CONFIG:
    """
    Petastorm String constants
    """
    FILESYSTEM_FACTORY = "pyarrow_filesystem"
    SCHEMA = "schema"
    LIBHDFS = "libhdfs"
class MYSQL_CONFIG:
    """ MYSQL string constants """
    # column types offered for online feature storage
    MYSQL_DATA_TYPES = [
        "None", "INT(11)", "TINYINT(1)", "SMALLINT(5)", "MEDIUMINT(7)", "BIGINT(20)", "FLOAT", "DOUBLE", "DECIMAL",
        "DATE", "DATETIME", "TIMESTAMP", "TIME", "YEAR", "CHAR", "VARCHAR(25)", "VARCHAR(125)", "VARCHAR(225)",
        "VARCHAR(500)", "VARCHAR(1000)", "VARCHAR(2000)", "VARCHAR(5000)", "VARCHAR(10000)", "BLOB", "TEXT",
        "TINYBLOB", "TINYTEXT", "MEDIUMBLOB", "MEDIUMTEXT", "LONGBLOB", "LONGTEXT", "JSON"
    ]
    # frequently used defaults
    MYSQL_BIGINT_TYPE = "BIGINT(20)"
    MYSQL_SMALLINT_TYPE = "SMALLINT(5)"
    MYSQL_CHAR_TYPE = "CHAR"
    MYSQL_INTEGER_TYPE = "INT(11)"
    MYSQL_VARCHAR_1000_TYPE = "VARCHAR(1000)"
    MYSQL_BLOB_TYPE = "BLOB"
class HIVE_CONFIG:
    """
    Hive string constants
    """
    # Recognized Hive column type strings.
    HIVE_DATA_TYPES = [
        "TINYINT", "SMALLINT", "INT", "BIGINT", "FLOAT", "DOUBLE",
        "DECIMAL", "TIMESTAMP", "DATE", "INTERVAL", "STRING", "VARCHAR",
        "CHAR", "BOOLEAN", "BINARY", "ARRAY", "MAP", "STRUCT", "UNIONTYPE"
    ]
    # Frequently referenced individual Hive types.
    HIVE_BIGINT_TYPE = "BIGINT"
    HIVE_INT_TYPE = "INT"
    HIVE_CHAR_TYPE = "CHAR"
class REST_CONFIG:
    """
    REST endpoints and JSON properties used for communicating with Hopsworks REST API
    """
    JSON_KEYSTOREPWD = "keyStorePwd"
    JSON_SCHEMA_CONTENTS = "contents"
    JSON_TYPE = "type"
    # -- Query parameters for feature store update endpoints --
    JSON_FEATURESTORE_UPDATE_STATS_QUERY_PARAM = "updateStats"
    JSON_FEATURESTORE_UPDATE_METADATA_QUERY_PARAM = "updateMetadata"
    JSON_FEATURESTORE_UPDATE_JOB_QUERY_PARAM = "updateJob"
    JSON_FEATURESTORE_ENABLE_ONLINE_QUERY_PARAM = "enableOnline"
    JSON_FEATURESTORE_DISABLE_ONLINE_QUERY_PARAM = "disableOnline"
    JSON_FEATURESTORE_UPDATE_STATISTICS_SETTINGS = "updateStatsSettings"
    # -- Feature store settings DTO fields --
    JSON_FEATURESTORE_SETTINGS_ENTITY_NAME_MAX_LENGTH = "featurestoreEntityNameMaxLength"
    JSON_FEATURESTORE_SETTINGS_ENTITY_DESCRIPTION_MAX_LENGTH = "featurestoreEntityDescriptionMaxLength"
    JSON_FEATURESTORE_SETTINGS_CACHED_FEATUREGROUP_DTO_TYPE = "cachedFeaturegroupDtoType"
    JSON_FEATURESTORE_SETTINGS_EXTERNAL_TRAINING_DATASET_TYPE = "externalTrainingDatasetType"
    JSON_FEATURESTORE_SETTINGS_FEATURESTORE_REGEX = "featurestoreRegex"
    JSON_FEATURESTORE_SETTINGS_HOPSFS_CONNECTOR_DTO_TYPE = "hopsfsConnectorDtoType"
    JSON_FEATURESTORE_SETTINGS_HOPSFS_CONNECTOR_TYPE = "hopsfsConnectorType"
    JSON_FEATURESTORE_SETTINGS_HOPSFS_TRAINING_DATASET_TYPE = "hopsfsTrainingDatasetType"
    JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_DTO_TYPE = "jdbcConnectorDtoType"
    JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_TYPE = "jdbcConnectorType"
    JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_ARGUMENTS_MAX_LEN = "jdbcStorageConnectorArgumentsMaxLength"
    JSON_FEATURESTORE_SETTINGS_JDBC_CONNECTOR_CONNECTION_STRING_MAX_LEN = "jdbcStorageConnectorConnectionstringMaxLength"
    JSON_FEATURESTORE_SETTINGS_ON_DEMAND_FEATUREGROUP_DTO_TYPE = "onDemandFeaturegroupDtoType"
    JSON_FEATURESTORE_SETTINGS_ON_DEMAND_FEATUREGROUP_SQL_QUERY_MAX_LEN = "onDemandFeaturegroupSqlQueryMaxLength"
    JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_DTO_TYPE = "s3ConnectorDtoType"
    JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_TYPE = "s3ConnectorType"
    JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_ACCESS_KEY_MAX_LEN = "s3StorageConnectorAccesskeyMaxLength"
    JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_BUCKET_MAX_LEN = "s3StorageConnectorBucketMaxLength"
    JSON_FEATURESTORE_SETTINGS_S3_CONNECTOR_SECRET_KEY_MAX_LEN = "s3StorageConnectorSecretkeyMaxLength"
    JSON_FEATURESTORE_SETTINGS_STORAGE_CONNECTOR_DESCRIPTION_MAX_LEN = "storageConnectorDescriptionMaxLength"
    # NOTE(review): value below duplicates the *description* max-length key
    # above -- looks like a copy-paste slip; confirm against the backend DTO
    # (expected something like "storageConnectorNameMaxLength").
    JSON_FEATURESTORE_SETTINGS_STORAGE_CONNECTOR_NAME_MAX_LEN = "storageConnectorDescriptionMaxLength"
    JSON_FEATURESTORE_SETTINGS_HIVE_SUGGESTED_FEATURE_TYPES = "suggestedHiveFeatureTypes"
    JSON_FEATURESTORE_SETTINGS_MYSQL_SUGGESTED_FEATURE_TYPES = "suggestedMysqlFeatureTypes"
    JSON_FEATURESTORE_SETTINGS_TRAINING_DATASET_DATA_FORMATS = "trainingDatasetDataFormats"
    JSON_FEATURESTORE_SETTINGS_TRAINING_DATASET_TYPE = "trainingDatasetType"
    JSON_FEATURESTORE_SETTINGS = "settings"
    JSON_FEATURESTORE_STORAGE_CONNECTORS = "storageConnectors"
    JSON_FEATURESTORE_SETTINGS_IMPORT_CONNECTORS = "featureImportConnectors"
    JSON_FEATURESTORE_SETTINGS_ONLINE_ENABLED = "onlineFeaturestoreEnabled"
    # -- Feature store job fields --
    JSON_FEATURESTORE_JOB_FEATUREGROUP_ID = "featuregroupId"
    JSON_FEATURESTORE_JOB_TRAINING_DATASET_ID = "trainingDatasetId"
    JSON_FEATURESTORE_JOB_LAST_COMPUTED = "lastComputed"
    JSON_FEATURESTORE_JOB_STATUS = "jobStatus"
    JSON_FEATURESTORE_JOB_NAME = "jobName"
    JSON_FEATURESTORE_JOB_ID = "jobId"
    JSON_FEATURESTORE_LOCATION = "location"
    # -- Featuregroup fields --
    JSON_FEATUREGROUP_ON_DEMAND_QUERY = "query"
    JSON_FEATUREGROUP_JDBC_CONNECTOR_NAME = "jdbcConnectorName"
    JSON_FEATUREGROUP_JDBC_CONNECTOR_ID = "jdbcConnectorId"
    JSON_FEATUREGROUP_TYPE = "type"
    JSON_FEATUREGROUP_NAME = "name"
    JSON_FEATUREGROUP_ID = "id"
    JSON_FEATUREGROUP_VERSION = "version"
    JSON_FEATUREGROUP_JOBS = "jobs"
    JSON_FEATUREGROUP_FEATURES = "features"
    JSON_FEATUREGROUP_DESCRIPTION = "description"
    JSON_FEATUREGROUP_CREATED = "created"
    JSON_FEATUREGROUP_CREATOR = "creator"
    JSON_FEATUREGROUPS = "featuregroups"
    JSON_FEATUREGROUP_ONLINE = "onlineEnabled"
    JSON_FEATUREGROUP_HUDI = "hudiEnabled"
    JSON_FEATUREGROUP_FEATURE_HISTOGRAM_ENABLED = "featHistEnabled"
    JSON_FEATUREGROUP_FEATURE_CORRELATION_ENABLED = "featCorrEnabled"
    JSON_FEATUREGROUP_DESCRIPTIVE_STATISTICS_ENABLED = "descStatsEnabled"
    JSON_FEATUREGROUP_STATISTIC_COLUMNS = "statisticColumns"
    JSON_ONLINE_FEATUREGROUP_ID = "id"
    JSON_ONLINE_FEATUREGROUP_DB = "dbName"
    JSON_ONLINE_FEATUREGROUP_TABLE = "tableName"
    JSON_ONLINE_FEATUREGROUP_TABLE_TYPE = "tableType"
    JSON_ONLINE_FEATUREGROUP_TABLE_ROWS = "tableRows"
    JSON_ONLINE_FEATUREGROUP_SIZE = "size"
    # -- Featurestore metadata fields --
    JSON_FEATURESTORE = "featurestore"
    JSON_FEATURESTORE_ID = "featurestoreId"
    JSON_FEATURESTORE_NAME = "featurestoreName"
    JSON_FEATURESTORE_PROJECT_ID = "projectId"
    JSON_FEATURESTORE_PROJECT_NAME = "projectName"
    JSON_FEATURESTORE_INODE_ID = "inodeId"
    JSON_FEATURESTORE_DESCRIPTION = "featurestoreDescription"
    JSON_FEATURESTORE_HDFS_PATH = "hdfsStorePath"
    JSON_FEATURESTORE_ONLINE_CONNECTOR = "onlineFeaturestoreConnector"
    JSON_FEATURESTORE_ONLINE_ENABLED = "onlineEnabled"
    JSON_FEATURESTORE_ONLINE_FEATURESTORE_TYPE = "onlineFeaturestoreType"
    JSON_FEATURESTORE_OFFLINE_FEATURESTORE_TYPE = "offlineFeaturestoreType"
    JSON_FEATURESTORE_ONLINE_FEATURESTORE_NAME = "onlineFeaturestoreName"
    JSON_FEATURESTORE_OFFLINE_FEATURESTORE_NAME = "offlineFeaturestoreName"
    # -- Feature fields --
    JSON_FEATURE_NAME = "name"
    JSON_FEATURE_TYPE = "type"
    JSON_FEATURE_INDEX = "index"
    JSON_FEATURE_DESCRIPTION = "description"
    JSON_FEATURE_PRIMARY = "primary"
    JSON_FEATURE_PARTITION = "partition"
    JSON_FEATURE_ONLINE_TYPE = "onlineType"
    JSON_FEATURE_FEATUREGROUP = "featuregroup"
    JSON_FEATURE_VERSION = "version"
    # -- Training dataset fields --
    JSON_TRAINING_DATASET_EXTERNAL_TYPE = "EXTERNAL_TRAINING_DATASET"
    JSON_TRAINING_DATASET_HOPSFS_TYPE = "HOPSFS_TRAINING_DATASET"
    JSON_TRAINING_DATASET_TYPE = "trainingDatasetType"
    JSON_TRAINING_DATASET_CONNECTOR_NAME = "storageConnectorName"
    JSON_TRAINING_DATASET_CONNECTOR_ID = "storageConnectorId"
    JSON_TRAINING_DATASET_SIZE = "size"
    JSON_TRAINING_DATASET_ID = "id"
    JSON_TRAINING_DATASET_NAME = "name"
    JSON_TRAINING_DATASETS = "trainingDatasets"
    JSON_TRAINING_DATASET_HDFS_STORE_PATH = "hdfsStorePath"
    JSON_TRAINING_DATASET_FORMAT = "dataFormat"
    JSON_TRAINING_DATASET_SCHEMA = "features"
    JSON_TRAINING_DATASET_VERSION = "version"
    JSON_TRAINING_DATASET_CREATOR = "creator"
    JSON_TRAINING_DATASET_CREATED = "created"
    JSON_TRAINING_DATASET_DESCRIPTION = "description"
    JSON_TRAINING_DATASET_JOBNAME = "jobName"
    JSON_TRAINING_DATASET_INODE_ID = "inodeId"
    JSON_TRAINING_DATASET_FEATURES = "features"
    JSON_TRAINING_DATASET_JOBS = "jobs"
    # -- Storage connector fields --
    JSON_FEATURESTORE_HOPSFS_CONNECTOR_HOPSFS_PATH = "hopsfsPath"
    JSON_FEATURESTORE_HOPSFS_CONNECTOR_DATASET_NAME = "datasetName"
    JSON_FEATURESTORE_JDBC_CONNECTOR_CONNECTION_STRING = "connectionString"
    JSON_FEATURESTORE_JDBC_CONNECTOR_ARGUMENTS = "arguments"
    JSON_FEATURESTORE_S3_ACCESS_KEY = "accessKey"
    JSON_FEATURESTORE_S3_SECRET_KEY = "secretKey"
    JSON_FEATURESTORE_S3_BUCKET = "bucket"
    JSON_FEATURESTORE_CONNECTOR_NAME = "name"
    JSON_FEATURESTORE_CONNECTOR_DESCRIPTION = "description"
    JSON_FEATURESTORE_CONNECTOR_ID = "id"
    JSON_FEATURESTORE_CONNECTOR_FEATURESTORE_ID = "featurestoreId"
    JSON_FEATURESTORE_CONNECTOR_TYPE = "storageConnectorType"
    JSON_SCHEMA_VERSION = "version"
    JSON_KEYSTORE = "keyStore"
    # -- REST resource path segments --
    HOPSWORKS_REST_RESOURCE = "hopsworks-api/api"
    HOPSWORKS_SCHEMA_RESOURCE = "schema"
    HOPSWORKS_FEATURESTORES_RESOURCE = "featurestores"
    HOPSWORKS_FEATURESTORE_METADATA_RESOURCE = "metadata"
    HOPSWORKS_FEATUREGROUPS_RESOURCE = "featuregroups"
    HOPSWORKS_TRAININGDATASETS_RESOURCE = "trainingdatasets"
    HOPSWORKS_FEATUREGROUP_CLEAR_RESOURCE = "clear"
    HOPSWORKS_FEATUREGROUPS_SYNC_RESOURCE = "sync"
    HOPSWORKS_SERVING_RESOURCE = "serving"
    HOPSWORKS_INFERENCE_RESOURCE = "inference"
    HOPSWORKS_MODEL_REGISTRY_RESOURCE = "modelregistries"
    HOPSWORKS_MODELS_RESOURCE = "models"
    HOPSWORKS_USERS_RESOURCE = "users"
    HOPSWORKS_ADMIN_RESOURCE = "admin"
    HOPSWORKS_FEATURESTORES_STORAGE_CONNECTORS_RESOURCE = "storageconnectors"
    HOPSWORKS_ONLINE_FEATURESTORE_STORAGE_CONNECTOR_RESOURCE = "onlinefeaturestore"
    HOPSWORKS_FEATURESTORE_TAGS_RESOURCE = "tags"
    HOPSWORKS_VARIABLES_RESOURCE = "variables"
    HOPSWORKS_ENDPOINT = "hopsworks_endpoint"
    HOPSWORKS_EXPERIMENTS_RESOURCE = "experiments"
    HOPSWORKS_KAFKA_RESOURCE = "kafka"
    HOPSWORKS_TOPICS_RESOURCE = "topics"
    HOPSWORKS_SUBJECTS_RESOURCE = "subjects"
    HOPSWORKS_AS_SHARED = "asShared"
    HOPSWORKS_SHARED = "shared"
    HOPSWORKS_PROJECT_RESOURCE = "project"
    HOPSWORKS_USER_RESOURCE = "users"
    HOPSWORKS_PROJECT_INFO_RESOURCE = "getProjectInfo"
    HOPSWORKS_JOBS_RESOURCE = "jobs"
    HOPSWORKS_SECRETS_RESOURCE = "secrets"
    HOPSWORKS_EXECUTIONS_RESOURCE = "executions"
    HOPSWORKS_DATASETS_RESOURCE = "dataset"
    HOPSWORKS_PROJECT_CREDENTIALS_RESOURCE = "credentials"
    HOPSWORKS_PROJECT_CLIENT = "client"
    HOPSWORKS_AUTH_RESOURCE = "auth"
    HOPSWORKS_AUTH_RESOURCE_REGISTER = "register"
    HOPSWORKS_XATTR_RESOURCE = "xattrs"
    HOPSWORKS_ELASTIC_RESOURCE = "elastic"
    HOPSWORKS_ELASTIC_JWT_RESOURCE = "jwt"
    # -- Error payload fields --
    JSON_ERROR_CODE = "errorCode"
    JSON_ERROR_MSG = "errorMsg"
    JSON_USR_MSG = "usrMsg"
    JWT_TOKEN = "token.jwt"
    # -- Model serving fields --
    JSON_SERVING_ID = "id"
    JSON_SERVING_NAME = "name"
    JSON_SERVING_MODEL_PATH = "modelPath"
    JSON_SERVING_MODEL_NAME = "modelName"
    JSON_SERVING_MODEL_VERSION = "modelVersion"
    JSON_SERVING_MODEL_SERVER = "modelServer"
    JSON_SERVING_TOOL = "servingTool"
    JSON_SERVING_ARTIFACT_VERSION = "artifactVersion"
    JSON_SERVING_TRANSFORMER = "transformer"
    JSON_SERVING_REQUESTED_INSTANCES = "requestedInstances"
    JSON_SERVING_PREDICTOR_RESOURCE_CONFIG = "predictorResourceConfig"
    JSON_SERVING_REQUESTED_TRANSFORMER_INSTANCES = "requestedTransformerInstances"
    JSON_SERVING_BATCHING_ENABLED = "batchingEnabled"
    JSON_SERVING_KAFKA_TOPIC_DTO = "kafkaTopicDTO"
    JSON_SERVING_CREATE_KAFKA_TOPIC = "CREATE"
    JSON_SERVING_DONT_CREATE_KAFKA_TOPIC = "NONE"
    JSON_SERVING_INFERENCE_LOGGING = "inferenceLogging"
    JSON_SERVING_CREATED = "created"
    JSON_SERVING_CREATOR = "creator"
    JSON_SERVING_REVISION = "revision"
    JSON_SERVING_STATUS = "status"
    JSON_SERVING_AVAILABLE_INSTANCES = "availableInstances"
    JSON_SERVING_AVAILABLE_TRANSFORMER_INSTANCES = "availableTransformerInstances"
    # -- Kafka topic fields --
    JSON_KAFKA_TOPIC_SCHEMA_VERSION = "schemaVersion"
    JSON_KAFKA_TOPIC_NAME = "name"
    JSON_KAFKA_NUM_PARTITIONS = "numOfPartitions"
    JSON_KAFKA_NUM_REPLICAS = "numOfReplicas"
    # -- Cloud / AWS role fields --
    HOPSWORKS_CLOUD_RESOURCE = "cloud"
    HOPSWORKS_AWS_CLOUD_SESSION_TOKEN_RESOURCE = "aws/session-token"
    HOPSWORKS_CLOUD_ROLE_MAPPINGS_RESOURCE = "role-mappings"
    HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_ROLE = "roleARN"
    HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_SESSION = "roleSessionName"
    HOPSWORKS_CLOUD_SESSION_TOKEN_RESOURCE_QUERY_SESSION_DURATION = "durationSeconds"
    JSON_ACCESS_KEY_ID = "accessKeyId"
    JSON_SECRET_KEY_ID = "secretAccessKey"
    JSON_SESSION_TOKEN_ID = "sessionToken"
    JSON_ARRAY_ITEMS = "items"
    JSON_CLOUD_ROLE = "cloudRole"
class DELIMITERS:
    """
    String delimiters constants
    """
    SLASH_DELIMITER = "/"
    COMMA_DELIMITER = ","
    TAB_DELIMITER = "\t"
    COLON_DELIMITER = ":"
    DOT_DELIMITER = "."
    AMPERSAND_DELIMITER = "&"
    SEMI_COLON_DELIMITER = ";"
    # Separators used when parsing/assembling JDBC connection strings
    # (key=value pairs joined by semicolons).
    JDBC_CONNECTION_STRING_VALUE_DELIMITER = "="
    JDBC_CONNECTION_STRING_DELIMITER = ";"
    QUESTION_MARK_DELIMITER = "?"
class S3_CONFIG:
    """
    String constants for S3
    """
    # URI scheme prefix for the Hadoop s3a filesystem.
    S3_FILE_PREFIX = "s3a://"
    # Hadoop configuration keys for s3a credentials.
    S3_ACCESS_KEY_ENV = "fs.s3a.access.key"
    S3_SECRET_KEY_ENV = "fs.s3a.secret.key"
    S3_SESSION_KEY_ENV = "fs.s3a.session.token"
    S3_CREDENTIAL_PROVIDER_ENV = "fs.s3a.aws.credentials.provider"
    S3_TEMPORARY_CREDENTIAL_PROVIDER = "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider"
    S3_TRAINING_DATASETS_FOLDER = "TRAINING_DATASETS"
    # Standard AWS SDK environment variable names.
    AWS_ACCESS_KEY_ID_ENV = "AWS_ACCESS_KEY_ID"
    AWS_SECRET_ACCESS_KEY_ENV = "AWS_SECRET_ACCESS_KEY"
    AWS_SESSION_TOKEN_ENV = "AWS_SESSION_TOKEN"
class AWS:
    """AWS service-name constants (region default and secret backends)."""
    DEFAULT_REGION = 'default'
    SECRETS_MANAGER = "secretsmanager"
    PARAMETER_STORE = "parameterstore"
class LOCAL:
    """Identifier for the local (non-cloud) secrets store."""
    LOCAL_STORE = "local"
class XATTRS:
    """Query-parameter names for the extended-attributes REST resource."""
    XATTRS_PARAM_NAME = 'name'
class ELASTICSEARCH_CONFIG:
    """elasticsearch-hadoop ("es.*") Spark configuration keys."""
    SSL_CONFIG = "es.net.ssl"
    NODES_WAN_ONLY = "es.nodes.wan.only"
    NODES = "es.nodes"
    SSL_KEYSTORE_LOCATION = "es.net.ssl.keystore.location"
    SSL_KEYSTORE_PASSWORD = "es.net.ssl.keystore.pass"
    SSL_TRUSTSTORE_LOCATION = "es.net.ssl.truststore.location"
    SSL_TRUSTSTORE_PASSWORD = "es.net.ssl.truststore.pass"
    HTTP_AUTHORIZATION = "es.net.http.header.Authorization"
    INDEX = "es.resource"
| |
"""
.. Provide some widely useful utilities. Safe for "from utils import *".
.. moduleauthor:: Peter Norvig, Luca Gilardi
"""
# from __future__ import print_function
import operator, math, random, copy, sys, os.path, bisect
import collections
from os.path import basename
from pprint import pprint
from itertools import chain
# json is optional here (it entered the stdlib in Python 2.6); fall through
# silently when unavailable.
try:
    from json import JSONEncoder, JSONDecoder
except ImportError:
    # BUGFIX: was a bare `except:`, which also hid SyntaxError/KeyboardInterrupt;
    # only a missing module should be tolerated.
    pass
#from functools import reduce
# ______________________________________________________________________________
# Compatibility with Python 2.2 and 2.3
# The AIMA code is designed to run in Python 2.2 and up (at some point,
# support for 2.2 may go away; 2.2 was released in 2001, and so is over
# 3 years old). The first part of this file brings you up to 2.4
# compatibility if you are running in Python 2.2 or 2.3:
# try: bool, True, False # # Introduced in 2.3
# except NameError:
# class bool(int):
# "Simple implementation of Booleans, as in PEP 285"
# def __init__(self, val): self.val = val
# def __int__(self): return self.val
# def __repr__(self): return ('False', 'True')[self.val]
#
# True, False = bool(1), bool(0)
# Fallback definitions for ancient interpreters follow; on any modern Python
# the `try` succeeds and the fallback body is never used.
try: sum  # # Introduced in 2.3
except NameError:
    def sum(seq, start=0):
        """Sum the elements of seq.
        >>> sum([1, 2, 3])
        6
        """
        # NOTE(review): relies on a global `reduce`, which is not a builtin in
        # Python 3 -- harmless, since this branch only runs on Python < 2.3.
        return reduce(operator.add, seq, start)
# Fallback for Python < 2.3.
try: enumerate  # # Introduced in 2.3
except NameError:
    def enumerate(collection):
        """Return an iterator that enumerates pairs of (i, c[i]). PEP 279.
        >>> list(enumerate('abc'))
        [(0, 'a'), (1, 'b'), (2, 'c')]
        """
        # # Copied from PEP 279
        i = 0
        it = iter(collection)
        while 1:
            # StopIteration raised by next(it) terminates the generator.
            yield (i, next(it))
            i += 1
# Fallback for Python < 2.4.
try: reversed  # # Introduced in 2.4
except NameError:
    def reversed(seq):
        """Iterate over x in reverse order.
        >>> list(reversed([1,2,3]))
        [3, 2, 1]
        """
        # Mappings have no meaningful reverse order, mirroring the builtin.
        if hasattr(seq, 'keys'):
            raise ValueError("mappings do not support reverse iteration")
        i = len(seq)
        while i > 0:
            i -= 1
            yield seq[i]
# Fallback for Python < 2.4. NOTE(review): uses the Python-2-only `cmp`
# builtin and list.sort(cmp) signature; it never runs on Python 3.
try: sorted  # # Introduced in 2.4
except NameError:
    def sorted(seq, cmp=None, key=None, reverse=False):
        """Copy seq and sort and return it.
        >>> sorted([3, 1, 2])
        [1, 2, 3]
        """
        seq2 = copy.copy(seq)
        if key:
            if cmp == None:
                cmp = __builtins__.cmp
            seq2.sort(lambda x, y: cmp(key(x), key(y)))
        else:
            if cmp == None:
                seq2.sort()
            else:
                seq2.sort(cmp)
        if reverse:
            seq2.reverse()
        return seq2
# Fallback set/frozenset implementations for Python < 2.3; never used on any
# modern interpreter (the first `try` succeeds).
try:
    set, frozenset  # # set builtin introduced in 2.4
except NameError:
    try:
        import sets  # # sets module introduced in 2.3
        set, frozenset = sets.Set, sets.ImmutableSet
    except (NameError, ImportError):
        class BaseSet:
            "set type (see http://docs.python.org/lib/types-set.html)"
            def __init__(self, elements=[]):
                # Elements are stored as dict keys; the shared mutable default
                # is safe because it is only iterated, never mutated.
                self.dict = {}
                for e in elements:
                    self.dict[e] = 1
            def __len__(self):
                return len(self.dict)
            def __iter__(self):
                for e in self.dict:
                    yield e
            def __contains__(self, element):
                return element in self.dict
            def issubset(self, other):
                for e in list(self.dict.keys()):
                    if e not in other:
                        return False
                return True
            def issuperset(self, other):
                for e in other:
                    if e not in self:
                        return False
                return True
            def union(self, other):
                return type(self)(list(self) + list(other))
            def intersection(self, other):
                return type(self)([e for e in self.dict if e in other])
            def difference(self, other):
                return type(self)([e for e in self.dict if e not in other])
            def symmetric_difference(self, other):
                return type(self)([e for e in self.dict if e not in other] +
                                  [e for e in other if e not in self.dict])
            def copy(self):
                return type(self)(self.dict)
            def __repr__(self):
                elements = ", ".join(map(str, self.dict))
                return "%s([%s])" % (type(self).__name__, elements)
            __le__ = issubset
            __ge__ = issuperset
            __or__ = union
            __and__ = intersection
            __sub__ = difference
            __xor__ = symmetric_difference
        class frozenset(BaseSet):
            "A frozenset is a BaseSet that has a hash value and is immutable."
            def __init__(self, elements=[]):
                # BUGFIX: was BaseSet.__init__(elements) -- missing `self`,
                # which raised TypeError on every frozenset construction.
                BaseSet.__init__(self, elements)
                self.hash = 0
                for e in self:
                    self.hash |= hash(e)
            def __hash__(self):
                return self.hash
        class set(BaseSet):
            "A set is a BaseSet that does not have a hash, but is mutable."
            def update(self, other):
                for e in other:
                    self.add(e)
                return self
            def intersection_update(self, other):
                for e in list(self.dict.keys()):
                    if e not in other:
                        self.remove(e)
                return self
            def difference_update(self, other):
                for e in list(self.dict.keys()):
                    if e in other:
                        self.remove(e)
                return self
            def symmetric_difference_update(self, other):
                # BUGFIX: the old version removed the common elements twice and
                # never added the elements unique to `other`, so it computed a
                # plain difference. Snapshot both halves before mutating.
                common = [e for e in self.dict if e in other]
                only_in_other = [e for e in other if e not in self.dict]
                self.difference_update(common)
                self.update(only_in_other)
                return self
            def add(self, element):
                self.dict[element] = 1
            def remove(self, element):
                del self.dict[element]
            def discard(self, element):
                if element in self.dict:
                    del self.dict[element]
            def pop(self):
                key, val = self.dict.popitem()
                return key
            def clear(self):
                self.dict.clear()
            __ior__ = update
            __iand__ = intersection_update
            __isub__ = difference_update
            __ixor__ = symmetric_difference_update
# ______________________________________________________________________________
# Simple Data Structures: infinity, Dict, Struct
# float('inf') is the portable spelling; the old literal 1.0e400 only yielded
# inf because CPython overflows it at parse time.
infinity = float('inf')
def Dict(**entries):
    """Create a dict out of the argument=value arguments.
    >>> Dict(a=1, b=2, c=3) == {'a': 1, 'b': 2, 'c': 3}
    True
    """
    return dict(entries)
class DefaultDict(dict):
    """Dictionary with a default value for unknown keys.

    The default is deep-copied on first access, so mutable defaults
    (e.g. []) are not shared between keys.
    """
    def __init__(self, default):
        self.default = default
    def __getitem__(self, key):
        # BUGFIX: was self.get_ontology(key) -- an undefined method that made
        # every lookup of an existing key raise AttributeError.
        if key in self: return self.get(key)
        return self.setdefault(key, copy.deepcopy(self.default))
    def __copy__(self):
        # Renamed the local (was `copy`) so it no longer shadows the module.
        dup = DefaultDict(self.default)
        dup.update(self)
        return dup
class Struct(object):
    """Create an instance with argument=value slots.
    This is for making a lightweight object whose class doesn't matter.
    """
    def __init__(self, *maps, **entries):
        # Positional mapping arguments are merged first; keyword args win.
        for m in maps:
            entries.update(m)
        self.__dict__.update(entries)
    def __cmp__(self, other):
        # NOTE(review): Python 2 only -- __cmp__ and the cmp builtin no longer
        # exist in Python 3, so rich comparison falls back to identity there.
        if isinstance(other, Struct):
            return cmp(self.__dict__, other.__dict__)
        else:
            return cmp(self.__dict__, other)
    def __repr__(self):
        args = ["%s=%s" % (k, repr(v)) for (k, v) in list(vars(self).items())]
        return "Struct(%s)" % ", ".join(args)
    def __len__(self):
        return self.__dict__.__len__()
    def __getitem__(self, key):
        # Dict-style access to the slots: s['a'] == s.a
        return self.__dict__.__getitem__(key)
    def __iter__(self):
        return self.__dict__.__iter__()
    def __json__(self):
        # Serialization hook: expose the slots as a plain dict.
        return self.__dict__
def update(x, *maps, **entries):
"""Update a dict or an object with slots according to entries.
>>> update({'a': 1}, a=10, b=20)
{'a': 10, 'b': 20}
>>> update(Struct(a=1), a=10, b=20)
Struct(a=10, b=20)
"""
for m in maps:
if isinstance(m, dict):
entries.update(m)
else:
entries.update(m.__dict__)
if isinstance(x, dict):
x.update(entries)
else:
x.__dict__.update(entries)
return x
# ______________________________________________________________________________
# Functions on Sequences (mostly inspired by Common Lisp)
# NOTE: Sequence functions (count_if, find_if, every, some) take function
# argument first (like reduce, filter, and map).
def removeall(item, seq):
    """Return a copy of seq (or string) with all occurences of item removed.
    >>> removeall(3, [1, 2, 3, 3, 2, 1, 3])
    [1, 2, 2, 1]
    >>> removeall(4, [1, 2, 3])
    [1, 2, 3]
    """
    # Strings get the fast built-in replace; everything else is filtered.
    if isinstance(seq, str):
        return seq.replace(item, '')
    return list(filter(lambda element: element != item, seq))
def unique(seq):
    """Remove duplicate elements from seq. Assumes hashable elements.
    The result order is arbitrary.
    >>> sorted(unique([1, 2, 3, 2, 1]))
    [1, 2, 3]
    """
    deduplicated = set(seq)
    return list(deduplicated)
def product(numbers):
    """Return the product of the numbers (1 for an empty sequence).
    >>> product([1,2,3,4])
    24
    """
    # BUGFIX: the old body called the global `reduce`, which is not a builtin
    # in Python 3 (and the functools import in this file is commented out),
    # so every call raised NameError. An explicit loop is equivalent.
    result = 1
    for n in numbers:
        result = result * n
    return result
def count_if(predicate, seq):
    """Count the number of elements of seq for which the predicate is true.
    >>> count_if(callable, [42, None, max, min])
    2
    """
    # BUGFIX: the old reduce-based body needed the global `reduce`, which is
    # not a builtin in Python 3 -> NameError. A generator sum is equivalent.
    return sum(1 for x in seq if predicate(x))
def find_if(predicate, seq):
    """If there is an element of seq that satisfies predicate; return it.
    Returns None when no element matches.
    >>> find_if(callable, [3, min, max])
    <built-in function min>
    >>> find_if(callable, [1, 2, 3])
    """
    return next((x for x in seq if predicate(x)), None)
def every(predicate, seq):
    """True if every element of seq satisfies predicate.
    >>> every(callable, [min, max])
    True
    >>> every(callable, [min, 3])
    False
    """
    return all(predicate(x) for x in seq)
def some(predicate, seq):
    """If some element x of seq satisfies predicate(x), return predicate(x).
    Returns False when no element produces a truthy value.
    >>> some(callable, [min, 3])
    True
    >>> some(callable, [2, 3])
    False
    """
    for value in map(predicate, seq):
        if value:
            return value
    return False
def isin(elt, seq):
    """Like (elt in seq), but compares with is, not ==.
    >>> e = []; isin(e, [1, e, 3])
    True
    >>> isin(e, [1, [], 3])
    False
    """
    return any(x is elt for x in seq)
# ______________________________________________________________________________
# Functions on sequences of numbers
# NOTE: these take the sequence argument first, like min and max,
# and like standard math notation: \sigma (i = 1..n) fn(i)
# A lot of programing is finding the best value that satisfies some condition;
# so there are three versions of argmin/argmax, depending on what you want to
# do with ties: return the first one, return them all, or pick at random.
def argmin(seq, fn):
    """Return an element with lowest fn(seq[i]) score; tie goes to first one.
    >>> argmin(['one', 'to', 'three'], len)
    'to'
    """
    # The builtin min with a key has exactly this tie-breaking behavior.
    return min(seq, key=fn)
def argmin_list(seq, fn):
    """Return a list of elements of seq[i] with the lowest fn(seq[i]) scores.
    >>> argmin_list(['one', 'to', 'three', 'or'], len)
    ['to', 'or']
    """
    # Score everything once, then keep the elements that hit the minimum,
    # preserving their original order.
    scores = [fn(x) for x in seq]
    best_score = min(scores)
    return [x for x, score in zip(seq, scores) if score == best_score]
def argmin_random_tie(seq, fn):
    """Return an element with lowest fn(seq[i]) score; break ties at random.
    Thus, for all s,f: argmin_random_tie(s, f) in argmin_list(s, f)"""
    # Single-pass reservoir sampling over the tied minimum: n counts how many
    # elements currently share the best score, and each tying element replaces
    # `best` with probability 1/n, giving a uniform choice among ties.
    # The first element always ties itself (n becomes 1), so `best` is bound
    # before it is read.
    best_score = fn(seq[0]); n = 0
    for x in seq:
        x_score = fn(x)
        if x_score < best_score:
            best, best_score = x, x_score; n = 1
        elif x_score == best_score:
            n += 1
            if random.randrange(n) == 0:
                best = x
    return best
def argmax(seq, fn):
    """Return an element with highest fn(seq[i]) score; tie goes to first one.
    >>> argmax(['one', 'to', 'three'], len)
    'three'
    """
    # The builtin max with a key has exactly this tie-breaking behavior.
    return max(seq, key=fn)
def argmax_list(seq, fn):
    """Return a list of elements of seq[i] with the highest fn(seq[i]) scores.
    >>> argmax_list(['one', 'three', 'seven'], len)
    ['three', 'seven']
    """
    # Score everything once, then keep the elements that hit the maximum,
    # preserving their original order.
    scores = [fn(x) for x in seq]
    best_score = max(scores)
    return [x for x, score in zip(seq, scores) if score == best_score]
def argmax_random_tie(seq, fn):
    "Return an element with highest fn(seq[i]) score; break ties at random."
    # Negating the score turns argmin_random_tie's minimization into
    # maximization of fn; ties are still broken uniformly at random.
    return argmin_random_tie(seq, lambda x:-fn(x))
# ______________________________________________________________________________
# Statistical and mathematical functions
def histogram(values, mode=0, bin_function=None):
    """Return a list of (value, count) pairs, summarizing the input values.
    Sorted by increasing value, or if mode=1, by decreasing count.
    If bin_function is given, map it over values first."""
    if bin_function: values = list(map(bin_function, values))
    bins = {}
    for val in values:
        # BUGFIX: was bins.get_ontology(val, 0) -- an undefined dict method
        # that made every call raise AttributeError.
        bins[val] = bins.get(val, 0) + 1
    if mode:
        return sorted(list(bins.items()), key=lambda v: v[1], reverse=True)
    else:
        return sorted(bins.items())
def log2(x):
    """Base 2 logarithm.
    >>> log2(1024)
    10.0
    """
    # math.log2 (Python 3.2+; this file is already 2to3-converted) is exact
    # for powers of two, unlike the old log10(x)/log10(2) quotient.
    return math.log2(x)
def mode(values):
    """Return the most common value in the list of values.
    >>> mode([1, 2, 3, 2])
    2
    """
    # histogram(..., mode=1) sorts by decreasing count; the head is the mode.
    counts_descending = histogram(values, mode=1)
    most_common_pair = counts_descending[0]
    return most_common_pair[0]
def median(values):
    """Return the middle value, when the values are sorted.
    If there are an even number of elements, try to average the middle two.
    If they can't be averaged (e.g. they are strings), choose one at random.
    >>> median([10, 100, 11])
    11
    >>> median([1, 2, 3, 4])
    2.5
    """
    n = len(values)
    values = sorted(values)
    if n % 2 == 1:
        # BUGFIX: n / 2 is float division under Python 3 and raised TypeError
        # when used as an index; use integer division.
        return values[n // 2]
    else:
        middle2 = values[(n // 2) - 1:(n // 2) + 1]
        try:
            # Average of the two middle values (inlined; identical to the old
            # mean(middle2) for exactly two elements).
            return sum(middle2) / 2.0
        except TypeError:
            # Non-numeric values (e.g. strings) can't be averaged.
            return random.choice(middle2)
def mean(values):
    """Return the arithmetic average of the values."""
    total = float(sum(values))
    return total / len(values)
def stddev(values, meanval=None):
    """The sample standard deviation of a set of values (n-1 denominator).
    Pass in the mean if you already know it."""
    if meanval is None:
        meanval = mean(values)
    squared_deviations = [(v - meanval) ** 2 for v in values]
    return math.sqrt(sum(squared_deviations) / (len(values) - 1))
def dotproduct(X, Y):
    """Return the sum of the element-wise product of vectors x and y.
    >>> dotproduct([1, 2, 3], [1000, 100, 10])
    1230
    """
    return sum(x * y for x, y in zip(X, Y))
def vector_add(a, b):
    """Component-wise addition of two vectors.
    >>> vector_add((0, 1), (8, 9))
    (8, 10)
    """
    return tuple(x + y for x, y in zip(a, b))
def vector_mul(k, a):
    """Multiplication of a vector by a scalar.
    >>> vector_mul(2, (1, 2))
    (2, 4)
    """
    return tuple(k * component for component in a)
def probability(p):
    "Return true with probability p."
    return random.uniform(0.0, 1.0) < p
def num_or_str(x):
    """The argument is a string; convert to a number if possible, or strip it.
    >>> num_or_str('42')
    42
    >>> num_or_str(' 42x ')
    '42x'
    """
    # Already a number (inlined isnumber: anything with __int__ counts).
    if hasattr(x, '__int__'):
        return x
    # Try int first so '42' stays integral, then float, then give up.
    for convert in (int, float):
        try:
            return convert(x)
        except ValueError:
            pass
    return str(x).strip()
def normalize(numbers, total=1.0):
    """Multiply each number by a constant such that the sum is 1.0 (or total).
    >>> normalize([1,2,1])
    [0.25, 0.5, 0.25]
    """
    scale = total / sum(numbers)
    return [scale * n for n in numbers]
# # OK, the following are not as widely useful utilities as some of the other
# # functions here, but they do show up wherever we have 2D grids: Wumpus and
# # Vacuum worlds, TicTacToe and Checkers, and markov decision Processes.
# Unit direction vectors (E, N, W, S) in counter-clockwise order; turn_left
# and turn_right rotate by stepping through this list.
orientations = [(1, 0), (0, 1), (-1, 0), (0, -1)]
def turn_right(orientation):
    """Return the orientation one step clockwise of the given one."""
    idx = orientations.index(orientation)
    return orientations[idx - 1]
def turn_left(orientation):
    """Return the orientation one step counter-clockwise of the given one."""
    idx = orientations.index(orientation)
    return orientations[(idx + 1) % len(orientations)]
def distance(point_a, point_b):
    "The distance between two (x, y) points."
    # Renamed the 2to3 artifact parameters (xxx_todo_changeme) to real names;
    # callers pass the two points positionally.
    (ax, ay) = point_a
    (bx, by) = point_b
    return math.hypot((ax - bx), (ay - by))
def distance2(point_a, point_b):
    "The square of the distance between two (x, y) points."
    # Renamed the 2to3 artifact parameters (xxx_todo_changeme2/3) to real
    # names; callers pass the two points positionally.
    (ax, ay) = point_a
    (bx, by) = point_b
    return (ax - bx) ** 2 + (ay - by) ** 2
def clip(vector, lowest, highest):
    """Return vector, except if any element is less than the corresponding
    value of lowest or more than the corresponding value of highest, clip to
    those values.
    >>> clip((-1, 10), (0, 0), (9, 9))
    (0, 9)
    """
    clipped = [min(max(v, lo), hi)
               for v, lo, hi in zip(vector, lowest, highest)]
    return type(vector)(clipped)
# ______________________________________________________________________________
# Misc Functions
def printf(format, *args):
    """Format args with the first argument as format string, and write.
    Return the last arg, or format itself if there are no args."""
    sys.stdout.write(str(format) % args)
    # BUGFIX: the old `if_(args, args[-1], format)` evaluated args[-1]
    # eagerly, raising IndexError whenever printf was called with no args.
    return args[-1] if args else format
def caller(n=1):
    """Return the name of the calling function n levels up in the frame stack.
    >>> caller(0)
    'caller'
    >>> def f():
    ...     return caller()
    >>> f()
    'f'
    """
    import inspect
    outer_frames = inspect.getouterframes(inspect.currentframe())
    return outer_frames[n][3]
def memoize(fn, slot=None):
    """Memoize fn: make it remember the computed value for any argument list.
    If slot is specified, store result in that slot of first argument.
    If slot is false, store results in a dictionary."""
    if slot:
        def memoized_fn(obj, *args):
            # Cached on the object itself; extra args are ignored on a hit.
            try:
                return getattr(obj, slot)
            except AttributeError:
                val = fn(obj, *args)
                setattr(obj, slot, val)
                return val
    else:
        def memoized_fn(*args):
            # The cache lives as an attribute so callers can inspect/reset it.
            store = memoized_fn.cache
            if args not in store:
                store[args] = fn(*args)
            return store[args]
        memoized_fn.cache = {}
    return memoized_fn
def if_(test, result, alternative):
    """Like C++ and Java's (test ? result : alternative), except
    both result and alternative are always evaluated. However, if
    either evaluates to a function, it is applied to the empty arglist,
    so you can delay execution by putting it in a lambda.
    >>> if_(2 + 2 == 4, 'ok', lambda: expensive_computation())
    'ok'
    """
    # BUGFIX: collections.Callable was removed in Python 3.10; the builtin
    # callable() is the supported equivalent.
    if test:
        return result() if callable(result) else result
    else:
        return alternative() if callable(alternative) else alternative
def name(object):
    "Try to find some reasonable name for the object."
    # Try, in order: an explicit .name, the __name__, the class's __name__.
    for candidate in (getattr(object, 'name', 0),
                      getattr(object, '__name__', 0),
                      getattr(getattr(object, '__class__', 0), '__name__', 0)):
        if candidate:
            return candidate
    return str(object)
def isnumber(x):
    "Is x a number? We say it is if it has a __int__ method."
    return getattr(x, '__int__', None) is not None
def issequence(x):
    "Is x a sequence? We say it is if it has a __getitem__ method."
    return getattr(x, '__getitem__', None) is not None
# def print_table(table, header=None, sep=' ', numfmt='%g'):
# """Print a list of lists as a table, so that columns line up nicely.
# header, if specified, will be printed as the first row.
# numfmt is the format for all numbers; you might want e.g. '%6.2f'.
# (If you want different formats in differnt columns, don't use print_table.)
# sep is the separator between columns."""
# justs = [if_(isnumber(x), 'rjust', 'ljust') for x in table[0]]
# if header:
# table = [header] + table
# table = [[if_(isnumber(x), lambda: numfmt % x, x) for x in row]
# for row in table]
# maxlen = lambda seq: max(list(map(len, seq)))
# sizes = list(map(maxlen, list(zip(*[list(map(str, row)) for row in table]))))
# for row in table:
# for (j, size, x) in zip(justs, sizes, row):
# print(getattr(str(x), j)(size), sep=' ')
# print()
def AIMAFile(components, mode='r'):
    "Open a file based at the AIMA root directory."
    import utils  # @UnresolvedImport
    root = os.path.dirname(utils.__file__)
    return open(os.path.join(root, *components), mode)
def DataFile(name, mode='r'):
    "Return a file in the AIMA /data directory."
    path_parts = ['..', 'data', name]
    return AIMAFile(path_parts, mode)
# ______________________________________________________________________________
# Queues: Stack, FIFOQueue, PriorityQueue
class Queue:
    """Queue is an abstract class/interface. There are three types:
    Stack(): A Last In First Out Queue.
    FIFOQueue(): A First In First Out Queue.
    PriorityQueue(lt): Queue where items are sorted by lt, (default <).
    Each type supports the following methods and functions:
    q.append(item) -- add an item to the queue
    q.extend(items) -- equivalent to: for item in items: q.append(item)
    q.pop() -- return the top item from the queue
    len(q) -- number of items in q (also q.__len())
    Note that isinstance(Stack(), Queue) is false, because we implement stacks
    as lists. If Python ever gets interfaces, Queue will be an interface."""
    def __init__(self):
        abstract  # @UndefinedVariable
    def extend(self, items):
        # Delegate to the subclass's append, one item at a time.
        for item in items:
            self.append(item)
def Stack():
    """Return an empty list, suitable as a Last-In-First-Out Queue."""
    return list()
class FIFOQueue(Queue):
    """A First-In-First-Out Queue backed by a list with a moving start index."""
    def __init__(self):
        self.A = []
        self.start = 0
    def append(self, item):
        self.A.append(item)
    def __len__(self):
        return len(self.A) - self.start
    def extend(self, items):
        self.A.extend(items)
    def pop(self):
        item = self.A[self.start]
        self.start += 1
        # Amortized compaction: once more than half of A (and at least 6
        # slots) is consumed, drop the dead prefix in one slice.
        if self.start > 5 and self.start > len(self.A) / 2:
            self.A = self.A[self.start:]
            self.start = 0
        return item
class PriorityQueue(Queue):
    """A queue in which the minimum (or maximum) element (as determined by f and
    order) is returned first. If order is min, the item with minimum f(x) is
    returned first; if order is max, then it is the item with maximum f(x)."""
    def __init__(self, order=min, f=lambda x: x):
        # Inlined the module-level update() helper; same three attributes.
        self.A = []
        self.order = order
        self.f = f
    def append(self, item):
        # A stays sorted by (priority, item), so both ends pop in order.
        bisect.insort(self.A, (self.f(item), item))
    def __len__(self):
        return len(self.A)
    def pop(self):
        index = 0 if self.order == min else -1
        return self.A.pop(index)[1]
def flatten(iterable):
    """Lazily yield every item of every sub-iterable of *iterable*, in order."""
    return (item for subseq in iterable for item in subseq)
def display(message, *args, **kw):
    """Write *message* (%-formatted with *args* if any) plus a terminator.

    Keyword options:
        out  -- output stream (default sys.stdout)
        term -- terminator appended after the message (default newline)

    On any formatting/write failure, fall back to pprint-ing the args
    rather than propagating the error (best-effort diagnostics output).
    """
    out = kw.get('out', sys.stdout)
    term = kw.get('term', '\n')
    try:
        out.write(message if not args else message % args)
        out.write(term)
        out.flush()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed so those still propagate.
        pprint(args)
DEBUG = False  # Module-wide switch: set True to enable debug() output.

def debug(fmt, *args):
    """Display *fmt* %-formatted with *args*, but only when DEBUG is true."""
    if DEBUG:
        # Bug fix: was display(fmt, args), which passed the whole args
        # tuple as a single format argument and broke any format string
        # expecting more than one value.
        display(fmt, *args)
class Unimplmented(Exception):
    # NOTE(review): name is a misspelling of "Unimplemented", but it is part
    # of the public interface (raised by abstract() below), so it is kept.
    pass
def abstract():
    """Raise Unimplmented; call from methods a subclass must override."""
    raise Unimplmented()
def interpreter():
    """Describe the running interpreter, e.g. "Python 2.7.18 (...)".

    Falls back to the platform name when sys.executable is unset.
    """
    name = basename(sys.executable or sys.platform).capitalize()
    return '%s %s' % (name, sys.version)
| |
#!/usr/bin/env python
#
# PyGab - Python Jabber Framework
# Copyright (c) 2008, Patrick Kennedy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import with_statement
import logging
import logging.handlers
import os
import re
import sys
import traceback
from common import const, mounts, utils
from common.ini import iMan
# Plugin import/execution errors get their own rotating log file, placed
# under the active bot module's directory (named by utils.get_module()).
_plugin_log = logging.getLogger('pygab.plugins')
_handler = logging.handlers.RotatingFileHandler(
    os.path.join('.', utils.get_module(), 'plugin_errors.log'),
    # delay=True: the file is only created on first write.
    maxBytes=256000, backupCount=3, encoding='utf-8', delay=True
)
_handler.setLevel(logging.ERROR)  # only ERROR and above reach the file
_plugin_log.addHandler(_handler)
def attach_hooks(hook_name=''):
    """Decorator factory wrapping a method with pre- and post- hook calls.

    The wrapped method fires '<name>_pre' before running and '<name>_post'
    after, where <name> is *hook_name* or, if empty, the method's __name__.
    """
    def decorator(func):
        def wrapper(self, *args):
            name = hook_name or func.__name__
            self.hook('%s_pre' % name, *args)
            func(self, *args)
            self.hook('%s_post' % name, *args)
        return wrapper
    return decorator
def attach_post_hook(hook_name=''):
    """Decorator factory wrapping a method with only the -post hook call.

    For use when a critical check must run before any pre- hook and is
    therefore written inside the method body itself,
    e.g. ``if msg.sender == bot.jid``.
    """
    def decorator(func):
        def wrapper(self, *args):
            func(self, *args)
            name = hook_name if hook_name else func.__name__
            self.hook(name + '_post', *args)
        return wrapper
    return decorator
class PluginFramework(object):
    """
    Easily integrate plugins into any bot.

    NOTE(review): methods here also use self.active_user, self.error,
    self.sys, self.redirect_check and self.mimic_check, none of which are
    defined in this class -- it is evidently intended to be mixed into a
    bot class that provides them; confirm against the bot implementations.
    """
    def __init__(self, folder_name="plugins", name_format="plugin_%s.py"):
        #Plugin hashing dictionary
        # Maps plugin name -> hash() of its source text; used to skip
        # re-executing plugins whose source has not changed.
        self._pluginhash = {}
        # Search roots: the active bot module's folder first, then the
        # framework root ('').
        self.pluginpaths = [utils.get_module(), '']
        self.folder_name = folder_name
        self.name_format = name_format

    def get_plugin_path(self, name):
        """
        Return the first valid path, other wise return None if no path was found.
        """
        # for/else: returning inside the loop short-circuits on the first
        # generated path; the else clause runs only if nothing was yielded.
        for path in self.gen_plugin_paths(self.pluginpaths, name):
            return path
        else:
            return None

    def get_plugin_paths(self, name):
        """
        Return the first valid path, other wise return None if no path was found.
        """
        # NOTE(review): despite the (copy-pasted) docstring, this returns
        # ALL matching paths as a list (possibly empty), never None.
        return list(self.gen_plugin_paths(self.pluginpaths, name))

    def gen_plugin_paths(self, plugin_paths, name):
        """
        Generate valid plugin paths.
        """
        # Yield the absolute path of <folder>/<folder_name>/<name_format % name>
        # for each search root where that file actually exists.
        for folder in plugin_paths:
            plug_path = os.path.abspath(
                os.path.join(
                    '.', folder, self.folder_name,
                    self.name_format % name
                )
            )
            if os.path.exists(plug_path):
                yield plug_path

    def plugin_changed(self, plugin_name, plugin_source=None):
        """Return True if a plugin's source has changed"""
        # If the source wasn't supplied, read it from the plugin's file.
        if not plugin_source:
            path_ = self.get_plugin_path(plugin_name)
            with open(path_, "r") as f:
                plugin_source = f.read()
        # Never-loaded plugins default to 0 and therefore always count as
        # "changed".
        return self._pluginhash.get(plugin_name, 0) != hash(plugin_source)

    def load_plugins(self, plugins):
        """load_plugins(plugins: list<str>) -> list

        Load each plugin name passed in `plugins`
        Return a list of successfully loaded plugins.
        """
        loaded = []
        for plugin_name in plugins:
            if self.load_plugin(plugin_name):
                loaded.append(plugin_name)
        return loaded

    def load_plugin(self, name):
        # Load the named plugin from the first path where execution
        # succeeds.  Returns True on success, None otherwise (callers
        # treat the result as a boolean).
        paths = self.get_plugin_paths(name)
        if not paths:
            # TODO: Add check to see if the bot is connected before trying to
            # send errors to people.
            if self.active_user:
                self.error(self.active_user, 'The plugin "plugin_%s.py" could not be found.' % name)
            return
        plugin_namespace = {}
        for path in paths:
            plugin_namespace["__file__"] = path
            try:
                if self._load_plugin(name, path, plugin_namespace):
                    return True
            except:
                # Best-effort recovery: report, then unload whatever the
                # partially-executed plugin managed to register.
                traceback.print_exc()
                print '\n'
                self._unload_plugin(path)
                #utils.debug('plugins', 'There was an error importing the plugin. A report has been logged.')
                _plugin_log.error('There was an error importing %s\n%s' % (name, traceback.format_exc()))
                #utils.confirmdir("errors")
                #with file(os.path.join('.', 'errors', "PluginError-%s.log" % self.module), "a+") as pluglog:
                #    print >>pluglog, "\n Plugin error log for: ", plugin_name
                #    traceback.print_exc(None, pluglog)
        # If the plugin has any initialization to be run, handle that here.
        # NOTE(review): this block is only reached when no path returned
        # True (e.g. the plugin was unchanged) -- on a successful fresh
        # load the method returns early above, skipping the initializer.
        # Confirm whether that is intentional.
        initializer = mounts.PluginInitializers.plugins.get(name)
        if initializer:
            initializer(self).initialize()

    def _load_plugin(self, name, path, namespace):
        """load_plugin(path_: str) -> bool

        Load `path_` and attempt to execute.
        Return True if it was executed.
        Return False if no changes were made (ie. not executed).
        """
        with open(path, "r") as f:
            a = f.read()
            # Skip plugins that haven't been updated.
            if not self.plugin_changed(name, a):
                return False
            # Replicate __file__ in the plugin, since it isn't set by the
            # interpreter when it executes a string.
            # We're using __file__ to know what command classes to unload.
            exec compile(a, 'plugin_%s.py' % name, 'exec') in namespace
            #utils.debug('core', "Loading Plugin (%s)" % path_)
            _plugin_log.info("Loading Plugin (%s)" % path)
            # Remember the hash so an unchanged plugin is skipped next time.
            self._pluginhash[name] = hash(a)
            return True

    def unload_plugins(self, plugins):
        """unload_plugins(plugins: list<str>) -> list

        Unload each plugin name passed in `plugins`
        Return a list of successfully unloaded plugins.
        """
        unloaded = []
        for plugin_name in plugins:
            if plugin_name not in self._pluginhash:
                self.error(self.active_user, "The %s plugin hasn't been loaded or was"
                           " misspelled." % plugin_name)
                continue
            plugin_path = self.get_plugin_path(plugin_name)
            if not plugin_path:
                self.error(self.active_user, "The %s plugin is loaded but I can't find the"
                           " file to unload it." % plugin_name)
                continue
            self._unload_plugin(plugin_path)
            del self._pluginhash[plugin_name]
            unloaded.append(plugin_name)
        return unloaded

    def _unload_plugin(self, path_):
        # Tear down everything a plugin registered, keyed by its file path:
        # its initializer, its commands and its hooks.
        utils.debug('core', "Unloading Plugin (%s)" % path_)
        initializer = mounts.PluginInitializers.plugins.get(path_)
        if initializer:
            # Uninstantiated mounts (still class objects) are removed from
            # the registry; instantiated ones get their __exit__ cleanup.
            if isinstance(initializer, type):
                initializer.remove(initializer)
            else:
                initializer.__exit__()
        for cmd in mounts.CommandMount.get_plugin_list(file=path_):
            if isinstance(cmd, type):
                cmd.remove(cmd)
            else:
                cmd.__exit__()
        for hook in mounts.HookMount.get_plugin_list(file=path_):
            if isinstance(hook, type):
                hook.remove(hook)
            else:
                hook.__exit__()

    def hook(self, loc, *args, **kwargs):
        '''hook(str, loc, *args, **kwargs) -> bool

        All hooks at 'loc' are processed with the passed args.
        If any hook returns a True value hook will return True to signal the
        calling function to break execution.
        '''
        # If True the calling function should break execution
        break_ = False
        for hook in mounts.HookMount.get_plugin_list(loc=loc):
            # Class objects are types while class instances are not.
            # This means if the hook is not a type it's already been initialized
            if isinstance(hook, type):
                # Initialize the hook to define it's default variables.
                hook = hook(self)
            # Process the next frame of the hook's generator.
            break_ |= bool(hook.process(*args, **kwargs))
        return break_

        # NOTE(review): everything below is unreachable dead code (it sits
        # after the unconditional `return break_` above) -- an older
        # critical/persist dispatch scheme kept for reference.
        for hook in mounts.HookMount.get_plugin_list(
                loc=loc, critical=True, persist=None):
            if hook(self).run(*args, **kwargs):
                return False
        for hook in mounts.HookMount.get_plugin_list(
                loc=loc, persist=True, critical=None):
            hook(self).run(*args, **kwargs)
        for hook in mounts.HookMount.get_plugin_list(
                loc=loc, persist=None, critical=None):
            if hook(self).run(*args, **kwargs):
                return False
        return True

    def command_depreciated(self, user, text, msg):
        # Legacy command dispatcher: parses "cmd args" text, applies the
        # redirect (<<name>>) and mimic ([name]) prefixes, then looks up
        # the mounted command plugin and runs it with rank-based
        # authorization.
        args = ''
        text = text.strip()
        if " " in text:
            cmd, args = text.split(" ",1)
            cmd = cmd.lower()
        else:
            cmd = text.lower()

        #FIXME: This is a work around for shlex's poor unicode support.
        #args = unicode(args, 'utf-8', 'replace')
        args = args.encode('utf-8', 'replace')

        # <<name>> Prefix. Used by the bot to redirect a whispers output to <name>
        m = self.redirect_check.search(cmd)
        if m:
            self.redirect_to_user = m.group('user')
            cmd = self.redirect_check.sub('', cmd)

        # [<name>] Prefix. Replaces the calling user with the jid of <name>.
        m = self.mimic_check.search(cmd)
        if m and utils.isadmin(user):
            user = utils.getjid(m.group('user'))
            cmd = self.mimic_check.sub('', cmd)

        try:
            cmd_func = mounts.CommandMount.plugins.get(cmd)
            if not cmd_func:
                self.error(user, "Unknown command, try !help")
                return

            # Class objects are types while class instances are not.
            # When cmd_func is not a type it's already been initialized
            if isinstance(cmd_func, type):
                # Initialize the hook to define it's default variables.
                cmd_func = cmd_func(self)

            #assert isinstance(cmd, CommandMount)

            authorized = True
            if cmd_func.rank in [const.RANK_USER, const.RANK_HIDDEN]:
                pass
            elif cmd_func.rank == const.RANK_MOD:
                if not utils.ismod(user) or not utils.isadmin(user):
                    authorized = False
                    self.error(user, "You must be a moderator to use that command.")
            elif cmd_func.rank == const.RANK_ADMIN:
                if not utils.isadmin(user):
                    authorized = False
                    self.error(user, "You must be an admin to use that command.")
            else:
                # Any unrecognized rank constant is treated as unusable.
                authorized = False
                self.error(user, "Unknown command, try !help")

            if authorized:
                cmd_func.process(user, args)

        except const.CommandHelp, args:
            # Commands raise CommandHelp to have their docstring shown.
            self.sys(user, cmd_func.__doc__)

        except const.CommandError, args:
            self.error(user, 'There was a problem with your command: %s Sorry!' % cmd)

        except StopIteration:
            # Generator-based commands signal normal completion this way.
            pass

        except Exception, e:
            print 'An error happened in the command: %s' % cmd
            traceback.print_exc()
            self.error(user, 'There was a problem with your command: %s. Sorry! \n'
                'Exception: %r' % (cmd, e))
| |
# -*- coding: utf-8 -*-
'''
Manage and query NPM packages.
'''
from __future__ import absolute_import
try:
from shlex import quote as _cmd_quote # pylint: disable=E0611
except ImportError:
from pipes import quote as _cmd_quote
# Import python libs
import json
import logging
import distutils.version # pylint: disable=import-error,no-name-in-module
# Import salt libs
import salt.utils
import salt.modules.cmdmod
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)

# Function alias to make sure not to shadow built-in's
# (Salt exposes list_ to the CLI under the name "list".)
__func_alias__ = {
    'list_': 'list'
}
def __virtual__():
    '''
    Only work when npm is installed.

    Returns True when a usable npm binary is found, otherwise a
    (False, reason) tuple per Salt's loader convention.
    '''
    try:
        if salt.utils.which('npm') is None:
            return (False, 'npm execution module could not be loaded '
                    'because the npm binary could not be located')
        _check_valid_version()
        return True
    except CommandExecutionError as exc:
        return (False, str(exc))
def _check_valid_version():
    '''
    Check the version of npm to ensure this module will work. Currently
    npm must be at least version 1.2.

    Raises CommandExecutionError when the installed npm is too old.
    '''
    # pylint: disable=no-member
    installed = distutils.version.LooseVersion(
        salt.modules.cmdmod.run('npm --version', python_shell=True))
    minimum = distutils.version.LooseVersion('1.2')
    # pylint: enable=no-member
    if installed < minimum:
        raise CommandExecutionError(
            '\'npm\' is not recent enough({0} < {1}). Please Upgrade.'.format(
                installed, minimum
            )
        )
def install(pkg=None,
            pkgs=None,
            dir=None,
            runas=None,
            registry=None,
            env=None,
            dry_run=False,
            silent=True):
    '''
    Install an NPM package.

    If no directory is specified, the package will be installed globally. If
    no package is specified, the dependencies (from package.json) of the
    package in the given directory will be installed.

    pkg
        A package name in any format accepted by NPM, including a version
        identifier

    pkgs
        A list of package names in the same format as the ``name`` parameter

        .. versionadded:: 2014.7.0

    dir
        The target directory in which to install the package, or None for
        global installation

    runas
        The user to run NPM with

    registry
        The NPM registry to install the package from.

        .. versionadded:: 2014.7.0

    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.

        .. versionadded:: 2014.7.0

    dry_run
        Whether or not to run NPM install with --dry-run flag.

        .. versionadded:: 2015.8.4

    silent
        Whether or not to run NPM install with --silent flag.

        .. versionadded:: 2015.8.5

    CLI Example:

    .. code-block:: bash

        salt '*' npm.install coffee-script

        salt '*' npm.install coffee-script@1.0.1
    '''
    # Protect against injection
    if pkg:
        pkg = _cmd_quote(pkg)
    if pkgs:
        pkg_list = []
        for item in pkgs:
            pkg_list.append(_cmd_quote(item))
        pkgs = pkg_list
    if registry:
        registry = _cmd_quote(registry)

    cmd = ['npm', 'install']
    if silent:
        cmd.append('--silent')
    # --json is always passed so the output below can be parsed.
    cmd.append('--json')
    if dir is None:
        # Fix: flags previously carried a stray leading space
        # (' --global'), producing doubled spaces in the joined command.
        cmd.append('--global')
    if registry:
        cmd.append('--registry="{0}"'.format(registry))
    if dry_run:
        cmd.append('--dry-run')
    if pkg:
        cmd.append(pkg)
    elif pkgs:
        cmd.extend(pkgs)

    if env is None:
        env = {}

    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            # NOTE(review): b'{0}'.format(...) only works on Python 2
            # (where b'' is str); this module predates Python 3 support.
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})

    cmd = ' '.join(cmd)
    result = __salt__['cmd.run_all'](cmd, python_shell=True, cwd=dir, runas=runas, env=env)

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])

    # npm >1.2.21 is putting the output to stderr even though retcode is 0
    npm_output = result['stdout'] or result['stderr']
    try:
        return json.loads(npm_output)
    except ValueError:
        # Not JSON! Try to coax the json out of it!
        pass

    lines = npm_output.splitlines()
    log.error(lines)

    while lines:
        # Strip all lines until JSON output starts.  Bug fix: guard the
        # inner loop against exhausting the list (previously lines[0]
        # raised IndexError when no line ever started with '{' or '[').
        while lines and not lines[0].startswith('{') and not lines[0].startswith('['):
            lines = lines[1:]
        try:
            return json.loads(''.join(lines))
        except ValueError:
            lines = lines[1:]
    # Still no JSON!! Return the stdout as a string
    return npm_output
def uninstall(pkg,
              dir=None,
              runas=None,
              env=None):
    '''
    Uninstall an NPM package.

    If no directory is specified, the package will be uninstalled globally.

    pkg
        A package name in any format accepted by NPM

    dir
        The target directory from which to uninstall the package, or None for
        global installation

    runas
        The user to run NPM with

    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' npm.uninstall coffee-script
    '''
    # Protect against injection
    if pkg:
        pkg = _cmd_quote(pkg)

    if env is None:
        env = {}

    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})

    # Assemble the command from parts rather than string concatenation.
    parts = ['npm uninstall']
    if dir is None:
        parts.append('--global')
    parts.append('"{0}"'.format(pkg))
    cmd = ' '.join(parts)

    result = __salt__['cmd.run_all'](cmd, python_shell=True, cwd=dir, runas=runas, env=env)

    if result['retcode'] != 0:
        log.error(result['stderr'])
        return False
    return True
def list_(pkg=None,
          dir=None,
          runas=None,
          env=None):
    '''
    List installed NPM packages.

    If no directory is specified, this will return the list of globally-
    installed packages.

    pkg
        Limit package listing by name

    dir
        The directory whose packages will be listed, or None for global
        installation

    runas
        The user to run NPM with

        .. versionadded:: 2014.7.0

    env
        Environment variables to set when invoking npm. Uses the same ``env``
        format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
        function.

        .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' npm.list
    '''
    # Protect against injection
    if pkg:
        pkg = _cmd_quote(pkg)

    if env is None:
        env = {}

    if runas:
        uid = salt.utils.get_uid(runas)
        if uid:
            env.update({'SUDO_UID': b'{0}'.format(uid), 'SUDO_USER': b''})

    parts = ['npm list --silent --json']
    if dir is None:
        parts.append('--global')
    if pkg:
        parts.append('"{0}"'.format(pkg))
    cmd = ' '.join(parts)

    result = __salt__['cmd.run_all'](
        cmd,
        cwd=dir,
        runas=runas,
        env=env,
        python_shell=True,
        ignore_retcode=True)

    # npm will return error code 1 for both no packages found and an actual
    # error. The only difference between the two cases are if stderr is empty
    if result['retcode'] != 0 and result['stderr']:
        raise CommandExecutionError(result['stderr'])
    return json.loads(result['stdout']).get('dependencies', {})
| |
from unittest import TestCase
import copy
import io
import yaml
import itertools
from dax.processor_parser import ProcessorParser
from dax.processors import AutoProcessor
from dax.tests import unit_test_entity_common as common
from dax.tests import unit_test_common_processor_yamls as yamls
from dax import yaml_doc
# test matrix
# ===========
# select keywords
# . foreach, foreach(i), one, some(n), all, malformed
# resources
# . well-formed
# . one, many
# . malformed
# . none, duplicates (intra), duplicates (inter)
# . present / not present
# assessor statuses
# . all statuses
# XNAT-style REST path templates shared by all fixtures below; filled in
# with (project, subject, session[, label][, resource]) via str.format.
sess_path = '/projects/{}/subjects/{}/experiments/{}'
scan_path = '/projects/{}/subjects/{}/experiments/{}/scans/{}'
assessor_path = '/projects/{}/subjects/{}/experiments/{}/assessors/{}'
scan_path_r = scan_path + '/resources/{}'
assessor_path_r = assessor_path + '/out/resources/{}'
class TestResource:
    """Minimal stand-in for an XNAT resource: a label and a file count."""

    def __init__(self, label, file_count):
        self.label_ = label
        self.file_count_ = file_count

    def label(self):
        """Return the resource label (e.g. 'NIFTI')."""
        return self.label_

    def file_count(self):
        """Return the number of files held by the resource."""
        return self.file_count_
class TestArtefact:
    # Duck-typed stand-in for a dax XNAT artefact (scan or assessor);
    # ProcessorParser only interacts with it through the accessor methods.

    def __init__(self):
        # All fields are filled in by OldInit/NewInit, not here.
        self.test_obj_type = None   # 'scan' or 'assessor'
        self.proj = None
        self.subj = None
        self.sess = None
        self.label_ = None
        self.artefact_type = None   # scan type / assessor proctype
        self.quality_ = None
        self.resources_ = None      # list of TestResource
        self.inputs = None          # assessor inputs mapping (assessors only)

    def OldInit(self, test_obj_type, proj, subj, sess, label, artefact_type,
                quality, resources, inputs=None):
        # Tuple-style initializer for the older fixture tables.
        # Returns self so construction can be chained.
        self.test_obj_type = test_obj_type
        self.proj = proj
        self.subj = subj
        self.sess = sess
        self.label_ = label
        self.artefact_type = artefact_type
        self.quality_ = quality
        # resources arrives as (label, file_count) pairs.
        self.resources_ = [TestResource(r[0], r[1]) for r in resources]
        self.inputs = inputs
        return self

    def NewInit(self, proj, subj, sess, artefact):
        # Dict-style initializer for the newer fixture tables.
        if artefact['category'] not in ['scan', 'assessor']:
            raise RuntimeError(
                'Artefact category must be one of scan or assessor')
        self.test_obj_type = artefact['category']
        self.proj = proj
        self.subj = subj
        self.sess = sess
        self.label_ = artefact['name']
        self.artefact_type = artefact['type']
        self.quality_ = artefact['quality']
        self.resources_ =\
            [TestResource(r.restype, len(r.files))
             for r in artefact['resources']]
        # Only assessors carry an inputs mapping.
        if artefact['category'] == 'assessor':
            self.inputs = artefact['artefacts']
        return self

    def project_id(self):
        return self.proj

    def subject_id(self):
        return self.subj

    def session_id(self):
        return self.sess

    def label(self):
        return self.label_

    def full_path(self):
        # XNAT-style REST path; scans and assessors differ only in the
        # final path segment template.
        if self.test_obj_type == 'scan':
            return scan_path.format(
                self.proj, self.subj, self.sess, self.label_)
        elif self.test_obj_type == 'assessor':
            return assessor_path.format(
                self.proj, self.subj, self.sess, self.label_)
        else:
            raise RuntimeError('invalid artefact type')

    def type(self):
        return self.artefact_type

    def quality(self):
        return self.quality_

    def usable(self):
        return self.quality() == 'usable'

    def unusable(self):
        return self.quality() == 'unusable'

    def resources(self):
        return self.resources_

    def get_inputs(self):
        return self.inputs
# Default project/subject/session identifiers shared by the fixtures below.
proj = 'proj1'
subj = 'subj1'
sess = 'sess1'
class TestSession:
    """Duck-typed stand-in for a dax XNAT session holding test artefacts."""

    def __init__(self):
        # Populated by OldInit/NewInit.
        self.scans_ = None
        self.assessors_ = None

    def OldInit(self, proj, subj, sess, scans, asrs):
        # Tuple-style initializer; returns self so construction chains.
        self.project_id_ = proj
        self.subject_id_ = subj
        self.session_id_ = sess
        self.scans_ = [
            TestArtefact().OldInit("scan", s[0], s[1], s[2], s[3], s[4], s[5], s[6])
            for s in scans]
        self.assessors_ = [
            TestArtefact().OldInit("assessor", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7])
            for a in asrs]
        return self

    def NewInit(self, proj, subj, sess, artefacts):
        # Dict-style initializer; splits artefacts into scans and assessors.
        self.project_id_ = proj
        self.subject_id_ = subj
        self.session_id_ = sess
        self.scans_ = []
        self.assessors_ = []
        for a in artefacts:
            artefact = TestArtefact().NewInit(proj, subj, sess, a)
            if a['category'] == 'scan':
                self.scans_.append(artefact)
            else:
                self.assessors_.append(artefact)
        return self

    def scans(self):
        return self.scans_

    def assessors(self):
        return self.assessors_

    def project_id(self):
        return self.project_id_

    def subject_id(self):
        return self.subject_id_

    def session_id(self):
        return self.session_id_

    def full_path(self):
        # Bug fix: previously read the nonexistent attributes self.proj /
        # self.subj / self.sess (those live on TestArtefact) and also
        # dropped the return value, so every call raised AttributeError.
        return sess_path.format(
            self.project_id_, self.subject_id_, self.session_id_)
scan_files = [('SNAPSHOTS', 2), ('NIFTI', 1)]
asr_prefix = '-x-'.join((proj, subj, sess, ''))
asr_files = [
('LABELS', 1), ('PDF', 1), ('BIAS_COR', 1), ('PRIOR', 1), ('SEG', 1),
('STATS', 1), ('SNAPSHOTS', 2), ('OUTLOG', 1), ('PBS', 1)
]
# unit test 1
xnat_scan_contents_1 = [
(proj, subj, sess, "1", "T1W", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "2", "T1w", "unusable", copy.deepcopy(scan_files)),
(proj, subj, sess, "3", "T1", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "4", "T1", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "11", "FLAIR", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "12", "FLAIR", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "21", "X3", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "22", "X3", "usable", copy.deepcopy(scan_files))
]
xnat_scan_contents_t1_no_fl_no_t2 = [
(proj, subj, sess, '1', 'T1', 'usable', copy.deepcopy(scan_files)),
(proj, subj, sess, '2', 'T1', 'usable', copy.deepcopy(scan_files))
]
xnat_scan_contents_no_t1_fl_no_t2 = [
(proj, subj, sess, '11', 'T2', 'usable', copy.deepcopy(scan_files)),
(proj, subj, sess, '12', 'T2', 'usable', copy.deepcopy(scan_files))
]
xnat_assessor_inputs_1 = {
'proc1-asr1': {'scan1': scan_path.format(proj, subj, sess, '1')},
'proc1-asr2': {'scan1': scan_path.format(proj, subj, sess, '2')},
'proc2-asr1': {
'scan1': scan_path.format(proj, subj, sess, '1'),
'scan2': scan_path.format(proj, subj, sess, '11'),
'scan3': [
scan_path.format(proj, subj, sess, '21'),
scan_path.format(proj, subj, sess, '22')
],
'scan4': None,
'asr1': assessor_path.format(proj, subj, sess, 'proc1-asr1')
}
}
xnat_assessor_contents_1 = [
(proj, subj, sess, "proc1-asr1", "proc1", "usable",
copy.deepcopy(asr_files), xnat_assessor_inputs_1['proc1-asr1']),
(proj, subj, sess, "proc1-asr2", "proc1", "usable",
copy.deepcopy(asr_files), xnat_assessor_inputs_1['proc1-asr2']),
(proj, subj, sess, "proc2-asr1", "proc2", "usable",
copy.deepcopy(asr_files), xnat_assessor_inputs_1['proc2-asr1'])
]
scan_gif_parcellation_yaml = """
---
inputs:
default:
spider_path: /home/dax/Xnat-management/comic100_dax_config/pipelines/GIF_parcellation/v3.0.0/Spider_GIF_Parcellation_v3_0_0.py
working_dir: /scratch0/dax/
nipype_exe: perform_gif_propagation.py
db: /share/apps/cmic/GIF/db/db.xml
xnat:
scans:
- name: scan1
types: T1w,MPRAGE,T1,T1W
needs_qc: True
resources:
- resource: NIFTI
varname: t1
- name: scan2
types: FLAIR
select: foreach
resources:
- resource: NIFTI
varname: fl
- name: scan3
types: X3
select: all
- name: scan4
types: X4
select: one
assessors:
- name: asr1
proctypes: proc1
select: foreach(scan2)
resources:
- resource: SEG
varname: seg
command: python {spider_path} --t1 {t1} --fl {fl} --seg {seg} --dbt {db} --exe {nipype_exe}
attrs:
suffix:
xsitype: proc:genProcData
walltime: 24:00:00
memory: 3850
ppn: 4
env: /share/apps/cmic/NiftyPipe/v2.0/setup_v2.0.sh
type: scan
scan_nb: scan11
"""
# unit test 2
xnat_scan_contents_2 = [
(proj, subj, sess, "1", "T1", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "2", "T1", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "11", "FLAIR", "usable", copy.deepcopy(scan_files)),
(proj, subj, sess, "12", "FLAIR", "usable", copy.deepcopy(scan_files))
]
xnat_assessor_inputs_2 = {
'proc1-asr1': {
'scan1': scan_path.format(proj, subj, sess, '1'),
'scan2': scan_path.format(proj, subj, sess, '11')
},
'proc1-asr2': {
'scan1': scan_path.format(proj, subj, sess, '2'),
'scan2': scan_path.format(proj, subj, sess, '12')
}
}
xnat_assessor_contents_2 = [
(proj, subj, sess, "proc1-asr1", "proc1", "usable",
copy.deepcopy(asr_files), xnat_assessor_inputs_2['proc1-asr1']),
(proj, subj, sess, "proc1-asr2", "proc1", "usable",
copy.deepcopy(asr_files), xnat_assessor_inputs_2['proc1-asr2']),
]
processor_yaml_foreach_map = yamls.generate_yaml(
'proc2',
scans=[
{
'name': 'scanx', 'types': 'T1',
'select': 'foreach',
'resources': [
{'type': 'NIFTI', 'name': 't1'}
]
},
{
'name': 'scany', 'types': 'FLAIR',
'select': 'foreach(scanx)',
'resources': [
{'type': 'NIFTI', 'name': 'fl'}
]
},
],
assessors=[]
)
processor_yaml_all = yamls.generate_yaml(
'proc2',
scans=[
{
'name': 'scanx', 'types': 'T1',
'select': 'foreach',
'resources': [
{'type': 'NIFTI', 'name': 't1'}
]
},
{
'name': 'scany', 'types': 'FLAIR',
'select': 'all',
'resources': [
{'type': 'NIFTI', 'name': 'fl'}
]
},
],
assessors=[]
)
processor_yaml_2 = yamls.generate_yaml(
'proc2',
scans=[
{
'name': 'scan_t', 'types': 'T1',
'select': 'from(asr1/scan1)',
'resources': [
{'type': 'NIFTI', 'name': 't1'}
]
},
{
'name': 'scan_f', 'types': 'FLAIR',
'select': 'from(asr1/scan2)',
'resources': [
{'type': 'NIFTI', 'name': 'fl'}
]
}
],
assessors=[{
'name': 'asr1', 'types': 'proc1',
'resources': [
{'type': 'SEG', 'name': 'seg'}
]
}]
)
processor_yaml_from_two_assessors = yamls.generate_yaml(
'proc3',
scans=[
{
'name': 'scan_t1', 'types': 'T1',
'select': 'from(asr2/scan1)',
'resources': [
{'type': 'NIFTI', 'name': 't1'}
]
},
{
'name': 'scan_f', 'types': 'FLAIR',
'select': 'from(asr2/scan2)',
'resources': [
{'type': 'NIFTI', 'name': 'fl'}
]
},
{
'name': 'scan_t2', 'types': 'T2',
'select': 'from(asr1/scan1)',
'resources': [
{'type': 'NIFTI', 'name': 't2'}
]
}
],
assessors=[
{
'name': 'asr1', 'types': 'proc1',
'resources': [
{'type': 'SEG', 'name': 'seg1'}
]
},
{
'name': 'asr2', 'types': 'proc2',
'resources': [
{'type': 'SEG', 'name': 'seg2'}
]
}
]
)
class ArtefactResource:
    """Fixture triple describing one artefact resource.

    restype  -- resource type name (e.g. 'NIFTI')
    required -- whether the resource must be present
    files    -- list of file names in the resource
    """

    def __init__(self, restype, required, files):
        self.restype = restype
        self.required = required
        self.files = files

    def __repr__(self):
        # Bug fix: the format string was missing its closing parenthesis,
        # unlike the matching YamlVariable.__repr__.
        return "{} ({}: {}, {}: {}, {}: {})".format(
            self.__class__.__name__,
            'restype', self.restype,
            'required', self.required,
            'files', self.files
        )
class YamlVariable:
    """Fixture triple describing one YAML-declared processor variable."""

    def __init__(self, restype, varname, required):
        self.restype = restype
        self.varname = varname
        self.required = required

    def __repr__(self):
        cls = self.__class__.__name__
        fields = ('restype', self.restype,
                  'varname', self.varname,
                  'required', self.required)
        return "{} ({}: {}, {}: {}, {}: {})".format(cls, *fields)
class ProcessorTest(TestCase):
    """Smoke test: an AutoProcessor can be built from the GIF YAML above."""

    def test_new_processor(self):
        # Parse the module-level scan_gif_parcellation_yaml and build a
        # processor against the fake XNAT; success == no exception raised.
        yd = yaml_doc.YamlDoc().from_string(scan_gif_parcellation_yaml)
        ap = AutoProcessor(common.FakeXnat, yd)
class ProcessorParserUnitTests(TestCase):
def __generate_scans(self, proj, subj, sess, scan_descriptors):
contents = list()
for s, d in scan_descriptors:
contents.append(
(proj, subj, sess, s, d, "usable", copy.deepcopy(scan_files))
)
return contents
def __run_processor_parser_unit_tests(self,
scan_contents,
assessor_contents,
processor_yaml,
expected=None):
csess = [TestSession().OldInit(proj, subj, sess, scan_contents,
assessor_contents)]
doc = yaml.load((io.StringIO(processor_yaml)))
inputs, inputs_by_type, iteration_sources, iteration_map,\
prior_session_count =\
ProcessorParser.parse_inputs(doc)
print(("inputs =", inputs))
print(("inputs_by_type =", inputs_by_type))
print(("iteration_sources =", iteration_sources))
print(("iteration_map =", iteration_map))
print(("prior_session_count =", prior_session_count))
artefacts = ProcessorParser.parse_artefacts(csess)
print(("artefacts =", artefacts))
artefacts_by_input = \
ProcessorParser.map_artefacts_to_inputs(csess, inputs, inputs_by_type)
print(("artefacts_by_input =", artefacts_by_input))
# variables_to_inputs = \
# ProcessorParser.parse_variables(inputs)
# print "variables_to_inputs =", variables_to_inputs
parameter_matrix = \
ProcessorParser.generate_parameter_matrix(
inputs, iteration_sources, iteration_map,
artefacts, artefacts_by_input)
print(("parameter_matrix =", parameter_matrix))
assessor_parameter_map = \
ProcessorParser.compare_to_existing(csess,
'proc2',
parameter_matrix)
print(("assessor_parameter_map = ", assessor_parameter_map))
if expected is None:
self.assertTrue(False, 'No expected results provided: no test validation')
else:
for p in parameter_matrix:
error = 'entry {} is not in expected; parameter matrix = {}, expected = {}'
self.assertTrue(p in expected, error.format(p, parameter_matrix, expected))
def test_processor_parser_experimental_1(self):
expected = [
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr1',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/1',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr2',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/1',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr1',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/2',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr2',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/2',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr1',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/3',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr2',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/3',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr1',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/4',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']},
{'scan4': None,
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr2',
'scan1': '/projects/proj1/subjects/subj1/experiments/sess1/scans/4',
'scan2': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12',
'scan3': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/21',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/22']}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_1,
xnat_assessor_contents_1,
scan_gif_parcellation_yaml,
expected)
def test_processor_parser_foreach_map(self):
expected = [
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/3',
'scany': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11'},
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/4',
'scany': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12'}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_1,
xnat_assessor_contents_1,
processor_yaml_foreach_map,
expected)
def test_processor_parser_foreach_map_no_fl(self):
expected = [
{}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_t1_no_fl_no_t2,
xnat_assessor_contents_1,
processor_yaml_foreach_map,
expected)
def test_processor_parser_foreach_map_no_t1(self):
expected = [
{}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_no_t1_fl_no_t2,
xnat_assessor_contents_1,
processor_yaml_foreach_map,
expected)
def test_processor_parser_all(self):
expected = [
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/1',
'scany': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/12']},
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/2',
'scany': ['/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'/projects/proj1/subjects/subj1/experiments/sess1/scans/12']},
]
scans = [('1', 'T1'), ('2', 'T1'), ('11', 'FLAIR'), ('12', 'FLAIR')]
self.__run_processor_parser_unit_tests(
self.__generate_scans('proj1', 'subj1', 'sess1', scans),
[],
processor_yaml_all,
expected),
def test_processor_parser_foreach_map(self):
expected = [
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/3',
'scany': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11'},
{'scanx': '/projects/proj1/subjects/subj1/experiments/sess1/scans/4',
'scany': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12'}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_1,
xnat_assessor_contents_1,
processor_yaml_foreach_map,
expected)
def test_processor_parser_experimental_2(self):
expected = [
{'scan_t': '/projects/proj1/subjects/subj1/experiments/sess1/scans/1',
'scan_f': '/projects/proj1/subjects/subj1/experiments/sess1/scans/11',
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr1'},
{'scan_t': '/projects/proj1/subjects/subj1/experiments/sess1/scans/2',
'scan_f': '/projects/proj1/subjects/subj1/experiments/sess1/scans/12',
'asr1': '/projects/proj1/subjects/subj1/experiments/sess1/assessors/proc1-asr2'}
]
self.__run_processor_parser_unit_tests(xnat_scan_contents_2,
xnat_assessor_contents_2,
processor_yaml_2,
expected)
@staticmethod
def __generate_test_matrix(headers, values):
table_values = itertools.product(*values)
table = [dict(zip(headers, r)) for r in table_values]
return table
@staticmethod
def __generate_yaml(entry):
scans = []
assessors = []
for input in entry['yaml_inputs']:
resources = []
for r in input['resources']:
resources.append({
'type': r.restype,
'name': r.varname,
'required': r.required
})
if input['category'] == 'scan':
scans.append({
'name': input['label'],
'types': input['type'],
'select': input['select'],
'select-session': input['select-session'],
'qc': input['needs_qc'],
'resources': resources
})
if input['category'] == 'assessor':
assessors.append({
'name': input['label'],
'types': input['type'],
'select': input['select'],
'select-session': input['select-session'],
'qc': input['needs_qc'],
'resources': resources
})
yaml_src = yamls.generate_yaml(scans=scans)
return yaml_doc.YamlDoc().from_string(yaml_src)
    @staticmethod
    def __generate_one_scan_scenarios():
        # Build every combination of a single session artefact (one scan with
        # varying quality / type / resources) with a single yaml scan input
        # (varying select / select-session / qc / resources). Each combined
        # row is one test scenario for test_one_input.
        artefact_headers = ['xsitype', 'category', 'name', 'quality', 'type',
                            'resources']
        artefact_xsitype = ['xnat:mrScanData']
        artefact_category = ['scan']
        artefact_name = ['1']
        artefact_quality = ['unusable', 'usable', 'preferred']
        artefact_type = ['T1', 'T2']
        artefact_resources = [
            [],
            [ArtefactResource('NIFTI', None, ['images.nii'])],
            [ArtefactResource('NIFTI', False, ['images.nii'])],
            [ArtefactResource('NIFTI', True, ['images.nii'])]
            #[('NIFTI', ['images.nii']), ('SNAPSHOTS', ['snapshot.jpg.gz', 'snapshot(1).jpg.gz'])]
        ]
        artefact_values = [artefact_xsitype, artefact_category, artefact_name,
                           artefact_quality, artefact_type, artefact_resources]
        artefact_matrix = ProcessorParserUnitTests.__generate_test_matrix(
            artefact_headers, artefact_values)
        # Each scenario carries a *list* of artefacts; here always exactly one.
        artefact_matrix = [[i] for i in artefact_matrix]
        yaml_headers = ['category', 'label', 'type', 'select', 'select-session',
                        'needs_qc', 'resources']
        yaml_categories = ['scan']
        yaml_labels = ['scan1']
        yaml_type = ['T1']
        yaml_select = [None, 'foreach']
        yaml_select_session = [None, 'current', 'prior(1)']
        yaml_needs_qc = [None, False, True]
        yaml_resources = [[YamlVariable('NIFTI', 't1', None)],
                          [YamlVariable('NIFTI', 't1', False)],
                          [YamlVariable('NIFTI', 't1', True)]]
        yaml_elems = [yaml_categories, yaml_labels, yaml_type, yaml_select,
                      yaml_select_session, yaml_needs_qc, yaml_resources]
        yaml_matrix = ProcessorParserUnitTests.__generate_test_matrix(
            yaml_headers, yaml_elems
        )
        # Each scenario carries a *list* of yaml inputs; here always exactly one.
        yaml_matrix = [[i] for i in yaml_matrix]
        # Cross the artefact variations with the yaml input variations.
        combined_headers = ['artefacts', 'yaml_inputs']
        combined_values = [artefact_matrix, yaml_matrix]
        combined_matrix = ProcessorParserUnitTests.__generate_test_matrix(
            combined_headers, combined_values
        )
        return combined_matrix
    @staticmethod
    def __create_mocked_xnat(scenario):
        # TODO: unimplemented stub -- presumably intended to build a mocked
        # XNAT connection for the given generated scenario; confirm before
        # relying on it.
        pass
def test_one_input(self):
matrix = ProcessorParserUnitTests.__generate_one_scan_scenarios()
for m in matrix:
csess = TestSession().NewInit('proj1',
'subj1',
'sess1',
m['artefacts'])
yaml_source = ProcessorParserUnitTests.__generate_yaml(m)
try:
parser = ProcessorParser(yaml_source.contents)
parser.parse_session(csess)
print((m, '->', parser.assessor_parameter_map))
except ValueError as err:
if err.message not in\
['yaml processor is missing xnat keyword contents']:
raise
print(('scenario count = ', len(matrix)))
def test_check_valid_mode(self):
input_category = 'scan'
input_name = 'a_scan'
keyword = 'select'
valid_keywords = ['all', 'some']
errors = ProcessorParser._check_valid_mode(
input_category, input_name, keyword, valid_keywords,
{'select': 'all'})
self.assertEqual(errors, [])
errors = ProcessorParser._check_valid_mode(
input_category, input_name, keyword, valid_keywords,
{'select': 'some'})
self.assertEqual(errors, [])
errors = ProcessorParser._check_valid_mode(
input_category, input_name, keyword, valid_keywords,
{'select': 'fish'})
expected =\
[("Error: scan 'a_scan': 'select' has an invalid value 'fish'. "
"It must be one of 'all', 'some'")]
self.assertEqual(errors, expected)
| |
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
import pymongo
from bson.objectid import ObjectId
from pymongo import ReturnDocument
LOG = logging.getLogger(__name__)
class ClinVarHandler(object):
    """Class to handle clinvar submissions for the mongo adapter.

    Relies on the adapter providing two collections:
    ``clinvar_submission_collection`` (submission documents) and
    ``clinvar_collection`` (variant and casedata documents).
    """

    def create_submission(self, institute_id):
        """Create an open clinvar submission for an institute

        Args:
            institute_id(str): an institute ID

        Returns:
            the _id of the newly inserted submission document
        """
        submission_obj = {
            "status": "open",
            "created_at": datetime.now(),
            "institute_id": institute_id,
        }
        LOG.info("Creating a new clinvar submission institute %s", institute_id)
        result = self.clinvar_submission_collection.insert_one(submission_obj)
        return result.inserted_id

    def delete_submission(self, submission_id):
        """Deletes a Clinvar submission object, along with all associated clinvar objects (variants and casedata)

        Args:
            submission_id(str): the ID of the submission to be deleted

        Returns:
            deleted_objects(int): the number of associated objects removed (variants and/or casedata)
            deleted_submissions(int): 1 if it's deleted, 0 if something went wrong
        """
        LOG.info("Deleting clinvar submission %s", submission_id)
        submission_obj = self.clinvar_submission_collection.find_one(
            {"_id": ObjectId(submission_id)}
        )
        # BUG FIX: an unknown submission id used to raise AttributeError on
        # None here; report nothing deleted instead.
        if submission_obj is None:
            return 0, 0

        # Ids of all variant and casedata documents attached to this
        # submission (either list may be absent).
        submission_objects = (submission_obj.get("variant_data") or []) + (
            submission_obj.get("case_data") or []
        )

        # Delete all variants and casedata objects associated with this submission
        result = self.clinvar_collection.delete_many({"_id": {"$in": submission_objects}})
        deleted_objects = result.deleted_count

        # Delete the submission itself
        result = self.clinvar_submission_collection.delete_one({"_id": ObjectId(submission_id)})
        deleted_submissions = result.deleted_count

        return deleted_objects, deleted_submissions

    def get_open_clinvar_submission(self, institute_id):
        """Retrieve the database id of an open clinvar submission for an institute,
        if none is available then create a new submission and return it

        Args:
            institute_id(str): an institute ID

        Returns:
            submission(obj) : an open clinvar submission object
        """
        LOG.info("Retrieving an open clinvar submission for institute %s", institute_id)
        query = dict(institute_id=institute_id, status="open")
        submission = self.clinvar_submission_collection.find_one(query)

        # If there is no open submission for this institute, create one
        if submission is None:
            submission_id = self.create_submission(institute_id)
            submission = self.clinvar_submission_collection.find_one({"_id": submission_id})

        return submission

    def update_clinvar_id(self, clinvar_id, submission_id):
        """saves an official clinvar submission ID in a clinvar submission object

        Args:
            clinvar_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission
            submission_id(str): id of the submission to be updated

        Returns:
            updated_submission(obj): a clinvar submission object, updated
        """
        updated_submission = self.clinvar_submission_collection.find_one_and_update(
            {"_id": ObjectId(submission_id)},
            {"$set": {"clinvar_subm_id": clinvar_id, "updated_at": datetime.now()}},
            upsert=True,
            return_document=pymongo.ReturnDocument.AFTER,
        )
        return updated_submission

    def get_clinvar_id(self, submission_id):
        """Returns the official Clinvar submission ID for a submission object

        Args:
            submission_id(str): id of the submission

        Returns:
            clinvar_subm_id(str): a string with a format: SUB[0-9], or None
                if the user never provided one
        """
        submission_obj = self.clinvar_submission_collection.find_one(
            {"_id": ObjectId(submission_id)}
        )
        # This key does not exist if it was not previously provided by user
        clinvar_subm_id = submission_obj.get("clinvar_subm_id")
        return clinvar_subm_id

    def add_to_submission(self, submission_id, submission_objects):
        """Adds submission_objects to clinvar collection and update the corresponding submission object with their id

        Args:
            submission_id(str): id of the submission to be updated
            submission_objects(tuple): a tuple of 2 elements corresponding to a list of variants and a list of case data objects to add to submission

        Returns:
            updated_submission(obj): an open clinvar submission object, updated
        """
        LOG.info(
            "Adding new variants and case data to clinvar submission '%s'",
            submission_id,
        )

        # Insert variant submission_objects into clinvar collection
        # Loop over the objects
        for var_obj in submission_objects[0]:
            try:
                result = self.clinvar_collection.insert_one(var_obj)
                self.clinvar_submission_collection.update_one(
                    {"_id": submission_id},
                    {"$push": {"variant_data": str(result.inserted_id)}},
                    upsert=True,
                )
            except pymongo.errors.DuplicateKeyError:
                # FIX: typo in log message ("Attepted" -> "Attempted")
                LOG.error("Attempted to insert a clinvar variant which is already in DB!")

        # Insert casedata submission_objects into clinvar collection
        if submission_objects[1]:
            # Loop over the objects
            for case_obj in submission_objects[1]:
                try:
                    result = self.clinvar_collection.insert_one(case_obj)
                    self.clinvar_submission_collection.update_one(
                        {"_id": submission_id},
                        {"$push": {"case_data": str(result.inserted_id)}},
                        upsert=True,
                    )
                except pymongo.errors.DuplicateKeyError:
                    LOG.error(
                        "One or more casedata object is already present in clinvar collection!"
                    )

        # Stamp the submission with the time of this update.
        updated_submission = self.clinvar_submission_collection.find_one_and_update(
            {"_id": submission_id},
            {"$set": {"updated_at": datetime.now()}},
            return_document=pymongo.ReturnDocument.AFTER,
        )
        return updated_submission

    def update_clinvar_submission_status(self, institute_id, submission_id, status):
        """Update the status of a clinvar submission.

        FIX: the original docstring claimed this only sets a submission to
        'closed'; it actually sets any status, and when opening a submission
        first closes every other submission of the institute.

        Args:
            institute_id(str): the institute the submission belongs to
            submission_id(str): the ID of the clinvar submission to update
            status(str): the new status ('open' or 'closed')

        Returns:
            updated_submission(obj): the submission object with the new status
        """
        LOG.info('closing clinvar submission "%s"', submission_id)

        if status == "open":
            # Only one submission may be open per institute: close all
            # submissions for this institute before opening the desired one.
            self.clinvar_submission_collection.update_many(
                {"institute_id": institute_id},
                {"$set": {"status": "closed", "updated_at": datetime.now()}},
            )

        updated_submission = self.clinvar_submission_collection.find_one_and_update(
            {"_id": ObjectId(submission_id)},
            {"$set": {"status": status, "updated_at": datetime.now()}},
            return_document=pymongo.ReturnDocument.AFTER,
        )
        return updated_submission

    def sort_clinvar_case_data(self, variant_list, case_data_list):
        """Sort Case Data for a ClinVar submission reflecting the order of the submission's Variant Data.

        Args:
            variant_list(list): The list of variants in a ClinVar submission (list of dictionaries)
            case_data_list(list): The list of Case info, each relative to a variant in a submission (list of dictionaries)

        Returns:
            sorted_case_data(list): case_data dictionaries sorted according to the order of variants;
                falls back to the unsorted list if no linking_id matched
        """
        sorted_case_data = []
        # Loop over submission variants
        for variant_info in variant_list:
            # Loop over submission case data
            for cdata_info in case_data_list:
                # Match case data to its variant via the shared linking_id
                if cdata_info["linking_id"] != variant_info["linking_id"]:
                    continue
                sorted_case_data.append(cdata_info)
        return sorted_case_data or case_data_list

    def clinvar_submissions(self, institute_id):
        """Collect all open and closed clinvar submissions for an institute

        Args:
            institute_id(str): an institute ID

        Returns:
            submissions(list): a list of clinvar submission objects
        """
        LOG.info("Retrieving all clinvar submissions for institute '%s'", institute_id)
        # get first all submission objects
        query = dict(institute_id=institute_id)
        results = list(self.clinvar_submission_collection.find(query))

        submissions = []
        # Loop over all ClinVar submissions for an institute
        for result in results:
            submission = {}
            cases = {}
            submission["_id"] = result.get("_id")
            submission["status"] = result.get("status")
            submission["institute_id"] = result.get("institute_id")
            submission["created_at"] = result.get("created_at")
            submission["updated_at"] = result.get("updated_at")

            if "clinvar_subm_id" in result:
                submission["clinvar_subm_id"] = result["clinvar_subm_id"]

            # If submission has variants registered
            if result.get("variant_data"):
                submission["variant_data"] = list(
                    self.clinvar_collection.find({"_id": {"$in": result["variant_data"]}}).sort(
                        "last_evaluated", pymongo.ASCENDING
                    )
                )
                # Loop over variants contained in a single ClinVar submission
                for var_data_id in list(result["variant_data"]):
                    # get case_id from variant id (caseID_variant_ID)
                    case_id = var_data_id.rsplit("_", 1)[0]
                    case_obj = self.case(case_id=case_id)
                    cases[case_id] = case_obj.get("display_name")
            submission["cases"] = cases

            # If submission has case data registered
            if result.get("case_data"):
                unsorted_case_data = list(
                    self.clinvar_collection.find({"_id": {"$in": result["case_data"]}})
                )
                submission["case_data"] = self.sort_clinvar_case_data(
                    submission.get("variant_data", []), unsorted_case_data or []
                )

            submissions.append(submission)

        return submissions

    def clinvar_objs(self, submission_id, key_id):
        """Collects a list of objects from the clinvar collection (variants of case data) as specified by the key_id in the clinvar submission

        Args:
            submission_id(str): the _id key of a clinvar submission
            key_id(str) : either 'variant_data' or 'case_data'. It's a key in a clinvar_submission object.
                          Its value is a list of ids of clinvar objects (either variants of casedata objects)

        Returns:
            clinvar_objects(list) : a list of clinvar objects (either variants of casedata), or None
        """
        # Get a submission object
        submission = self.clinvar_submission_collection.find_one({"_id": ObjectId(submission_id)})

        # a list of clinvar object ids, they can be of csv_type 'variant' or 'casedata'
        if submission.get(key_id):
            clinvar_obj_ids = list(submission.get(key_id))
            clinvar_objects = self.clinvar_collection.find({"_id": {"$in": clinvar_obj_ids}})
            return list(clinvar_objects)

        return None

    def rename_casedata_samples(self, submission_id, case_id, old_name, new_name):
        """Rename all samples associated to a clinVar submission

        Args:
            submission_id(str): the _id key of a clinvar submission
            case_id(str): id of case
            old_name(str): old name of an individual in case data
            new_name(str): new name of an individual in case data

        Returns:
            renamed_samples(int): number of casedata documents renamed
        """
        renamed_samples = 0
        LOG.info(
            f"Renaming clinvar submission {submission_id}, case {case_id} individual {old_name} to {new_name}"
        )
        casedata_objs = self.clinvar_objs(submission_id, "case_data")

        for obj in casedata_objs:
            if obj.get("individual_id") == old_name and obj.get("case_id") == case_id:
                result = self.clinvar_collection.find_one_and_update(
                    {"_id": obj["_id"]},
                    {"$set": {"individual_id": new_name}},
                    return_document=ReturnDocument.AFTER,
                )
                if result:
                    renamed_samples += 1

        return renamed_samples

    def delete_clinvar_object(self, object_id, object_type, submission_id):
        """Remove a variant object from clinvar database and update the relative submission object

        Args:
            object_id(str) : the id of an object to remove from clinvar_collection database collection (a variant of a case)
            object_type(str) : either 'variant_data' or 'case_data'. It's a key in the clinvar_submission object.
            submission_id(str): the _id key of a clinvar submission

        Returns:
            updated_submission(obj): an updated clinvar submission
        """
        LOG.info("Deleting clinvar object %s (%s)", object_id, object_type)

        result = ""
        if object_type == "variant_data":
            # Removing a variant removes the variant AND its casedata:
            # pull the variant reference out of the submission, then delete
            # every clinvar document sharing its linking_id.
            self.clinvar_submission_collection.find_one_and_update(
                {"_id": ObjectId(submission_id)}, {"$pull": {"variant_data": object_id}}
            )
            variant_object = self.clinvar_collection.find_one({"_id": object_id})
            # linking_id is the original ID of the variant in scout; it ties
            # clinvar variant and casedata documents together.
            linking_id = variant_object.get("linking_id")
            # remove any object with that linking_id from clinvar_collection. This removes variant and casedata
            result = self.clinvar_collection.delete_many({"linking_id": linking_id})

        else:  # remove case_data but keep variant in submission
            # delete the object itself from clinvar_collection
            result = self.clinvar_collection.delete_one({"_id": object_id})

        # in any case remove reference to it in the submission object 'case_data' list field
        self.clinvar_submission_collection.find_one_and_update(
            {"_id": ObjectId(submission_id)}, {"$pull": {"case_data": object_id}}
        )

        updated_submission = self.clinvar_submission_collection.find_one_and_update(
            {"_id": submission_id},
            {"$set": {"updated_at": datetime.now()}},
            return_document=pymongo.ReturnDocument.AFTER,
        )
        return updated_submission

    def case_to_clinVars(self, case_id):
        """Get all variants included in clinvar submissions for a case

        Args:
            case_id(str): a case _id

        Returns:
            submission_variants(dict): keys are variant ids and values are variant submission objects
        """
        query = dict(case_id=case_id, csv_type="variant")
        clinvar_objs = list(self.clinvar_collection.find(query))
        submitted_vars = {}
        for clinvar in clinvar_objs:
            submitted_vars[clinvar.get("local_id")] = clinvar

        return submitted_vars
| |
"""Backing implementation for InstallRequirement's various constructors
The idea here is that these formed a major chunk of InstallRequirement's size
so, moving them and support code dedicated to them outside of that class
helps creates for better understandability for the rest of the code.
These are meant to be used elsewhere within pip to create instances of
InstallRequirement.
"""
import logging
import os
import re
from typing import Any, Dict, Optional, Set, Tuple, Union
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.specifiers import Specifier
from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
from pip._internal.exceptions import InstallationError
from pip._internal.models.index import PyPI, TestPyPI
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.pyproject import make_pyproject_path
from pip._internal.req.req_file import ParsedRequirement
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.filetypes import is_archive_file
from pip._internal.utils.misc import is_installable_dir
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import is_url, vcs
__all__ = [
"install_req_from_editable", "install_req_from_line",
"parse_editable"
]
logger = logging.getLogger(__name__)
operators = Specifier._operators.keys()
def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
    """Split a trailing "[extra1,extra2]" suffix off *path*.

    Returns (path_without_extras, extras_or_None); the extras string, when
    present, keeps its surrounding brackets.
    """
    match = re.match(r'^(.+)(\[[^\]]+\])$', path)
    if not match:
        return path, None
    return match.group(1), match.group(2)
def convert_extras(extras: Optional[str]) -> Set[str]:
    """Turn a bracketed extras string (e.g. "[Foo,bar]") into a set of
    lowercase extra names; None or empty input yields an empty set."""
    if extras:
        # Parse via a throwaway requirement so packaging does the
        # normalization and validation of the extras syntax.
        return Requirement("placeholder" + extras.lower()).extras
    return set()
def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:
    """Parses an editable requirement into:
        - a requirement name
        - an URL
        - extras
        - editable options
    Accepted requirements:
        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
        .[some_extra]

    Raises InstallationError when the input is neither an installable local
    directory nor a VCS URL with a detectable name.
    """
    url = editable_req

    # If a file path is specified with extras, strip off the extras.
    url_no_extras, extras = _strip_extras(url)

    if os.path.isdir(url_no_extras):
        # Local directory: editable mode requires a setuptools-based build,
        # so setup.py or setup.cfg must be present.
        setup_py = os.path.join(url_no_extras, 'setup.py')
        setup_cfg = os.path.join(url_no_extras, 'setup.cfg')
        if not os.path.exists(setup_py) and not os.path.exists(setup_cfg):
            msg = (
                'File "setup.py" or "setup.cfg" not found. Directory cannot be '
                'installed in editable mode: {}'
                .format(os.path.abspath(url_no_extras))
            )
            pyproject_path = make_pyproject_path(url_no_extras)
            if os.path.isfile(pyproject_path):
                msg += (
                    '\n(A "pyproject.toml" file was found, but editable '
                    'mode currently requires a setuptools-based build.)'
                )
            raise InstallationError(msg)

        # Treating it as code that has already been checked out
        url_no_extras = path_to_url(url_no_extras)

    if url_no_extras.lower().startswith('file:'):
        package_name = Link(url_no_extras).egg_fragment
        if extras:
            return (
                package_name,
                url_no_extras,
                Requirement("placeholder" + extras.lower()).extras,
            )
        else:
            return package_name, url_no_extras, set()

    for version_control in vcs:
        # Normalize a bare VCS scheme ("git:...") to pip's "git+git:..." form.
        if url.lower().startswith(f'{version_control}:'):
            url = f'{version_control}+{url}'
            break

    link = Link(url)

    if not link.is_vcs:
        backends = ", ".join(vcs.all_schemes)
        raise InstallationError(
            f'{editable_req} is not a valid editable requirement. '
            f'It should either be a path to a local project or a VCS URL '
            f'(beginning with {backends}).'
        )

    # The project name must be given explicitly via the #egg= fragment.
    package_name = link.egg_fragment
    if not package_name:
        raise InstallationError(
            "Could not detect requirement name for '{}', please specify one "
            "with #egg=your_package_name".format(editable_req)
        )
    return package_name, url, set()
def deduce_helpful_msg(req: str) -> str:
    """Returns helpful msg in case requirements file does not exist,
    or cannot be parsed.

    :params req: Requirements file path
    """
    if not os.path.exists(req):
        return f" File '{req}' does not exist."

    msg = " The path does exist. "
    # The path exists -- check whether it actually is a requirements file,
    # in which case the user probably forgot the '-r' flag.
    try:
        with open(req) as fp:
            # parse first line only
            next(parse_requirements(fp.read()))
    except RequirementParseError:
        logger.debug(
            "Cannot parse '%s' as requirements file", req, exc_info=True
        )
    else:
        msg += (
            "The argument you provided "
            "({}) appears to be a"
            " requirements file. If that is the"
            " case, use the '-r' flag to install"
            " the packages specified within it."
        ).format(req)
    return msg
class RequirementParts:
    """Holder for the four pieces parsed out of one requirement line:
    the Requirement itself, its Link, environment markers, and extras."""

    def __init__(
        self,
        requirement: Optional[Requirement],
        link: Optional[Link],
        markers: Optional[Marker],
        extras: Set[str],
    ):
        # Attributes are read directly by the install_req_from_* builders.
        self.requirement = requirement
        self.extras = extras
        self.markers = markers
        self.link = link
def parse_req_from_editable(editable_req: str) -> RequirementParts:
    """Parse an editable requirement string into RequirementParts.

    Raises InstallationError when the detected project name is not a valid
    requirement.
    """
    name, url, extras_override = parse_editable(editable_req)

    req: Optional[Requirement] = None
    if name is not None:
        try:
            req = Requirement(name)
        except InvalidRequirement:
            raise InstallationError(f"Invalid requirement: '{name}'")

    return RequirementParts(req, Link(url), None, extras_override)
# ---- The actual constructors follow ----
def install_req_from_editable(
    editable_req: str,
    comes_from: Optional[Union[InstallRequirement, str]] = None,
    use_pep517: Optional[bool] = None,
    isolated: bool = False,
    options: Optional[Dict[str, Any]] = None,
    constraint: bool = False,
    user_supplied: bool = False,
) -> InstallRequirement:
    """Build an InstallRequirement for an editable ("-e") requirement."""
    parts = parse_req_from_editable(editable_req)
    # An empty/missing options dict yields the same defaults either way.
    opts = options or {}
    return InstallRequirement(
        parts.requirement,
        comes_from=comes_from,
        user_supplied=user_supplied,
        editable=True,
        link=parts.link,
        constraint=constraint,
        use_pep517=use_pep517,
        isolated=isolated,
        install_options=opts.get("install_options", []),
        global_options=opts.get("global_options", []),
        hash_options=opts.get("hashes", {}),
        extras=parts.extras,
    )
def _looks_like_path(name: str) -> bool:
    """Checks whether the string "looks like" a path on the filesystem.

    This does not check whether the target actually exists, only judge from
    the appearance. Returns True when a path separator (os.path.sep or
    os.path.altsep) occurs in the string, or when it starts with a dot
    (the current directory).
    """
    separators = [os.path.sep]
    if os.path.altsep is not None:
        separators.append(os.path.altsep)
    return name.startswith(".") or any(sep in name for sep in separators)
def _get_url_from_path(path: str, name: str) -> Optional[str]:
    """
    First, it checks whether a provided path is an installable directory. If it
    is, returns the path.

    If false, check if the path is an archive file (such as a .whl).
    The function checks if the path is a file. If false, if the path has
    an @, it will treat it as a PEP 440 URL requirement and return the path.
    """
    if _looks_like_path(name) and os.path.isdir(path):
        if is_installable_dir(path):
            return path_to_url(path)
        raise InstallationError(
            f"Directory {name!r} is not installable. Neither 'setup.py' "
            "nor 'pyproject.toml' found."
        )
    if not is_archive_file(path):
        return None
    if os.path.isfile(path):
        return path_to_url(path)
    head, sep, _rest = name.partition('@')
    if sep and not _looks_like_path(head):
        # '@' present and the part before it does not look like a path:
        # treat the whole string as a PEP 440 URL requirement instead.
        return None
    logger.warning(
        'Requirement %r looks like a filename, but the '
        'file does not exist',
        name
    )
    return path_to_url(path)
def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:
    """Parse one requirement line -- a specifier, URL, local path, or
    archive file -- into RequirementParts.

    :param name: the raw requirement line (possibly with extras/markers)
    :param line_source: optional description of where the line came from,
        used to enrich error messages
    :raises InstallationError: when the requirement string is invalid
    """
    # For URLs the marker separator must include a space to avoid splitting
    # on ';' characters that are legal inside URLs.
    if is_url(name):
        marker_sep = '; '
    else:
        marker_sep = ';'
    if marker_sep in name:
        name, markers_as_string = name.split(marker_sep, 1)
        markers_as_string = markers_as_string.strip()
        if not markers_as_string:
            markers = None
        else:
            markers = Marker(markers_as_string)
    else:
        markers = None
    name = name.strip()
    req_as_string = None
    path = os.path.normpath(os.path.abspath(name))
    link = None
    extras_as_string = None

    if is_url(name):
        link = Link(name)
    else:
        # Not a URL: maybe a local directory or archive (with optional extras).
        p, extras_as_string = _strip_extras(path)
        url = _get_url_from_path(p, name)
        if url is not None:
            link = Link(url)

    # it's a local file, dir, or url
    if link:
        # Handle relative file URLs
        if link.scheme == 'file' and re.search(r'\.\./', link.url):
            link = Link(
                path_to_url(os.path.normpath(os.path.abspath(link.path))))
        # wheel file
        if link.is_wheel:
            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
            # Pin the requirement to exactly the wheel's name and version.
            req_as_string = f"{wheel.name}=={wheel.version}"
        else:
            # set the req to the egg fragment. when it's not there, this
            # will become an 'unnamed' requirement
            req_as_string = link.egg_fragment

    # a requirement specifier
    else:
        req_as_string = name

    extras = convert_extras(extras_as_string)

    def with_source(text: str) -> str:
        # Append "(from <line_source>)" to error text when a source is known.
        if not line_source:
            return text
        return f'{text} (from {line_source})'

    def _parse_req_string(req_as_string: str) -> Requirement:
        # Parse the specifier, adding a best-guess hint to the error when
        # the string looks like a path or uses '=' instead of '=='.
        try:
            req = Requirement(req_as_string)
        except InvalidRequirement:
            if os.path.sep in req_as_string:
                add_msg = "It looks like a path."
                add_msg += deduce_helpful_msg(req_as_string)
            elif ('=' in req_as_string and
                  not any(op in req_as_string for op in operators)):
                add_msg = "= is not a valid operator. Did you mean == ?"
            else:
                add_msg = ''
            msg = with_source(
                f'Invalid requirement: {req_as_string!r}'
            )
            if add_msg:
                msg += f'\nHint: {add_msg}'
            raise InstallationError(msg)
        else:
            # Deprecate extras after specifiers: "name>=1.0[extras]"
            # This currently works by accident because _strip_extras() parses
            # any extras in the end of the string and those are saved in
            # RequirementParts
            for spec in req.specifier:
                spec_str = str(spec)
                if spec_str.endswith(']'):
                    msg = f"Extras after version '{spec_str}'."
                    raise InstallationError(msg)
        return req

    if req_as_string is not None:
        req: Optional[Requirement] = _parse_req_string(req_as_string)
    else:
        req = None

    return RequirementParts(req, link, markers, extras)
def install_req_from_line(
    name: str,
    comes_from: Optional[Union[str, InstallRequirement]] = None,
    use_pep517: Optional[bool] = None,
    isolated: bool = False,
    options: Optional[Dict[str, Any]] = None,
    constraint: bool = False,
    line_source: Optional[str] = None,
    user_supplied: bool = False,
) -> InstallRequirement:
    """Creates an InstallRequirement from a name, which might be a
    requirement, directory containing 'setup.py', filename, or URL.

    :param line_source: An optional string describing where the line is from,
        for logging purposes in case of an error.
    """
    parts = parse_req_from_line(name, line_source)
    # An empty/missing options dict yields the same defaults either way.
    opts = options or {}
    return InstallRequirement(
        parts.requirement,
        comes_from,
        link=parts.link,
        markers=parts.markers,
        use_pep517=use_pep517,
        isolated=isolated,
        install_options=opts.get("install_options", []),
        global_options=opts.get("global_options", []),
        hash_options=opts.get("hashes", {}),
        constraint=constraint,
        extras=parts.extras,
        user_supplied=user_supplied,
    )
def install_req_from_req_string(
    req_string: str,
    comes_from: Optional[InstallRequirement] = None,
    isolated: bool = False,
    use_pep517: Optional[bool] = None,
    user_supplied: bool = False,
) -> InstallRequirement:
    """Build an InstallRequirement from a PEP 508 requirement string.

    Raises InstallationError when the string is invalid, or when a package
    hosted on PyPI tries to depend on a direct (non-PyPI) URL.
    """
    try:
        req = Requirement(req_string)
    except InvalidRequirement:
        raise InstallationError(f"Invalid requirement: '{req_string}'")

    domains_not_allowed = [
        PyPI.file_storage_domain,
        TestPyPI.file_storage_domain,
    ]
    comes_from_pypi = bool(
        comes_from and comes_from.link
        and comes_from.link.netloc in domains_not_allowed
    )
    if req.url and comes_from_pypi:
        # Explicitly disallow pypi packages that depend on external urls
        raise InstallationError(
            "Packages installed from PyPI cannot depend on packages "
            "which are not also hosted on PyPI.\n"
            "{} depends on {} ".format(comes_from.name, req)
        )

    return InstallRequirement(
        req,
        comes_from,
        isolated=isolated,
        use_pep517=use_pep517,
        user_supplied=user_supplied,
    )
def install_req_from_parsed_requirement(
    parsed_req: ParsedRequirement,
    isolated: bool = False,
    use_pep517: Optional[bool] = None,
    user_supplied: bool = False,
) -> InstallRequirement:
    """Convert a ParsedRequirement (a requirements-file line) into an
    InstallRequirement, dispatching on whether the line was editable."""
    if parsed_req.is_editable:
        return install_req_from_editable(
            parsed_req.requirement,
            comes_from=parsed_req.comes_from,
            use_pep517=use_pep517,
            constraint=parsed_req.constraint,
            isolated=isolated,
            user_supplied=user_supplied,
        )
    return install_req_from_line(
        parsed_req.requirement,
        comes_from=parsed_req.comes_from,
        use_pep517=use_pep517,
        isolated=isolated,
        options=parsed_req.options,
        constraint=parsed_req.constraint,
        line_source=parsed_req.line_source,
        user_supplied=user_supplied,
    )
def install_req_from_link_and_ireq(
    link: Link, ireq: InstallRequirement
) -> InstallRequirement:
    """Clone *ireq* as a new InstallRequirement pinned to *link*."""
    kwargs = dict(
        req=ireq.req,
        comes_from=ireq.comes_from,
        editable=ireq.editable,
        link=link,
        markers=ireq.markers,
        use_pep517=ireq.use_pep517,
        isolated=ireq.isolated,
        install_options=ireq.install_options,
        global_options=ireq.global_options,
        hash_options=ireq.hash_options,
    )
    return InstallRequirement(**kwargs)
| |
from contextlib import suppress
import http.client
import os
import tempfile
from unittest import TestCase
from app.common.build_artifact import BuildArtifact
from app.util import fs, log
from app.util.conf.base_config_loader import BaseConfigLoader
from app.util.conf.configuration import Configuration
from app.util.process_utils import is_windows
from app.util.network import Network
from app.util.secret import Secret
from app.util.url_builder import UrlBuilder
from test.framework.functional.fs_item import Directory
from test.framework.functional.functional_test_cluster import FunctionalTestCluster, TestClusterTimeoutError
class BaseFunctionalTestCase(TestCase):
    """
    This is the base class for all functional tests. This class has two main purposes:
        - Make available a `FunctionalTestCluster` object for use in functional tests (self.cluster)
        - Implement any helper assertion methods that might be useful for making our tests easier to read and write
    """
    def setUp(self):
        # Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
        log.configure_logging('DEBUG')

        self._reset_config()
        Secret.set('testsecret')
        self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
        self._network = Network()

    def _reset_config(self):
        """Reset the global Configuration singleton to default values."""
        Configuration.reset_singleton()
        config = Configuration.singleton()
        conf_loader = BaseConfigLoader()
        conf_loader.configure_defaults(config)
        conf_loader.configure_postload(config)

    def tearDown(self):
        # Give the cluster a bit of extra time to finish working (before forcefully killing it and failing the test)
        with suppress(TestClusterTimeoutError):
            self.cluster.block_until_build_queue_empty(timeout=5)

        # Kill processes and make sure all processes exited with 0 exit code
        services = self.cluster.kill()
        # only check the exit code if not on Windows as Popen.terminate kills the process on Windows and the exit
        # code is not zero.
        # TODO: remove the is_windows() check after we can handle exit on Windows gracefully.
        if not is_windows():
            for service in services:
                self.assertEqual(
                    service.return_code,
                    0,
                    'Service running on url: {} should exit with code 0, but exited with code {}.'.format(
                        service.url,
                        service.return_code,
                    ),
                )

        # Remove the temp dir. This will delete the log files, so should be run after cluster shuts down.
        self.cluster.master_app_base_dir.cleanup()
        # A plain loop, not a list comprehension: we iterate for the side effect only.
        for slave_app_base_dir in self.cluster.slaves_app_base_dirs:
            slave_app_base_dir.cleanup()

    def _get_test_verbosity(self):
        """
        Get test verbosity from an env variable. We need to use an env var since Nose does not support specifying
        command-line test configuration natively. (But if we need more of these configuration paramaters, we should
        instead look at the 'nose-testconfig' plugin instead of adding tons of environment variables.)

        :return: Whether or not tests should be run verbosely
        :rtype: bool
        """
        is_verbose = os.getenv('CR_VERBOSE') not in ('0', '', None)  # default value of is_verbose is False
        return is_verbose

    def assert_build_status_contains_expected_data(self, build_id, expected_data):
        """
        Assert that the build status endpoint contains the expected fields and values. This assertion does an API
        request to the master service of self.cluster.

        :param build_id: The id of the build whose status to check
        :type build_id: int
        :param expected_data: A dict of expected keys and values in the build status response
        :type expected_data: dict
        """
        build_status = self.cluster.master_api_client.get_build_status(build_id).get('build')
        self.assertIsInstance(build_status, dict, 'Build status API request should return a dict.')
        # assertDictContainsSubset is deprecated (removed in Python 3.12). Dict item views support
        # superset comparison, which expresses the same "contains subset" check.
        self.assertGreaterEqual(build_status.items(), expected_data.items(),
                                'Build status API response should contain the expected status data.')

    def assert_build_has_successful_status(self, build_id):
        """
        Assert that the build status endpoint contains fields signifying the build was successful (had no failures).
        This assertion does an API request to the master service of self.cluster.

        :param build_id: The id of the build whose status to check
        :type build_id: int
        """
        expected_successful_build_params = {
            'result': 'NO_FAILURES',
            'status': 'FINISHED',
        }
        self.assert_build_status_contains_expected_data(build_id, expected_successful_build_params)

    def assert_build_has_failure_status(self, build_id):
        """
        Assert that the build status endpoint contains fields signifying the build was failed. This assertion does an
        API request to the master service of self.cluster.

        :param build_id: The id of the build whose status to check
        :type build_id: int
        """
        expected_failure_build_params = {
            'result': 'FAILURE',
            'status': 'FINISHED',
        }
        self.assert_build_status_contains_expected_data(build_id, expected_failure_build_params)

    def assert_build_has_canceled_status(self, build_id):
        """
        Assert that the build status endpoint contains fields signifying the build was canceled. This assertion does
        an API request to the master service of self.cluster.

        :param build_id: The id of the build whose status to check
        :type build_id: int
        """
        expected_failure_build_params = {
            'result': 'FAILURE',
            'status': 'CANCELED',
        }
        self.assert_build_status_contains_expected_data(build_id, expected_failure_build_params)

    def assert_build_artifact_contents_match_expected(self, master_api, build_id, expected_build_artifact_contents):
        """
        Assert that artifact files for this build have the expected contents.

        :type master_api: app.util.url_builder.UrlBuilder
        :param build_id: The id of the build whose artifacts to check
        :type build_id: int
        :param expected_build_artifact_contents: A list of FSItems corresponding to the expected artifact dir contents
        :type expected_build_artifact_contents: list[FSItem]
        """
        with tempfile.TemporaryDirectory() as build_artifacts_dir_path:
            self._download_and_extract_zip_results(master_api, build_id, build_artifacts_dir_path)
            self.assert_directory_contents_match_expected(build_artifacts_dir_path, expected_build_artifact_contents)

        # Also check the tar archive even though it is deprecated.
        with tempfile.TemporaryDirectory() as build_artifacts_dir_path:
            self._download_and_extract_tar_results(master_api, build_id, build_artifacts_dir_path)
            self.assert_directory_contents_match_expected(build_artifacts_dir_path, expected_build_artifact_contents)

    def assert_directory_contents_match_expected(self, dir_path, expected_dir_contents):
        """
        Assert that the specified directory has the expected contents.

        :param dir_path: The path of the directory whose artifacts to check
        :type dir_path: string
        :param expected_dir_contents: A list of FSItems corresponding to the expected directory contents
        :type expected_dir_contents: list[FSItem]
        """
        if expected_dir_contents is not None:
            dir_path = os.path.abspath(dir_path)  # converts path to absolute, removes trailing slash if present
            expected_dir_name = os.path.basename(dir_path)
            expected_build_artifacts = Directory(expected_dir_name, expected_dir_contents)
            expected_build_artifacts.assert_matches_path(dir_path, allow_extra_items=False)

    def _download_and_extract_tar_results(self, master_api, build_id, download_dir):
        """
        Download the deprecated results tarball from the master and extract it.

        :type master_api: app.util.url_builder.UrlBuilder
        :type build_id: int
        :type download_dir: str
        """
        download_artifacts_url = master_api.url('build', build_id, 'result')
        download_filepath = os.path.join(download_dir, BuildArtifact.ARTIFACT_TARFILE_NAME)
        response = self._network.get(download_artifacts_url)

        if response.status_code == http.client.OK:
            # save tar file to disk, decompress, and delete
            with open(download_filepath, 'wb') as file:
                chunk_size = 500 * 1024
                for chunk in response.iter_content(chunk_size):
                    file.write(chunk)

            fs.extract_tar(download_filepath, delete=True)

    def _download_and_extract_zip_results(self, master_api: UrlBuilder, build_id: int, download_dir: str):
        """Download the artifacts.zip from the master and extract it."""
        download_artifacts_url = master_api.url('build', build_id, 'artifacts.zip')
        download_filepath = os.path.join(download_dir, BuildArtifact.ARTIFACT_ZIPFILE_NAME)
        response = self._network.get(download_artifacts_url)

        if response.status_code == http.client.OK:
            # save file to disk, decompress, and delete
            with open(download_filepath, 'wb') as file:
                chunk_size = 500 * 1024
                for chunk in response.iter_content(chunk_size):
                    file.write(chunk)

            fs.unzip_directory(download_filepath, delete=True)
| |
#!/usr/bin/env python
import argparse
import hashlib
import httplib
import json
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import urllib
import urlparse
import zipfile
REST_ENDPOINT = "http://fsurf.ci-connect.net/freesurfer_test"  # base URL of the fsurf REST service
VERSION = 'PKG_VERSION'  # placeholder substituted with the real version at packaging time
STAGE_OPTIONS = ['-autorecon1', '-autorecon2-volonly', '-autorecon2']  # recon-all stages run in order
TIME_WAIT = 3600  # seconds to sleep between job status polls
def zip_directory(zip_obj, directory):
    """
    Recursively add the contents of *directory* to an open ZipFile.

    Members are archived relative to the directory's parent, so the
    archive keeps the directory itself as the single top-level folder.
    The ZipFile is closed on success.

    :param zip_obj: a ZipFile instance that will be populated
    :param directory: path to directory
    :return: True if successful, False otherwise
    """
    original_cwd = os.getcwd()
    try:
        # Work relative to the parent so archive paths start with the
        # subject folder name.
        os.chdir(os.path.dirname(directory))
        top_level = os.path.basename(directory)
        for root, dir_names, file_names in os.walk(top_level):
            for entry_name in dir_names + file_names:
                zip_obj.write(os.path.join(root, entry_name))
        zip_obj.close()
    except OSError:
        return False
    finally:
        # Always restore the caller's working directory.
        os.chdir(original_cwd)
    return True
def check_output(job_id, user, password):
    """
    Check the output of a job to make sure that it completed without errors

    :param job_id: id of workflow to check
    :param user: fsurf user account to use
    :param password: password for fsurf user account
    :return: True if output is okay, False otherwise
    """
    response = get_log(job_id, user, password)
    try:
        # Use a context manager so the log file handle is always closed
        # (the original leaked the handle from open(...).read()).
        with open(response['filename'], 'r') as log_fh:
            log_file = log_fh.read()
        os.unlink(response['filename'])
    except (IOError, OSError):
        sys.stderr.write("Can't get log file contents")
        return False
    # recon-all prints its success banner at the end of its log, so only
    # the tail of the file needs to be searched.
    count = len(re.findall('recon-all -s MRN_1 finished without error',
                           log_file[-200:]))
    return count == 1
def check_freesurfer():
    """
    Check to make sure freesurfer binaries are in path

    :return: True if FreeSurfer is available, false otherwise
    """
    # The recon-all executable marks a usable FreeSurfer installation.
    return any(
        os.path.isfile(os.path.join(directory, 'recon-all'))
        for directory in os.environ['PATH'].split(':')
    )
def error_message(message):
    """
    Write an error message to stderr, followed by a newline.

    :param message: error message to write
    :return: None
    """
    # %-formatting coerces non-string messages just like str.format did.
    sys.stderr.write("%s\n" % (message,))
def wait_for_completion(jobid, user, password, timeout=3):
    """
    Wait for a workflow to complete

    Polls the job status every TIME_WAIT seconds until the job leaves
    the QUEUED/RUNNING states or the timeout elapses.

    :param jobid: job id for the workflow
    :param user: fsurf user account to use
    :param password: password for fsurf user account
    :param timeout: time to wait for job in days
    :return: True on workflow completing, False if job timed out
    """
    deadline = time.time() + 86400 * timeout
    while True:
        if get_status(jobid, user, password) not in ('QUEUED', 'RUNNING'):
            return True
        if time.time() > deadline:
            sys.stderr.write("Timed out while processing\n")
            return False
        time.sleep(TIME_WAIT)
def convert_to_zip(tar_file):
    """
    Convert specified tar file to a zipfile

    The output tarball from one stage becomes the input zip for the next,
    hence the 'output' -> 'input' rename in the zip filename.

    :param tar_file: name of file to convert
    :return: name of converted zipfile, None if error occurs
    """
    work_dir = tempfile.mkdtemp()
    # Close the tarball deterministically (the original leaked the handle).
    with tarfile.open(tar_file, 'r:*') as tarball:
        # Assumes the first member is the subject directory at the archive
        # root -- TODO confirm against the workflow's packaging.
        subject = tarball.getmembers()[0].path
        tarball.extractall(work_dir)
    zip_file = tar_file.replace('tar.bz2', 'zip').replace('output', 'input')
    subject_file = zipfile.ZipFile(zip_file, 'w')
    # zip_directory closes subject_file on success.
    zip_directory(subject_file, os.path.join(work_dir, subject))
    shutil.rmtree(work_dir)
    return zip_file
def get_response(query_parameters, noun, method, endpoint=REST_ENDPOINT):
    """
    Query rest endpoint with given string and return results

    :param endpoint: url to REST endpoint
    :param query_parameters: a dictionary with key, values parameters
    :param noun: object being worked on
    :param method: HTTP method that should be used
    :return: (status code, response from query)
    """
    # Format indices are deliberate: {0}=endpoint, {2}=noun, {1}=params,
    # producing "<endpoint>/<noun>?<query string>".
    url = "{0}/{2}?{1}".format(endpoint,
                               urllib.urlencode(query_parameters),
                               noun)
    parsed = urlparse.urlparse(url)
    try:
        conn = httplib.HTTPConnection(parsed.netloc)
        conn.request(method, "{0}?{1}".format(parsed.path, parsed.query))
        resp = conn.getresponse()
        return resp.status, resp.read()
    except IOError as e:  # mainly dns errors
        # Report network-level failures in the same (status, json-body)
        # shape that callers expect from the service itself.
        response = {'status': 500,
                    'result': str(e)}
        return 500, json.dumps(response)
    except httplib.HTTPException as e:
        response = {'status': 400,
                    'result': str(e)}
        return 400, json.dumps(response)
def download_output(query_parameters, noun, endpoint=REST_ENDPOINT):
    """
    Download a job artifact (output tarball or log) from the REST
    endpoint and save it to a file in the current directory.

    :param endpoint: url to REST endpoint
    :param query_parameters: a dictionary with key, values parameters
    :param noun: object being worked on (e.g. 'job/output' or 'job/log')
    :return: (status code, json response including the saved 'filename')
    """
    url = "{0}/{2}?{1}".format(endpoint,
                               urllib.urlencode(query_parameters),
                               noun)
    parsed = urlparse.urlparse(url)
    try:
        conn = httplib.HTTPConnection(parsed.hostname)
        conn.request('GET', "{0}?{1}".format(parsed.path, parsed.query))
        resp = conn.getresponse()
        content_type = resp.getheader('content-type')
        # NOTE(review): the original had an unreachable early return here:
        # it required content_type to start with BOTH 'application/x-bzip2'
        # and 'text/plain', which no string can satisfy.  Unknown content
        # types are already rejected below, so the dead branch was removed.
        content_disposition = resp.getheader('content-disposition')
        if content_type.startswith('application/x-bzip2'):
            filename = 'fsurf_output.tar.bz2'
        elif content_type.startswith('text/plain'):
            filename = 'recon-all.log'
        else:
            response = {'status': 500,
                        'result': "Unknown content-type: "
                                  "{0}".format(content_type)}
            return 500, json.dumps(response)
        # Prefer the server-supplied filename when one is provided.
        match_obj = re.search(r'filename=(.*)', content_disposition)
        if match_obj:
            filename = match_obj.group(1)
        # Stream the body to disk in 4 KB chunks.
        with open(filename, 'wb') as f:
            temp = resp.read(4096)
            while temp:
                f.write(temp)
                temp = resp.read(4096)
        return resp.status, json.dumps({'status': 200,
                                        'result': "output downloaded",
                                        'filename': filename})
    except httplib.HTTPException as e:
        response = {'status': 400,
                    'result': str(e)}
        return 400, json.dumps(response)
def encode_file(body, filename):
    """
    Encode file data as a multipart/form-data request body.

    :param body: binary data to encode
    :param filename: name of file with data to encode
    :return: (content_type, encoded_body) tuple
    """
    boundary = '--------------MIME_Content_Boundary---------------'
    disposition = ('Content-Disposition: form-data; name="input_file"; '
                   'filename="{0}"'.format(filename))
    # CRLF-joined segments: opening boundary, part headers, blank line,
    # payload, closing boundary, trailing CRLF.
    segments = (
        '--' + boundary,
        disposition,
        'Content-Type: application/octet-stream',
        '',
        body,
        '--' + boundary + '--',
        '',
    )
    encoded = "\r\n".join(segments)
    content_type = 'multipart/form-data; boundary={0}'.format(boundary)
    return content_type, encoded
def upload_item(query_parameters, noun, filename, body, method, endpoint=REST_ENDPOINT):
    """
    Issue a POST request to given endpoint

    :param endpoint: url to REST endpoint
    :param query_parameters: a dictionary with key, values parameters
    :param noun: object being worked on
    :param filename: name of file being transferred
    :param body: data to be sent in the body
    :param method: HTTP method that should be used (POST, PUT)
    :return: (status code, response from query)
    """
    # Format indices are deliberate: "<endpoint>/<noun>?<query string>".
    url = "{0}/{2}?{1}".format(endpoint,
                               urllib.urlencode(query_parameters),
                               noun)
    parsed = urlparse.urlparse(url)
    try:
        conn = httplib.HTTPConnection(parsed.hostname)
        # Wrap the raw file bytes in a multipart/form-data envelope.
        content_type, body = encode_file(body, filename)
        headers = {'content-type': content_type,
                   'Accept': 'text/plain'}
        conn.request(method,
                     "{0}?{1}".format(parsed.path, parsed.query),
                     body,
                     headers)
        resp = conn.getresponse()
        if resp.status == 401:
            # invalid password
            response = {'status': resp.status,
                        'result': 'Invalid username/password'}
            return resp.status, json.dumps(response)
        elif resp.status == 400:
            # invalid parameter
            response = {'status': resp.status,
                        'result': 'Invalid parameter'}
            return resp.status, json.dumps(response)
        return resp.status, resp.read()
    except httplib.HTTPException as e:
        response = {'status': 400,
                    'result': str(e)}
        return 400, json.dumps(response)
def get_token(userid, password):
    """
    Generate an authentication token and timestamp

    The token is sha256(sha256(salt + password) + timestamp), where the
    salt is fetched from the service for this userid.

    :param userid: user id identifying account
    :param password: password for user account
    :return: (timestamp, token) as strings, or (None, None) if the
             account is disabled or the userid is not found
    """
    parameters = {'userid': userid}
    code, response = get_response(parameters, 'user/salt', 'GET', REST_ENDPOINT)
    if code == 401:
        error_message("User account disabled\n")
        return None, None
    elif code == 400:
        error_message("Userid not found\n")
        return None, None
    timestamp = time.time()
    response_obj = json.loads(response)
    salt = response_obj['result']
    # NOTE(review): feeding str concatenations to sha256 assumes Python 2
    # byte strings; under Python 3 these would need .encode().
    token = hashlib.sha256(salt + password).hexdigest()
    token = hashlib.sha256(token + str(timestamp)).hexdigest()
    return str(timestamp), token
def remove_workflow(workflow_id, username, password):
    """
    Stop and remove a specified pegasus workflow

    :param workflow_id: pegasus id for workflow
    :param username: username to use when authenticating
    :param password: password to user when authenticating
    :return: True on success, False on error
    """
    timestamp, token = get_token(username, password)
    if token is None:
        return False
    query_params = {
        'userid': username,
        'timestamp': timestamp,
        'token': token,
        'jobid': workflow_id,
    }
    status, response = get_response(query_params, 'job', 'DELETE')
    resp_dict = json.loads(response)
    if status != 200:
        error_message("Error deleting workflow:\n" + resp_dict['result'])
        return False
    sys.stdout.write("Workflow removed\n")
    return True
def submit_custom_workflow(username, password, version, subject, subject_dir, options):
    """
    Submit a workflow to OSG for processing

    Creates the job via the REST service, then uploads the subject
    directory file (if given), retrying the upload up to 5 times.
    Exits the process on submission errors.

    :param username: username to use when authenticating
    :param password: password to user when authenticating
    :param version: version of freesurfer to use
    :param subject: name of subject in the file
    :param subject_dir: path to file with FreeSurfer subject dir in a zip file
    :param options: options to use when running workflow
    :return: job_id on success, None on error
    """
    timestamp, token = get_token(username, password)
    # NOTE(review): token may be None here if authentication failed; the
    # request below would then be rejected by the service -- verify.
    if options and not subject_dir:
        sys.stderr.write("You must provide a subject directory file if "
                         "using custom options!\n")
        sys.exit(1)
    query_params = {'userid': username,
                    'token': token,
                    'multicore': False,
                    'num_inputs': 1,
                    'options': options,
                    'version': version,
                    'subject': subject,
                    'timestamp': timestamp,
                    'jobname': "validation_{0}_{1}".format(subject, timestamp)}
    sys.stdout.write("Creating and submitting workflow\n")
    status, response = get_response(query_params, 'job', 'POST')
    if status != 200:
        response_obj = json.loads(response)
        error_message("Error while creating workflow:\n" +
                      response_obj['result'])
        sys.exit(1)
    response_obj = json.loads(response)
    job_id = response_obj['job_id']
    sys.stdout.write("Workflow {0} created\n".format(job_id))
    sys.stdout.write("Uploading input files\n")
    if subject_dir:
        # Retry loop: up to 5 upload attempts before giving up.
        attempts = 1
        sys.stdout.write("Uploading {0}\n".format(subject_dir))
        while attempts < 6:
            query_params = {'userid': username,
                            'timestamp': timestamp,
                            'token': token,
                            'jobid': job_id,
                            'filename': os.path.basename(subject_dir),
                            'subjectdir': True}
            input_path = os.path.abspath(subject_dir)
            if not os.path.isfile(input_path):
                sys.stderr.write("{0} is not present and is needed, "
                                 "exiting\n".format(input_path))
                sys.exit(1)
            with open(input_path, 'rb') as f:
                body = f.read()
            status, response = upload_item(query_params,
                                           'job/input',
                                           os.path.basename(subject_dir),
                                           body,
                                           'POST')
            if status == 200:
                sys.stdout.write("Uploaded {0} successfully\n".format(subject_dir))
                break
            response_obj = json.loads(response)
            sys.stdout.write("Error while uploading {0}\n".format(subject_dir))
            sys.stdout.write("Error: {0}\n".format(response_obj['result']))
            sys.stdout.write("Retrying upload, attempt {0}/5\n".format(attempts))
            attempts += 1
        if attempts == 6:
            sys.stdout.write("Could not upload {0}\n".format(subject_dir))
            sys.stdout.write("Exiting...\n")
            return None
    return job_id
def get_output(workflow_id, username, password):
    """
    Get MRI data for a completed workflow

    :param workflow_id: pegasus id for workflow
    :param username: username to use when authenticating
    :param password: password to user when authenticating
    :return: response_obj with status and filename
    """
    timestamp, token = get_token(username, password)
    if token is None:
        return {'status': 400, 'result': 'Can\'t authenticate'}
    query_params = {
        'userid': username,
        'timestamp': timestamp,
        'token': token,
        'jobid': workflow_id,
    }
    sys.stdout.write("Downloading results, this may take a while\n")
    status, response = download_output(query_params, 'job/output')
    response_obj = json.loads(response)
    if status != 200:
        sys.stdout.write("Error while downloading results:\n")
        sys.stdout.write("{0}\n".format(response_obj['result']))
        return response_obj
    sys.stdout.write("Downloaded to {0}\n".format(response_obj['filename']))
    sys.stdout.write("To extract the results: tar "
                     "xvjf {0}\n".format(response_obj['filename']))
    return response_obj
def get_log(workflow_id, username, password):
    """
    Get logs for a completed workflow

    :param workflow_id: pegasus id for workflow
    :param username: username to use when authenticating
    :param password: password to user when authenticating
    :return: response_obj with status and filename
    """
    timestamp, token = get_token(username, password)
    if token is None:
        return {'status': 400, 'result': 'Can\'t authenticate'}
    query_params = {
        'userid': username,
        'timestamp': timestamp,
        'token': token,
        'jobid': workflow_id,
    }
    sys.stdout.write("Downloading log file, this may take a while\n")
    status, response = download_output(query_params, 'job/log')
    response_obj = json.loads(response)
    if status != 200:
        sys.stdout.write("Error while downloading log file:\n")
        sys.stdout.write("{0}\n".format(response_obj['result']))
        return response_obj
    sys.stdout.write("Downloaded to {0}\n".format(response_obj['filename']))
    return response_obj
def get_status(workflow_id, username, password):
    """
    Get status for a workflow

    :param workflow_id: pegasus id for workflow
    :param username: username to use when authenticating
    :param password: password to user when authenticating
    :return: job status
    """
    timestamp, token = get_token(username, password)
    if token is None:
        return 'ERROR'
    query_params = {
        'userid': username,
        'timestamp': timestamp,
        'token': token,
        'jobid': workflow_id,
    }
    status, response = get_response(query_params, 'job/status', 'GET')
    response_obj = json.loads(response)
    if status == 404:
        sys.stdout.write("Workflow with id {0} not found\n".format(workflow_id))
        return 'ERROR'
    if status != 200:
        sys.stdout.write("Error while getting job status:\n")
        sys.stdout.write("{0}\n".format(response_obj['result']))
        return response_obj['result']
    sys.stdout.write("Current job status: {0}\n".format(response_obj['job_status']))
    return response_obj['job_status']
def run_stage(options, input_filename, args):
    """
    Run a Freesurfer stage

    Submits a workflow with the given stage options, waits for it to
    finish, downloads and validates its output, then removes the
    workflow.  Exits the process if submission fails.

    :param options: options for running the stage
    :param input_filename: name of file to use as input
    :param args: argparse output
    :return: name of output file, None if error occurred
    """
    stage_job_id = submit_custom_workflow(args.user,
                                          args.password,
                                          args.freesurfer_version,
                                          args.subject,
                                          input_filename,
                                          options)
    if stage_job_id is None:
        sys.stderr.write("Can't submit jobs\n")
        sys.exit(1)
    if not wait_for_completion(stage_job_id, args.user, args.password):
        # Timed out: clean up the workflow before reporting failure.
        remove_workflow(stage_job_id, args.user, args.password)
        return None
    response = get_output(stage_job_id, args.user, args.password)
    if response['status'] != 200:
        remove_workflow(stage_job_id, args.user, args.password)
        return None
    output_filename = response['filename']
    # The recon-all log is the source of truth for stage success.
    if check_output(stage_job_id, args.user, args.password):
        sys.stdout.write("recon-all log indicates success\n")
    else:
        sys.stdout.write("recon-all log indicates error!\n")
        os.unlink(output_filename)
        return None
    if not remove_workflow(stage_job_id, args.user, args.password):
        sys.stderr.write("Can't remove workflow, exiting...\n")
        os.unlink(output_filename)
        return None
    return output_filename
def main():
    """
    Parse arguments and run each FreeSurfer validation stage in sequence,
    feeding each stage's output into the next stage as input.

    :return: does not return; exits 0 on success, 1 on any error
    """
    parser = argparse.ArgumentParser(description="Process freesurfer information")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    parser.add_argument('--subject', dest='subject',
                        help='subject name')
    parser.add_argument('--subject-dir', dest='subject_dir',
                        help='subject directory file')
    parser.add_argument('--input-file',
                        dest='input_files',
                        action='append',
                        default=[],
                        help='path to input file(s), this can be used '
                             'multiple times')
    parser.add_argument('--freesurfer-version',
                        dest='freesurfer_version',
                        choices=['5.3.0'],
                        default='5.3.0',
                        help='version of FreeSurfer to use')
    parser.add_argument('--dualcore', dest='multicore',
                        action='store_false', default=True,
                        help='Use 2 cores to process certain steps')
    # General arguments
    parser.add_argument('--user', dest='user', default=None,
                        help='Username to use to login')
    parser.add_argument('--password', dest='password',
                        default=None, help='Password used to login')
    args = parser.parse_args(sys.argv[1:])
    if not check_freesurfer():
        sys.stderr.write("FreeSurfer binaries not in path, exiting\n")
        sys.exit(1)
    input_filename = args.subject_dir
    for option_set in STAGE_OPTIONS:
        try:
            output_filename = run_stage(option_set, input_filename, args)
            if output_filename is None:
                # Fix: the original fell through here and passed None to
                # convert_to_zip, crashing into the blanket except below.
                sys.stderr.write("Error running Fsurf " +
                                 "with {0}\n".format(option_set))
                sys.exit(1)
            input_filename = convert_to_zip(output_filename)
            sys.stdout.write("Using {0} as ".format(input_filename) +
                             "input for next step\n")
        except SystemExit:
            # Let deliberate exits (including the one above) propagate.
            raise
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt is no longer
            # swallowed and reported as a generic stage error.
            sys.stderr.write("Error running with {0}\n".format(option_set))
            sys.exit(1)
    sys.stdout.write("Validation run successfully!\n")
    sys.exit(0)


if __name__ == '__main__':
    main()
| |
#
# PluginManagerQt.py -- Simple class to manage plugins.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import threading
import traceback
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga.misc import Bunch, Future
class PluginManagerError(Exception):
    """Raised for plugin-management failures (e.g. starting a plugin that is already active)."""
    pass
class PluginManager(object):
    def __init__(self, logger, fitsview, ds, mm):
        """Initialize the plugin manager.

        logger -- logger for plugin lifecycle messages
        fitsview -- main viewer/shell object handed to plugins
        ds -- desktop/workspace manager used to raise tabs
        mm -- module manager used to import plugin modules
        """
        super(PluginManager, self).__init__()
        self.logger = logger
        self.fv = fitsview
        self.ds = ds
        self.mm = mm
        self.lock = threading.RLock()
        # case-insensitive map of plugin name -> loaded plugin info bunch
        self.plugin = Bunch.caselessDict()
        # lowercased name -> taskbar bunch for currently started plugins
        self.active = {}
        # lowercased names of plugins that currently hold focus
        self.focus = set([])
        # plugins that demand exclusive focus (others defocused first)
        self.exclusive = set([])
        # taskbar label background color used for the focused plugin
        self.focuscolor = 'lightgreen'
        self.hbox = None
    def set_widget(self, hbox):
        """Set the horizontal layout used as the plugin taskbar."""
        self.hbox = hbox
def loadPlugin(self, name, spec, chinfo=None):
try:
module = self.mm.getModule(spec.module)
className = spec.get('klass', spec.module)
klass = getattr(module, className)
if chinfo == None:
# global plug in
obj = klass(self.fv)
fitsimage = None
else:
# local plugin
fitsimage = chinfo.fitsimage
obj = klass(self.fv, fitsimage)
# Prepare configuration for module
opname = name.lower()
self.plugin[opname] = Bunch.Bunch(klass=klass, obj=obj,
widget=None, name=name,
spec=spec,
fitsimage=fitsimage,
chinfo=chinfo)
self.logger.info("Plugin '%s' loaded." % name)
except Exception, e:
self.logger.error("Failed to load plugin '%s': %s" % (
name, str(e)))
#raise PluginManagerError(e)
    def reloadPlugin(self, plname, chinfo=None):
        """Re-instantiate an already-loaded plugin using its stored spec."""
        pInfo = self.getPluginInfo(plname)
        return self.loadPlugin(pInfo.name, pInfo.spec, chinfo=chinfo)
def getPluginInfo(self, plname):
plname = plname.lower()
pInfo = self.plugin[plname]
return pInfo
def getPlugin(self, name):
pInfo = self.getPluginInfo(name)
return pInfo.obj
    def getNames(self):
        """Return the (lowercased) names of all loaded plugins."""
        return self.plugin.keys()
    def update_taskbar(self, localmode=True):
        """Refresh the taskbar widgets to match the active plugin set.

        localmode -- if True, hide all taskbar children first and then
        re-show only the widgets of currently active plugins.
        """
        ## with self.lock:
        if localmode:
            for child in self.hbox.get_children():
                #self.hbox.remove(child)
                child.hide()
        for name in self.active.keys():
            bnch = self.active[name]
            #self.hbox.pack_start(bnch.widget, expand=False, fill=False)
            bnch.widget.show()
def activate(self, pInfo, exclusive=True):
self.logger.debug("pInfo: %s" % (str(pInfo)))
name = pInfo.tabname
lname = pInfo.name.lower()
if not self.active.has_key(lname):
tup = name.split(':')
lblname = ' ' + tup[0] + ':\n' + tup[1] + ' '
lbl = QtGui.QLabel(lblname)
lbl.setAlignment(QtCore.Qt.AlignHCenter)
lbl.setToolTip("Right click for menu")
lbl.setSizePolicy(QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Minimum)
lbl.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
self.hbox.addWidget(lbl, stretch=0,
alignment=QtCore.Qt.AlignLeft)
lbl.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# better than making a whole new subclass just to get a label to
# respond to a mouse click
lbl.mousePressEvent = lambda event: lbl.emit(QtCore.SIGNAL("clicked"))
menu = QtGui.QMenu()
item = QtGui.QAction("Focus", menu)
item.triggered.connect(lambda: self.set_focus(lname))
menu.addAction(item)
item = QtGui.QAction("Stop", menu)
item.triggered.connect(lambda: self.deactivate(lname))
menu.addAction(item)
def on_context_menu(point):
menu.exec_(lbl.mapToGlobal(point))
lbl.connect(lbl, QtCore.SIGNAL('customContextMenuRequested(const QPoint&)'), on_context_menu)
lbl.connect(lbl, QtCore.SIGNAL('clicked'),
lambda: self.set_focus(lname))
bnch = Bunch.Bunch(widget=lbl, label=lbl, lblname=lblname,
menu=menu, pInfo=pInfo, exclusive=exclusive)
self.active[lname] = bnch
if exclusive:
self.exclusive.add(lname)
def deactivate(self, name):
self.logger.debug("deactivating %s" % (name))
lname = name.lower()
if lname in self.focus:
self.clear_focus(lname)
if self.active.has_key(lname):
bnch = self.active[lname]
self.logger.debug("stopping plugin")
self.stop_plugin(bnch.pInfo)
self.logger.debug("removing from tray")
QtHelp.removeWidget(self.hbox, bnch.widget)
bnch.widget = None
bnch.label = None
self.logger.debug("removing from dict")
del self.active[lname]
# Set focus to another plugin if one is running
active = self.active.keys()
if len(active) > 0:
name = active[0]
self.set_focus(name)
def deactivate_focused(self):
names = self.get_focus()
for name in names:
self.deactivate(name)
    def get_active(self):
        """Return the names of currently active plugins (dict keys)."""
        return self.active.keys()
def is_active(self, key):
lname = key.lower()
return lname in self.get_active()
    def get_focus(self):
        """Return a list copy of the names of focused plugins."""
        return list(self.focus)
def get_info(self, name):
lname = name.lower()
return self.active[lname]
    def set_focus(self, name):
        """Give focus to an already-active plugin.

        If the plugin was registered as exclusive, any other exclusive
        plugins currently holding focus are defocused first.  The plugin is
        resumed, its tray label is highlighted, and its channel/dialog tabs
        are raised.
        """
        self.logger.info("Focusing plugin '%s'" % (name))
        lname = name.lower()
        bnch = self.active[lname]
        if bnch.exclusive:
            self.logger.debug("focus=%s exclusive=%s" % (
                self.focus, self.exclusive))
            # only other *exclusive* plugins are forced out of focus
            defocus = filter(lambda x: x in self.exclusive, self.focus)
            self.logger.debug("defocus: %s" % (str(defocus)))
            for xname in defocus:
                self.clear_focus(xname)
        pInfo = bnch.pInfo
        # If this is a local plugin, raise the channel associated with the
        # plug in
        if pInfo.chinfo != None:
            itab = pInfo.chinfo.name
            self.logger.debug("raising tab %s" % (itab))
            self.ds.raise_tab(itab)
        self.logger.debug("resuming plugin %s" % (name))
        pInfo.obj.resume()
        self.logger.debug("adding focus %s" % (lname))
        self.focus.add(lname)
        # highlight the tray label to indicate focus
        bnch.label.setStyleSheet("QLabel { background-color: %s; }" % (
            self.focuscolor))
        # if the plugin has its own GUI pane, bring it to the front
        if pInfo.widget != None:
            self.ds.raise_tab('Dialogs')
            self.logger.debug("raising tab %s" % (pInfo.tabname))
            self.ds.raise_tab(pInfo.tabname)
def clear_focus(self, name):
self.logger.debug("unfocusing %s" % name)
self.logger.info("Unfocusing plugin '%s'" % (name))
lname = name.lower()
bnch = self.active[lname]
try:
self.focus.remove(lname)
bnch.pInfo.obj.pause()
except:
pass
bnch.label.setStyleSheet("QLabel { background-color: grey; }")
def start_plugin(self, chname, opname, alreadyOpenOk=False):
return self.start_plugin_future(chname, opname, None,
alreadyOpenOk=alreadyOpenOk)
def start_plugin_future(self, chname, opname, future,
alreadyOpenOk=False):
pInfo = self.getPluginInfo(opname)
plname = chname.upper() + ': ' + pInfo.name
lname = pInfo.name.lower()
if self.active.has_key(lname):
if alreadyOpenOk:
# TODO: raise widgets, rerun start()?
return
raise PluginManagerError("Plugin %s is already active." % (
plname))
# Raise tab with GUI
pInfo.tabname = plname
vbox = None
had_error = False
try:
if hasattr(pInfo.obj, 'build_gui'):
widget = QtHelp.VBox()
vbox = widget.layout()
if future:
pInfo.obj.build_gui(vbox, future=future)
else:
pInfo.obj.build_gui(vbox)
except Exception, e:
had_error = True
errstr = "Plugin UI failed to initialize: %s" % (str(e))
self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "\n".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception, e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
textw = QtGui.QTextEdit()
textw.append(errstr + '\n')
textw.append(tb_str)
textw.setReadOnly(True)
vbox.addWidget(textw, stretch=1)
#raise PluginManagerError(e)
if not had_error:
try:
if future:
pInfo.obj.start(future=future)
else:
pInfo.obj.start()
except Exception, e:
had_error = True
errstr = "Plugin failed to start correctly: %s" % (
str(e))
self.logger.error(errstr)
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception, e:
tb_str = "Traceback information unavailable."
self.logger.error(tb_str)
textw = QtGui.QTextEdit()
textw.append(errstr + '\n')
textw.append(tb_str)
textw.setReadOnly(True)
vbox.addWidget(textw, stretch=1)
#raise PluginManagerError(e)
if vbox != None:
nb = self.ds.get_nb('Dialogs')
self.ds.add_tab(nb, widget, 2, pInfo.tabname, pInfo.tabname)
pInfo.widget = widget
self.activate(pInfo)
self.set_focus(pInfo.name)
else:
# If this is a local plugin, raise the channel associated with the
# plug in
if pInfo.chinfo != None:
itab = pInfo.chinfo.name
self.logger.debug("raising tab %s" % itab)
self.ds.raise_tab(itab)
def stop_plugin(self, pInfo):
self.logger.debug("stopping plugin %s" % (str(pInfo)))
wasError = False
try:
pInfo.obj.stop()
except Exception, e:
wasError = True
self.logger.error("Plugin failed to stop correctly: %s" % (
str(e)))
try:
(type, value, tb) = sys.exc_info()
tb_str = "".join(traceback.format_tb(tb))
self.logger.error("Traceback:\n%s" % (tb_str))
except Exception:
self.logger.error("Traceback information unavailable.")
if pInfo.widget != None:
self.ds.remove_tab(pInfo.tabname)
self.logger.debug("removing widget")
widget = pInfo.widget
pInfo.widget = None
#QtHelp.removeWidget(self.hbox, widget)
# If there are no more dialogs present, raise Thumbs
nb = self.ds.get_nb('Dialogs')
#num_dialogs = nb.get_n_pages()
num_dialogs = len(nb.children())
if num_dialogs == 0:
try:
self.ds.raise_tab('Thumbs')
except:
# No Thumbs tab--OK
pass
if wasError:
raise PluginManagerError(e)
#END
| |
import imp
import logging
import os
import sys
from mock import *
from gp_unittest import *
from gppylib.gpcatalog import GPCatalogTable
class GpCheckCatTestCase(GpTestCase):
    """Unit tests for the gpcheckcat catalog-checking utility.

    gpcheckcat ships without a .py extension, so setUp() loads it with
    imp.load_source and the module under test is reached through
    ``self.subject``.  Database access and the individual check objects
    are replaced with mocks.
    """
    def setUp(self):
        # because gpcheckcat does not have a .py extension, we have to use imp to import it
        # if we had a gpcheckcat.py, this is equivalent to:
        # import gpcheckcat
        # self.subject = gpcheckcat
        gpcheckcat_file = os.path.abspath(os.path.dirname(__file__) + "/../../../gpcheckcat")
        self.subject = imp.load_source('gpcheckcat', gpcheckcat_file)
        self.db_connection = Mock(spec=['close', 'query'])
        self.unique_index_violation_check = Mock(spec=['runCheck'])
        self.foreign_key_check = Mock(spec=['runCheck', 'checkTableForeignKey'])
        self.apply_patches([
            patch("gpcheckcat.pg.connect", return_value=self.db_connection),
            patch("gpcheckcat.UniqueIndexViolationCheck", return_value=self.unique_index_violation_check),
            patch("gpcheckcat.ForeignKeyCheck", return_value=self.foreign_key_check),
            patch('os.environ', new={}),
            patch('pygresql.pgdb'),
        ])
        self.subject.logger = Mock(spec=['log', 'info', 'debug', 'error', 'fatal'])
        self.unique_index_violation_check.runCheck.return_value = []
        self.leaked_schema_dropper = Mock(spec=['drop_leaked_schemas'])
        self.leaked_schema_dropper.drop_leaked_schemas.return_value = []
        # canned foreign-key issues: {check name: [(table, pk columns, rows)]}
        issues_list = dict()
        issues_list['cat1'] = [('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
                               ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
        issues_list['cat2'] = [('pg_type', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
                               ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
        self.foreign_key_check.runCheck.return_value = issues_list
        # minimal two-segment cluster configuration (master + one segment)
        self.subject.GV.cfg = {0:dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0),
                               1:dict(hostname='host1', port=123, id=1, address='123', datadir='dir', content=1, dbid=1)}
        self.subject.GV.checkStatus = True
        self.subject.GV.foreignKeyStatus = True
        self.subject.GV.missingEntryStatus = True
        self.subject.setError = Mock()
        self.subject.print_repair_issues = Mock()
    def test_running_unknown_check__raises_exception(self):
        with self.assertRaises(LookupError):
            self.subject.runOneCheck('some_unknown_check')
    # @skip("order of checks")
    # def test_run_all_checks__runs_all_checks_in_correct_order(self):
    #     self.subject.runAllChecks()
    #
    #     self.unique_index_violation_check.runCheck.assert_any_call(self.db_connection)
    #     # add other checks here
    #     # figure out how to enforce the order of calls;
    #     # at a minimum, check the order number of the static list gpcheckcat.all_checks
    def test_running_unique_index_violation_check__makes_the_check(self):
        self.subject.runOneCheck('unique_index_violation')
        self.unique_index_violation_check.runCheck.assert_called_with(self.db_connection)
    def test_running_unique_index_violation_check__when_no_violations_are_found__passes_the_check(self):
        self.subject.runOneCheck('unique_index_violation')
        self.assertTrue(self.subject.GV.checkStatus)
        self.subject.setError.assert_not_called()
    def test_running_unique_index_violation_check__when_violations_are_found__fails_the_check(self):
        self.unique_index_violation_check.runCheck.return_value = [
            dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
            dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
        ]
        self.subject.runOneCheck('unique_index_violation')
        self.assertFalse(self.subject.GV.checkStatus)
        self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
    def test_checkcat_report__after_running_unique_index_violations_check__reports_violations(self):
        self.unique_index_violation_check.runCheck.return_value = [
            dict(table_oid=123, table_name='stephen_table', index_name='finger', column_names='c1, c2', violated_segments=[-1,8]),
            dict(table_oid=456, table_name='larry_table', index_name='stock', column_names='c1', violated_segments=[-1]),
        ]
        self.subject.runOneCheck('unique_index_violation')
        self.subject.checkcatReport()
        expected_message1 = '  Table stephen_table has a violated unique index: finger'
        expected_message2 = '  Table larry_table has a violated unique index: stock'
        # logger.log is called as log(level, message); collect the messages
        log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
        self.assertIn(expected_message1, log_messages)
        self.assertIn(expected_message2, log_messages)
    def test_drop_leaked_schemas__when_no_leaked_schemas_exist__passes_gpcheckcat(self):
        self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
        self.subject.setError.assert_not_called()
    def test_drop_leaked_schemas____when_leaked_schemas_exist__finds_and_drops_leaked_schemas(self):
        self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
        self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
        self.leaked_schema_dropper.drop_leaked_schemas.assert_called_once_with(self.db_connection)
    def test_drop_leaked_schemas__when_leaked_schemas_exist__passes_gpcheckcat(self):
        self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
        self.subject.drop_leaked_schemas(self.leaked_schema_dropper, self.db_connection)
        self.subject.setError.assert_not_called()
    def test_drop_leaked_schemas__when_leaked_schemas_exist__reports_which_schemas_are_dropped(self):
        self.leaked_schema_dropper.drop_leaked_schemas.return_value = ['schema1', 'schema2']
        self.subject.drop_leaked_schemas(self.leaked_schema_dropper, "some_db_name")
        expected_message = "Found and dropped 2 unbound temporary schemas"
        log_messages = [args[0][1] for args in self.subject.logger.log.call_args_list]
        self.assertIn(expected_message, log_messages)
    def test_automatic_thread_count(self):
        self.db_connection.query.return_value.getresult.return_value = [[0]]
        self._run_batch_size_experiment(100)
        self._run_batch_size_experiment(101)
    @patch('gpcheckcat.GPCatalog', return_value=Mock())
    @patch('sys.exit')
    @patch('gpcheckcat.log_literal')
    def test_truncate_batch_size(self, mock_log, mock_gpcheckcat, mock_sys_exit):
        self.subject.GV.opt['-B'] = 300  # override the setting from available memory
        # setup conditions for 50 primaries and plenty of RAM such that max threads > 50
        primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
        for i in range(1, 50):
            primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
        self.db_connection.query.return_value.getresult.return_value = [['4.3']]
        self.db_connection.query.return_value.dictresult.return_value = primaries
        testargs = ['some_string','-port 1', '-R foo']
        # GOOD_MOCK_EXAMPLE for testing functionality in "__main__": put all code inside a method "main()",
        # which can then be mocked as necessary.
        with patch.object(sys, 'argv', testargs):
            self.subject.main()
            self.assertEquals(self.subject.GV.opt['-B'], len(primaries))
            #mock_log.assert_any_call(50, "Truncated batch size to number of primaries: 50")
            # I am confused that .assert_any_call() did not seem to work as expected --Larry
            last_call = mock_log.call_args_list[0][0][2]
            self.assertEquals(last_call, "Truncated batch size to number of primaries: 50")
    @patch('gpcheckcat_modules.repair.Repair', return_value=Mock())
    @patch('gpcheckcat_modules.repair.Repair.create_repair_for_extra_missing', return_value="/tmp")
    def test_do_repair_for_extra__issues_repair(self, mock1, mock2):
        issues = {("pg_class", "oid"):"extra"}
        self.subject.GV.opt['-E'] = True
        self.subject.do_repair_for_extra(issues)
        self.subject.setError.assert_any_call(self.subject.ERROR_REMOVE)
        self.subject.print_repair_issues.assert_any_call("/tmp")
    @patch('gpcheckcat.removeFastSequence')
    @patch('gpcheckcat.processForeignKeyResult')
    def test_checkForeignKey__with_arg_gp_fastsequence(self, process_foreign_key_mock,fast_seq_mock):
        cat_mock = Mock()
        self.subject.GV.catalog = cat_mock
        # gp_fastsequence issues additionally trigger removeFastSequence
        gp_fastsequence_issue = dict()
        gp_fastsequence_issue['gp_fastsequence'] = [('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
                               ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
        gp_fastsequence_issue['cat2'] = [('pg_type', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]),
                               ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')])]
        self.foreign_key_check.runCheck.return_value = gp_fastsequence_issue
        cat_tables = ["input1", "input2"]
        self.subject.checkForeignKey(cat_tables)
        self.assertEquals(cat_mock.getCatalogTables.call_count, 0)
        self.assertFalse(self.subject.GV.checkStatus)
        self.assertTrue(self.subject.GV.foreignKeyStatus)
        self.subject.setError.assert_any_call(self.subject.ERROR_REMOVE)
        self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
        fast_seq_mock.assert_called_once_with(self.db_connection)
    @patch('gpcheckcat.processForeignKeyResult')
    def test_checkForeignKey__with_arg(self, process_foreign_key_mock):
        cat_mock = Mock()
        self.subject.GV.catalog = cat_mock
        cat_tables = ["input1", "input2"]
        self.subject.checkForeignKey(cat_tables)
        self.assertEquals(cat_mock.getCatalogTables.call_count, 0)
        self.assertFalse(self.subject.GV.checkStatus)
        self.assertTrue(self.subject.GV.foreignKeyStatus)
        self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
        self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
    @patch('gpcheckcat.processForeignKeyResult')
    def test_checkForeignKey__no_arg(self, process_foreign_key_mock):
        cat_mock = Mock(spec=['getCatalogTables'])
        cat_tables = ["input1", "input2"]
        cat_mock.getCatalogTables.return_value = cat_tables
        self.subject.GV.catalog = cat_mock
        self.subject.checkForeignKey()
        self.assertEquals(cat_mock.getCatalogTables.call_count, 1)
        self.assertFalse(self.subject.GV.checkStatus)
        self.assertTrue(self.subject.GV.foreignKeyStatus)
        self.subject.setError.assert_any_call(self.subject.ERROR_NOREPAIR)
        self.foreign_key_check.runCheck.assert_called_once_with(cat_tables)
    # Test gpcheckat -C option with checkForeignKey
    @patch('gpcheckcat.GPCatalog', return_value=Mock())
    @patch('sys.exit')
    @patch('gpcheckcat.checkTableMissingEntry')
    def test_runCheckCatname__for_checkForeignKey(self, mock1, mock2, mock3):
        self.subject.checkForeignKey = Mock()
        gpcat_class_mock = Mock(spec=['getCatalogTable'])
        cat_obj_mock = Mock()
        self.subject.getCatObj = gpcat_class_mock
        self.subject.getCatObj.return_value = cat_obj_mock
        primaries = [dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=-1, dbid=0, isprimary='t')]
        for i in range(1, 50):
            primaries.append(dict(hostname='host0', port=123, id=1, address='123', datadir='dir', content=1, dbid=i, isprimary='t'))
        self.db_connection.query.return_value.getresult.return_value = [['4.3']]
        self.db_connection.query.return_value.dictresult.return_value = primaries
        self.subject.GV.opt['-C'] = 'pg_class'
        testargs = ['gpcheckcat', '-port 1', '-C pg_class']
        with patch.object(sys, 'argv', testargs):
            self.subject.main()
            # NOTE(review): leading space in ' pg_class' comes from option parsing; verify upstream
            self.subject.getCatObj.assert_called_once_with(' pg_class')
            self.subject.checkForeignKey.assert_called_once_with([cat_obj_mock])
    @patch('gpcheckcat.checkTableMissingEntry', return_value = None)
    def test_checkMissingEntry__no_issues(self, mock1):
        cat_mock = Mock()
        cat_tables = ["input1", "input2"]
        cat_mock.getCatalogTables.return_value = cat_tables
        self.subject.GV.catalog = cat_mock
        self.subject.runOneCheck("missing_extraneous")
        self.assertTrue(self.subject.GV.missingEntryStatus)
        self.subject.setError.assert_not_called()
    @patch('gpcheckcat.checkTableMissingEntry', return_value= {("pg_clas", "oid"): "extra"})
    @patch('gpcheckcat.getPrimaryKeyColumn', return_value = (None,"oid"))
    def test_checkMissingEntry__uses_oid(self, mock1, mock2):
        self.subject.GV.opt['-E'] = True
        aTable = Mock(spec=GPCatalogTable)
        cat_mock = Mock()
        cat_mock.getCatalogTables.return_value = [aTable]
        self.subject.GV.catalog = cat_mock
        self.subject.runOneCheck("missing_extraneous")
        self.assertEquals(aTable.getPrimaryKey.call_count, 1)
        self.subject.setError.assert_called_once_with(self.subject.ERROR_REMOVE)
    @patch('gpcheckcat.checkTableMissingEntry', return_value= {("pg_operator", "typename, typenamespace"): "extra"})
    @patch('gpcheckcat.getPrimaryKeyColumn', return_value = (None,None))
    def test_checkMissingEntry__uses_pkeys(self, mock1, mock2):
        self.subject.GV.opt['-E'] = True
        aTable = MagicMock(spec=GPCatalogTable)
        aTable.tableHasConsistentOids.return_value = False
        cat_mock = Mock()
        cat_mock.getCatalogTables.return_value = [aTable]
        self.subject.GV.catalog = cat_mock
        self.subject.runOneCheck("missing_extraneous")
        self.assertEquals(aTable.getPrimaryKey.call_count, 1)
        self.subject.setError.assert_called_once_with(self.subject.ERROR_REMOVE)
    def test_getReportConfiguration_uses_contentid(self):
        report_cfg = self.subject.getReportConfiguration()
        self.assertEqual("content -1", report_cfg[-1]['segname'])
    ####################### PRIVATE METHODS #######################
    def _run_batch_size_experiment(self, num_primaries):
        # Drive gpcheckcat with num_primaries segments and a small batch
        # size, counting start/join cycles via the closures below.
        BATCH_SIZE = 4
        self.subject.GV.opt['-B'] = BATCH_SIZE
        self.num_batches = 0
        self.num_joins = 0
        self.num_starts = 0
        self.is_remainder_case = False
        for i in range(2, num_primaries):
            self.subject.GV.cfg[i] = dict(hostname='host1', port=123, id=1, address='123',
                                          datadir='dir', content=1, dbid=i)
        def count_starts():
            self.num_starts += 1
        def count_joins():
            if self.num_starts != BATCH_SIZE:
                self.is_remainder_case = True
            self.num_joins += 1
            if self.num_joins == BATCH_SIZE:
                self.num_batches += 1
                self.num_joins = 0
                self.num_starts = 0
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    run_tests()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import Blueprint
from flask.ext.restplus import fields, Api
from . import TestCase
class FieldTestCase(TestCase):
    """Base class for field tests: mounts a fresh Api on a blueprint."""
    def setUp(self):
        super(FieldTestCase, self).setUp()
        # Register the API through a blueprint, mirroring real app wiring.
        bp = Blueprint('api', __name__)
        self.api = Api(bp)
        self.app.register_blueprint(bp)
class RawFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Raw and its common options."""
    def test_simple_raw_field(self):
        field = fields.Raw()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'object'})
    def test_raw_field_with_description(self):
        field = fields.Raw(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'object', 'description': 'A description'})
    def test_raw_field_with_title(self):
        field = fields.Raw(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'object', 'title': 'A title'})
    def test_raw_field_with_required(self):
        field = fields.Raw(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'object'})
    def test_raw_field_with_readonly(self):
        field = fields.Raw(readonly=True)
        self.assertEqual(field.__schema__, {'type': 'object', 'readOnly': True})
    def test_raw_field_with_default(self):
        field = fields.Raw(default='aaa')
        self.assertEqual(field.__schema__, {'type': 'object', 'default': 'aaa'})
class StringFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.String, including enum and
    discriminator behavior."""
    def test_simple_string_field(self):
        field = fields.String()
        self.assertFalse(field.required)
        self.assertFalse(field.discriminator)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_string_field_with_description(self):
        field = fields.String(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'description': 'A description'})
    def test_string_field_with_title(self):
        field = fields.String(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'title': 'A title'})
    def test_string_field_with_required(self):
        field = fields.String(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_string_field_with_readonly(self):
        field = fields.String(readonly=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'readOnly': True})
    def test_string_field_with_enum(self):
        # the first enum value is used as the schema example
        enum = ['A', 'B', 'C']
        field = fields.String(enum=enum)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'enum': enum, 'example': enum[0]})
    def test_string_field_with_callable_enum(self):
        enum = lambda: ['A', 'B', 'C']  # noqa
        field = fields.String(enum=enum)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'enum': ['A', 'B', 'C'], 'example': 'A'})
    def test_string_field_with_default(self):
        field = fields.String(default='aaa')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'default': 'aaa'})
    def test_string_field_with_discriminator(self):
        # a discriminator field is implicitly required
        field = fields.String(discriminator=True)
        self.assertTrue(field.discriminator)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_string_field_with_discriminator_override_require(self):
        # discriminator forces required even when required=False is passed
        field = fields.String(discriminator=True, required=False)
        self.assertTrue(field.discriminator)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_discriminator_output(self):
        model = self.api.model('Test', {
            'name': fields.String(discriminator=True),
        })
        data = self.api.marshal({}, model)
        self.assertEqual(data, {'name': 'Test'})
    def test_multiple_discriminator_field(self):
        # only one discriminator per model is allowed
        model = self.api.model('Test', {
            'name': fields.String(discriminator=True),
            'name2': fields.String(discriminator=True),
        })
        with self.assertRaises(ValueError):
            self.api.marshal(object(), model)
class IntegerFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Integer and its options."""
    def test_simple_integer_field(self):
        field = fields.Integer()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer'})
    def test_integer_field_with_description(self):
        field = fields.Integer(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'description': 'A description'})
    def test_integer_field_with_title(self):
        field = fields.Integer(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'title': 'A title'})
    def test_integer_field_with_required(self):
        field = fields.Integer(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer'})
    def test_integer_field_with_readonly(self):
        field = fields.Integer(readonly=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'readOnly': True})
    def test_integer_field_with_min_max(self):
        field = fields.Integer(min=0, max=5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'minimum': 0, 'maximum': 5})
    def test_integer_field_with_default(self):
        field = fields.Integer(default=42)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'default': 42})
class BooleanFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Boolean and its options."""
    def test_simple_boolean_field(self):
        field = fields.Boolean()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean'})
    def test_boolean_field_with_description(self):
        field = fields.Boolean(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean', 'description': 'A description'})
    def test_boolean_field_with_title(self):
        field = fields.Boolean(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean', 'title': 'A title'})
    def test_boolean_field_with_required(self):
        field = fields.Boolean(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean'})
    def test_boolean_field_with_readonly(self):
        field = fields.Boolean(readonly=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean', 'readOnly': True})
    def test_boolean_field_with_default(self):
        field = fields.Boolean(default=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean', 'default': True})
class FloatFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Float (maps to 'number')."""
    def test_simple_float_field(self):
        field = fields.Float()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_float_field_with_description(self):
        field = fields.Float(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'description': 'A description'})
    def test_float_field_with_title(self):
        field = fields.Float(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'title': 'A title'})
    def test_float_field_with_required(self):
        field = fields.Float(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_float_field_with_readonly(self):
        field = fields.Float(readonly=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'readOnly': True})
    def test_float_field_with_min_max(self):
        field = fields.Float(min=0, max=5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'minimum': 0, 'maximum': 5})
    def test_float_field_with_default(self):
        field = fields.Float(default=0.5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class FixedFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Fixed (maps to 'number')."""
    def test_simple_fixed_field(self):
        field = fields.Fixed()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_fixed_field_with_description(self):
        field = fields.Fixed(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'description': 'A description'})
    def test_fixed_field_with_title(self):
        field = fields.Fixed(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'title': 'A title'})
    def test_fixed_field_with_required(self):
        field = fields.Fixed(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_fixed_field_with_readonly(self):
        field = fields.Fixed(readonly=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'readOnly': True})
    def test_fixed_field_with_min_max(self):
        field = fields.Fixed(min=0, max=5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'minimum': 0, 'maximum': 5})
    def test_fixed_field_with_default(self):
        field = fields.Fixed(default=0.5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class ArbitraryFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Arbitrary (maps to 'number')."""
    def test_simple_arbitrary_field(self):
        field = fields.Arbitrary()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_arbitrary_field_with_required(self):
        field = fields.Arbitrary(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})
    def test_arbitrary_field_with_description(self):
        field = fields.Arbitrary(description='A description')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'description': 'A description'})
    def test_arbitrary_field_with_title(self):
        field = fields.Arbitrary(title='A title')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'title': 'A title'})
    def test_arbitrary_field_with_readonly(self):
        field = fields.Arbitrary(readonly=True)
        self.assertEqual(field.__schema__, {'type': 'number', 'readOnly': True})
    def test_arbitrary_field_with_min_max(self):
        field = fields.Arbitrary(min=0, max=5)
        self.assertEqual(field.__schema__, {'type': 'number', 'minimum': 0, 'maximum': 5})
    def test_arbitrary_field_with_default(self):
        field = fields.Arbitrary(default=0.5)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class DatetimeFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.DateTime ('string'/'date-time')."""
    def test_simple_datetime_field(self):
        field = fields.DateTime()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time'})
    def test_datetime_field_with_required(self):
        field = fields.DateTime(required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time'})
    def test_datetime_field_with_description(self):
        field = fields.DateTime(description='A description')
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time', 'description': 'A description'})
    def test_datetime_field_with_title(self):
        field = fields.DateTime(title='A title')
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time', 'title': 'A title'})
    def test_datetime_field_with_default(self):
        field = fields.DateTime(default='2014-08-25')
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time', 'default': '2014-08-25'})
class FormatedStringFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.FormattedString (maps to 'string').

    NOTE(review): class name keeps the historical single-'t' spelling.
    """
    def test_simple_formatted_string_field(self):
        field = fields.FormattedString('Hello {name}')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_formatted_string_field_with_required(self):
        field = fields.FormattedString('Hello {name}', required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_formatted_string_field_with_description(self):
        field = fields.FormattedString('Hello {name}', description='A description')
        self.assertEqual(field.__schema__, {'type': 'string', 'description': 'A description'})
    def test_formatted_field_with_title(self):
        field = fields.FormattedString('Hello {name}', title='A title')
        self.assertEqual(field.__schema__, {'type': 'string', 'title': 'A title'})
    def test_formatted_string_field_with_readonly(self):
        field = fields.FormattedString('Hello {name}', readonly=True)
        self.assertEqual(field.__schema__, {'type': 'string', 'readOnly': True})
class UrlFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Url (maps to 'string')."""
    def test_simple_url_field(self):
        field = fields.Url('endpoint')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_url_field_with_required(self):
        field = fields.Url('endpoint', required=True)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
    def test_url_field_with_description(self):
        field = fields.Url('endpoint', description='A description')
        self.assertEqual(field.__schema__, {'type': 'string', 'description': 'A description'})
    def test_url_field_with_title(self):
        field = fields.Url('endpoint', title='A title')
        self.assertEqual(field.__schema__, {'type': 'string', 'title': 'A title'})
    def test_url_field_with_readonly(self):
        field = fields.Url('endpoint', readonly=True)
        self.assertEqual(field.__schema__, {'type': 'string', 'readOnly': True})
class NestedFieldTest(FieldTestCase):
    """Swagger schema emitted by fields.Nested ($ref to the nested model)."""
    def test_nested_field(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})
    def test_nested_field_with_required(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, required=True)
        self.assertTrue(field.required)
        self.assertFalse(field.allow_null)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})
    def test_nested_field_with_description(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, description='A description')
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'description': 'A description'})
    def test_nested_field_with_title(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, title='A title')
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'title': 'A title'})
    def test_nested_field_with_allow_null(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, allow_null=True)
        self.assertFalse(field.required)
        self.assertTrue(field.allow_null)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})
    def test_nested_field_with_readonly(self):
        # this test registers the model on a direct Api (no blueprint)
        api = Api(self.app)
        nested_fields = api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, readonly=True)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'readOnly': True})
    def test_nested_field_as_list(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, as_list=True)
        self.assertTrue(field.as_list)
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})
    def test_nested_field_as_list_is_reusable(self):
        # as_list on one instance must not leak into a fresh instance
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, as_list=True)
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})
        field = fields.Nested(nested_fields)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})
class ListFieldTest(FieldTestCase):
    """Schema serialization tests for :class:`fields.List`."""
    def test_list_field(self):
        """A bare List field is optional and maps to an array-of-strings schema."""
        list_field = fields.List(fields.String)
        self.assertFalse(list_field.required)
        self.assertEqual(list_field.__schema__, {'type': 'array', 'items': {'type': 'string'}})
    def test_list_field_with_required(self):
        """required=True is recorded on the field but does not alter the schema."""
        list_field = fields.List(fields.String, required=True)
        self.assertTrue(list_field.required)
        self.assertEqual(list_field.__schema__, {'type': 'array', 'items': {'type': 'string'}})
    def test_list_field_with_description(self):
        """description is copied into the array schema."""
        expected = {'type': 'array', 'items': {'type': 'string'}, 'description': 'A description'}
        list_field = fields.List(fields.String, description='A description')
        self.assertEqual(list_field.__schema__, expected)
    def test_list_field_with_title(self):
        """title is copied into the array schema."""
        expected = {'type': 'array', 'items': {'type': 'string'}, 'title': 'A title'}
        list_field = fields.List(fields.String, title='A title')
        self.assertEqual(list_field.__schema__, expected)
    def test_list_field_with_readonly(self):
        """readonly=True becomes the Swagger readOnly flag."""
        expected = {'type': 'array', 'items': {'type': 'string'}, 'readOnly': True}
        list_field = fields.List(fields.String, readonly=True)
        self.assertEqual(list_field.__schema__, expected)
    def test_list_field_with_nested_field(self):
        """Nested items serialize as a $ref inside the array schema."""
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        list_field = fields.List(fields.Nested(nested_fields))
        self.assertEqual(list_field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})
class ClassNameFieldTest(FieldTestCase):
    """Tests for :class:`fields.ClassName`, which emits the value's class name."""
    def test_simple_string_field(self):
        """A bare ClassName field is optional and maps to a string schema."""
        name_field = fields.ClassName()
        self.assertFalse(name_field.required)
        self.assertFalse(name_field.discriminator)
        self.assertEqual(name_field.__schema__, {'type': 'string'})
    def test_default_output_classname(self):
        """By default the class name is emitted verbatim."""
        class FakeClass(object):
            pass
        model = self.api.model('Test', {
            'name': fields.ClassName(),
        })
        marshalled = self.api.marshal(FakeClass(), model)
        self.assertEqual(marshalled, {'name': 'FakeClass'})
    def test_output_dash(self):
        """With dash=True the name is lower-cased with underscore separators."""
        class FakeClass(object):
            pass
        model = self.api.model('Test', {
            'name': fields.ClassName(dash=True),
        })
        marshalled = self.api.marshal(FakeClass(), model)
        self.assertEqual(marshalled, {'name': 'fake_class'})
class PolymorphTest(FieldTestCase):
    """Tests for :class:`fields.Polymorph`: marshalling an object with the
    model matching its concrete class, chosen from a class->model mapping."""
    def test_polymorph_field(self):
        """Each mapped class marshals with its own inherited model."""
        parent = self.api.model('Person', {
            'name': fields.String,
        })
        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })
        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })
        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'
        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'
        mapping = {
            Child1: child1,
            Child2: child2
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })
        def data(cls):
            # Marshal a Thing whose owner is an instance of cls.
            return self.api.marshal({'owner': cls()}, thing)
        self.assertEqual(data(Child1), {'owner': {
            'name': 'child1',
            'extra1': 'extra1'
        }})
        self.assertEqual(data(Child2), {'owner': {
            'name': 'child2',
            'extra2': 'extra2'
        }})
    def test_polymorph_field_no_common_ancestor(self):
        """Mapped models without a common parent are rejected at construction."""
        child1 = self.api.model('Child1', {
            'extra1': fields.String,
        })
        child2 = self.api.model('Child2', {
            'extra2': fields.String,
        })
        class Child1(object):
            pass
        class Child2(object):
            pass
        mapping = {
            Child1: child1,
            Child2: child2
        }
        with self.assertRaises(ValueError):
            fields.Polymorph(mapping)
    def test_polymorph_field_unknown_class(self):
        """Marshalling an instance absent from the mapping raises ValueError."""
        parent = self.api.model('Person', {
            'name': fields.String,
        })
        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })
        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })
        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'
        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'
        mapping = {
            Child1: child1,
            Child2: child2
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })
        with self.assertRaises(ValueError):
            self.api.marshal({'owner': object()}, thing)
    def test_polymorph_field_ambiguous_mapping(self):
        """An instance matching two mapped classes (via inheritance) raises."""
        parent = self.api.model('Parent', {
            'name': fields.String,
        })
        child = self.api.inherit('Child', parent, {
            'extra': fields.String,
        })
        class Parent(object):
            name = 'parent'
        class Child(Parent):
            extra = 'extra'
        mapping = {
            Parent: parent,
            Child: child
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })
        with self.assertRaises(ValueError):
            self.api.marshal({'owner': Child()}, thing)
    def test_polymorph_field_required_default(self):
        """A required Polymorph falls back to its default when value is missing."""
        parent = self.api.model('Person', {
            'name': fields.String,
        })
        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })
        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })
        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'
        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'
        mapping = {
            Child1: child1,
            Child2: child2
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping, required=True, default={'name': 'default'}),
        })
        data = self.api.marshal({}, thing)
        self.assertEqual(data, {'owner': {
            'name': 'default'
        }})
    def test_polymorph_field_not_required(self):
        """A non-required Polymorph marshals a missing value as None."""
        parent = self.api.model('Person', {
            'name': fields.String,
        })
        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })
        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })
        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'
        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'
        mapping = {
            Child1: child1,
            Child2: child2
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })
        data = self.api.marshal({}, thing)
        self.assertEqual(data, {'owner': None})
    def test_polymorph_with_discriminator(self):
        """A discriminator field on the parent is filled with the model name."""
        parent = self.api.model('Person', {
            'name': fields.String,
            'model': fields.String(discriminator=True),
        })
        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })
        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })
        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'
        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'
        mapping = {
            Child1: child1,
            Child2: child2
        }
        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })
        def data(cls):
            # Marshal a Thing whose owner is an instance of cls.
            return self.api.marshal({'owner': cls()}, thing)
        self.assertEqual(data(Child1), {'owner': {
            'name': 'child1',
            'model': 'Child1',
            'extra1': 'extra1'
        }})
        self.assertEqual(data(Child2), {'owner': {
            'name': 'child2',
            'model': 'Child2',
            'extra2': 'extra2'
        }})
class CustomFieldTest(FieldTestCase):
    """A field subclass can override ``__schema_format__`` to set the format."""
    def test_custom_field(self):
        class CustomField(fields.Integer):
            __schema_format__ = 'int64'
        expected = {'type': 'integer', 'format': 'int64'}
        self.assertEqual(CustomField().__schema__, expected)
| |
import bs4
import logging
import random
import requests
import threading
import time
from modules.l2_miner.common import HEADERS
from modules.l2_miner.common import SERVER_ADDR
from modules.l2_miner.common import URL_PREFIX
from modules.utils import UnimplementedMethodCalled
from modules.utils import get_exception_msg
from modules.utils import sleep_if
class BoardCacheInterface(object):
    """Interface a document cache must implement for `BoardListener`."""
    @property
    def newest_doc(self):
        # The most recently stored document, or a falsy value when empty.
        raise UnimplementedMethodCalled()
    @property
    def oldest_doc(self):
        # The oldest stored document, or a falsy value when empty.
        raise UnimplementedMethodCalled()
    def add_doc(self, idid, url):
        # Store the document at `url` under id `idid`; callers treat the
        # return value as truthy on success (see _UpdateThread.run).
        raise UnimplementedMethodCalled()
class _UrlGenerator(object):
def __init__(self,
urls_getter, url_num_getter,
min_url_num, max_page_idx, urls, prev_url):
self._urls_getter = urls_getter
self._url_num_getter = url_num_getter
self._min_url_num = min_url_num
self._curr_page_idx = max_page_idx
self._curr_page_urls = urls
self._prev_url = prev_url
def __iter__(self):
return self
def __next__(self):
while self._curr_page_idx > 1 and not self._curr_page_urls:
self._curr_page_idx -= 1
self._curr_page_urls = self._urls_getter(self._curr_page_idx)
if not self._curr_page_urls:
return self._prev_url[ : -len('.X.XXX.html')] + '_x'
curr = self._curr_page_urls.pop()
if self._url_num_getter(curr) <= self._min_url_num:
return self._prev_url[ : -len('.X.XXX.html')] + '_x'
self._prev_url = SERVER_ADDR + curr
return SERVER_ADDR + curr
class _UpdateThread(threading.Thread):
    """Worker thread that feeds documents into the cache, newest first.
    Ids are assigned downward from ``max_id``; the owner is always notified
    through ``died_callback`` when the thread finishes (even on error).
    """
    def __init__(self, logger, died_callback, cache, max_id, url_generator):
        super(_UpdateThread, self).__init__()
        self._logger = logger
        self._died_callback = died_callback
        self._cache = cache
        self._curr_id = max_id
        self._url_generator = url_generator
        self._stop_flag = False
    def run(self):
        try:
            # Iteratively asks the cache to add a document, from new to old.
            for url in self._url_generator:
                succ = self._cache.add_doc(self._curr_id, url)
                self._curr_id -= 1
                # Stop when the cache refuses the document, ids run out,
                # or a stop was requested.
                if not succ or self._curr_id < 0 or self._stop_flag:
                    break
        except Exception as e:
            self._logger.warning(get_exception_msg(e))
        # Always notify the owner so it can join() and discard this thread.
        self._died_callback(self)
    def stop(self):
        # Best-effort: the flag is checked once per fetched document.
        self._stop_flag = True
class BoardListener(threading.Thread):
    """Thread that keeps a local document cache in sync with a remote board.
    On each round it inspects the cache's newest/oldest documents, estimates
    what is missing at either end, and spawns `_UpdateThread`s to fetch it.
    """
    def __init__(self, logger, board_name, update_timestamp, cache):
        """
        :param logger: logger instance for this listener.
        :param board_name: remote board name (appended to URL_PREFIX).
        :param update_timestamp: seconds to wait between update rounds.
        :param cache: `BoardCacheInterface` implementation to fill.
        """
        super(BoardListener, self).__init__()
        self._logger = logger
        self._board_name = board_name
        self._url_prefix = URL_PREFIX + board_name + '/'
        self._update_timestamp = update_timestamp
        self._cache = cache
        self._stop_flag = False
        self._update_threads = []
        self._died_threads = []
        self._sync_lock = threading.Lock()
        # Quiet per-connection logs emitted by requests' transport layer.
        logging.getLogger('urllib3').setLevel(logging.WARNING)
    def run(self):
        """Main loop: one update round, then sleep until stopped."""
        while not self._stop_flag:
            try:
                newest_doc = self._cache.newest_doc
                oldest_doc = self._cache.oldest_doc
                max_page_idx = self._get_max_page_idx()
                self._run_fetch_new_docs(newest_doc, max_page_idx)
                self._run_fill_old_docs(oldest_doc, max_page_idx)
            except Exception as e:
                # `Logger.warn` is a deprecated alias; use `warning` like
                # the rest of this module.
                self._logger.warning(get_exception_msg(e))
            if not sleep_if(self._update_timestamp,
                            self._clean_and_check_should_continue):
                break
    def stop(self):
        """Stop the listener and all update threads; blocks until joined."""
        with self._sync_lock:
            self._stop_flag = True
            running = self._update_threads
            self._update_threads = []
        # Join OUTSIDE the lock: a dying update thread calls
        # _update_thread_died_callback, which acquires _sync_lock, so
        # joining while holding the lock would deadlock. Once _stop_flag
        # is set the callback no longer touches the thread lists, so the
        # snapshot above is safe.
        for thr in running:
            thr.stop()
            thr.join()
        with self._sync_lock:
            self._clean_died_threads()
    def get_url_compare_num(self, url):
        """Return a number usable to order document urls.
        The numeric segment after the board name is extracted; urls whose
        segment ends with '_...' instead of '.' map to that number minus
        0.5, and None maps to 0.
        """
        if url is None:
            return 0
        a = url[url.find(self._board_name) + len(self._board_name) + 3:]
        if '.' in a:
            return int(a[:a.find('.')])
        else:
            return int(a[:a.find('_')]) - 0.5
    def _run_fetch_new_docs(self, newest_doc, max_page_idx):
        """Spawn an update thread for documents newer than the cached newest."""
        if newest_doc:
            url_num = self.get_url_compare_num(newest_doc.url)
            idid = newest_doc.meta_data.idid
        else:
            # Empty cache: fetch everything.
            url_num = idid = -1
        self._logger.info('Estimates the number of unfetched documents.')
        n = self._estimate_num_urls_between(url_num, max_page_idx)
        if n > 0:
            self._logger.info(
                'Found about %d new docs, update the cache.' % n)
            urls = self._get_page_urls(max_page_idx)
            self._start_update_thread(idid + n, url_num, max_page_idx, urls)
        else:
            self._logger.info('Great, there is no new document on the server.')
    def _run_fill_old_docs(self, oldest_doc, max_page_idx):
        """Spawn an update thread for documents older than the cached oldest."""
        if oldest_doc:
            idid = oldest_doc.meta_data.idid
            self._logger.info('Fetch document with id less than %d.' % idid)
            url_num = self.get_url_compare_num(oldest_doc.url)
            (_, us) = self._get_page_contain_url_num(url_num, max_page_idx)
            us = [u for u in us if self.get_url_compare_num(u) < url_num]
            self._start_update_thread(
                idid - 1, -1, max_page_idx, us, oldest_doc.url)
        else:
            self._logger.info('Great, the mirror is complete.')
    def _clean_and_check_should_continue(self):
        """Housekeeping hook for sleep_if(); returns False once stopped."""
        with self._sync_lock:
            if self._stop_flag:
                return False
            else:
                self._clean_died_threads()
                return True
    def _clean_died_threads(self):
        # Caller must hold _sync_lock.
        for thr in self._died_threads:
            thr.join()
        self._died_threads = []
    def _start_update_thread(
            self, max_id, min_url_num, max_page_idx, urls, prev_url=''):
        """Create and start an _UpdateThread unless the listener is stopping."""
        with self._sync_lock:
            if not self._stop_flag:
                url_generator = _UrlGenerator(self._get_page_urls,
                                              self.get_url_compare_num,
                                              min_url_num, max_page_idx,
                                              urls, prev_url)
                thr = _UpdateThread(self._logger.getChild('UpdateDaemon'),
                                    self._update_thread_died_callback,
                                    self._cache, max_id, url_generator)
                self._update_threads.append(thr)
                thr.start()
    def _update_thread_died_callback(self, thr):
        # Move a finished thread to the died list so it can be joined later.
        with self._sync_lock:
            if not self._stop_flag:
                self._died_threads.append(thr)
                self._update_threads.remove(thr)
    def _get_max_page_idx(self):
        """Find the last available page index via exponential + binary search."""
        upper = 1
        while self._is_page_availiable(upper):
            upper *= 2
        lower = upper // 2
        while lower + 1 < upper:
            mid = (lower + upper) // 2
            if self._is_page_availiable(mid):
                lower = mid
            else:
                upper = mid
        self._logger.info('There are %d pages.' % lower)
        return lower
    def _estimate_num_urls_between(self, min_url_num, max_page_idx):
        """Estimate how many document urls are newer than min_url_num."""
        (lower, us) = self._get_page_contain_url_num(min_url_num, max_page_idx)
        if max_page_idx - lower > 50:
            # Too many pages to enumerate; assume ~20 documents per page.
            m = (max_page_idx - lower) * 20
        else:
            # If the number of pages after the page which contains min_url_num
            # is not too large, we can estimate the number of urls tightly.
            m = sum(len(self._get_page_urls(i))
                    for i in range(lower + 1, max_page_idx + 1))
        n = len([a for a in us if self.get_url_compare_num(a) > min_url_num])
        return n + m
    def _get_page_contain_url_num(self, url_num, max_page_idx):
        """Binary-search for the page whose url range spans url_num.
        Returns (page_index, urls_on_that_page).
        """
        lower, upper = 1, max_page_idx
        while lower < upper:
            mid = (lower + upper) // 2
            re_calc = False
            # We need to re-fetch another page's urls if all the documents in
            # the `mid`-th page are deleted.
            while True:
                urls = self._get_page_urls(mid)
                if urls:
                    break
                if mid == lower:
                    lower += 1
                    re_calc = True
                    break
                elif mid == upper:
                    upper -= 1
                    re_calc = True
                    break
                else:
                    mid = random.randint(lower, upper)
            if re_calc:
                continue
            if url_num < self.get_url_compare_num(urls[0]):
                upper = mid - 1
            elif self.get_url_compare_num(urls[-1]) < url_num:
                lower = mid + 1
            else:
                lower = upper = mid
        return (lower, self._get_page_urls(lower))
    def _get_page_urls(self, page_idx):
        """Fetch one index page; return the document hrefs found on it.
        Retries (with a short sleep) until the server answers 200.
        """
        while True:
            request = requests.get(self._get_url(page_idx), headers=HEADERS)
            if request.status_code == 200:
                break
            time.sleep(0.1)
        page = bs4.BeautifulSoup(request.text, 'lxml')
        ret = []
        for chd in page.find(attrs={'class': 'r-list-container'}).children:
            try:
                c = chd.get('class')
                if c is not None:
                    if 'r-list-sep' in c:
                        break
                    elif 'r-ent' in c:
                        a = chd.find_all('a')
                        if a:
                            ret.append(a[0].get('href'))
            except Exception:
                # Best-effort: skip children that are not element tags.
                pass
        return ret
    def _is_page_availiable(self, page_idx):
        """Return True if the index page answers 200 within 10 attempts."""
        # (Method name misspelling kept: all callers live in this class.)
        for _ in range(10):
            request = requests.get(self._get_url(page_idx), headers=HEADERS)
            if request.status_code == 200:
                return True
            time.sleep(0.1)
        return False
    def _get_url(self, page_idx):
        """Absolute url of the board's page_idx-th index page."""
        return self._url_prefix + 'index%d.html' % page_idx
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Giacomo Cariello. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from copy import deepcopy
from datetime import datetime
import logging
import os
import platform
import re
from shutil import rmtree
from subprocess import Popen, PIPE, STDOUT
import tarfile
import tempfile
import time
from builtins import map # pylint: disable=redefined-builtin
from builtins import str # pylint: disable=redefined-builtin
from builtins import object # pylint: disable=redefined-builtin
from distutils.dir_util import copy_tree
from future import standard_library
import tzlocal
from zc.buildout import UserError
from zc.buildout.download import Download
from dockeroo import BaseRecipe, BaseSubRecipe
from dockeroo.docker_machine import DockerMachine
from dockeroo.utils import ExternalProcessError
from dockeroo.utils import reify, parse_datetime, random_name, listify
from dockeroo.utils import mkdir
standard_library.install_aliases()
# Default timeout (seconds) passed to DockerEngine operations.
DEFAULT_TIMEOUT = 180
# Shared sink for subprocess output that callers deliberately discard.
# NOTE(review): opened at import time and never closed (lives for the process).
FNULL = open(os.devnull, 'w')
# Field separator used in `docker --format` templates (containers()/images()).
SEPARATOR = '|'
class Archive(object):
    """A tar archive referenced either by remote url or by local path.
    :param url: remote location, fetched by :meth:`download`.
    :param path: local filesystem path (set after a successful download).
    :param prefix: directory prefix applied when importing the archive.
    :param md5sum: optional checksum verified by the downloader.
    """
    def __init__(self, url=None, path=None, prefix=None, md5sum=None):
        self.url = url
        self.path = path
        self.prefix = prefix
        self.md5sum = md5sum
    def download(self, buildout):
        """Fetch ``self.url`` with zc.buildout's Download helper and record
        the resulting local path on ``self.path``."""
        downloader = Download(buildout['buildout'], hash_name=False)
        self.path, _ = downloader(self.url, md5sum=self.md5sum)
    def __repr__(self):
        return self.url or self.path
class DockerProcess(Popen):
    """Popen subclass that runs the ``docker`` CLI against *engine*.
    The engine's client environment (DOCKER_HOST etc.) is merged into the
    child process environment; stderr defaults to PIPE so callers can
    surface diagnostics.
    """
    def __init__(self, engine, args, stdin=None, stdout=None, stderr=PIPE, env=None, config=None):
        self.engine = engine
        # BUG FIX: the global --config option must come AFTER the docker
        # executable ("docker --config DIR CMD ..."). The previous code
        # prepended it before "docker", producing an argv whose first
        # element was "--config" and could never be executed.
        argv = ['docker']
        if config is not None:
            argv += ['--config', config]
        args = argv + args
        custom_env = os.environ.copy()
        custom_env.update(engine.client_environment)
        custom_env.update(env or {})
        self.engine.logger.debug("Running command: %s", ' '.join(args))
        super(DockerProcess, self).__init__(
            args, stdin=stdin, stdout=stdout, stderr=stderr, close_fds=True, env=custom_env)
class DockerRegistryLogin(object): # pylint: disable=too-few-public-methods
    """Context manager performing ``docker login`` against a registry.
    A throwaway docker config directory holds the session credentials for
    the duration of the ``with`` block and is removed on exit.
    """
    def __init__(self, engine, registry, username, password):
        self.engine = engine
        # docker login is pointed at the registry's v1 endpoint URL.
        self.registry = "https://{}/v1/".format(registry)
        self.username = username
        self.password = password
        self.config_path = None
    def __enter__(self):
        self.config_path = tempfile.mkdtemp()
        # NOTE(review): passing the password via -p exposes it in the host's
        # process list; consider feeding it over stdin instead.
        proc = DockerProcess(self.engine,
                             ['login', '-u', self.username, '-p', self.password, self.registry],
                             config=self.config_path)
        if proc.wait() != 0:
            raise ExternalProcessError(
                "Error requesting \"docker login {}\"".format(self.registry), proc)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        rmtree(self.config_path)
class DockerEngine(object): # pylint: disable=too-many-public-methods
    def __init__(self, logger=None, url=None, tlsverify=None, tlscertpath=None, machine_name=None,
                 shell='/bin/sh', timeout=DEFAULT_TIMEOUT):
        """Wrap a docker client endpoint.
        :param logger: logging.Logger; defaults to the module logger.
        :param url: explicit daemon URL; when None it is derived from the
            docker-machine (see the ``url`` property).
        :param tlsverify: explicit TLS-verification flag override.
        :param tlscertpath: explicit TLS certificate path override.
        :param machine_name: docker-machine name; falls back to the
            DOCKER_MACHINE_NAME environment variable, then to "default"
            when such a machine exists, else no machine is used.
        :param shell: shell used when running commands inside containers.
        :param timeout: default timeout in seconds for engine operations.
        """
        self.logger = logger or logging.getLogger(__name__)
        self.shell = shell
        self.timeout = timeout
        self._tlscertpath = tlscertpath
        self._tlsverify = tlsverify
        self._url = url
        if machine_name is None:
            machine_name = os.environ.get('DOCKER_MACHINE_NAME', None)
        if machine_name is not None:
            self.machine = DockerMachine(machine_name, logger=self.logger)
        else:
            if DockerMachine.machines(name='default'):
                self.machine = DockerMachine('default', logger=self.logger)
            else:
                self.machine = None
@property
@reify
    def client_environment(self):
        """Environment variables the ``docker`` CLI needs to reach this engine.
        Always contains DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH;
        DOCKER_MACHINE_NAME is added when a docker-machine is in use.
        Example:
            >>> dm = DockerEngine(machine_name='default')
            >>> 'DOCKER_TLS_VERIFY' in dm.client_environment
            True
            >>> 'DOCKER_HOST' in dm.client_environment
            True
            >>> 'DOCKER_CERT_PATH' in dm.client_environment
            True
            >>> 'DOCKER_MACHINE_NAME' in dm.client_environment
            True
            >>> dm.client_environment['DOCKER_MACHINE_NAME'] == dm.machine.name
            True
        """
        env = {
            'DOCKER_HOST': self.url,
            'DOCKER_TLS_VERIFY': str(int(self.tlsverify)),
            'DOCKER_CERT_PATH': self.tlscertpath,
        }
        if self.machine is not None:
            env['DOCKER_MACHINE_NAME'] = self.machine.name
        return env
@property
@reify
    def client_version(self):
        r"""Version string reported by the local docker client.
        Example:
            >>> dm = DockerEngine(machine_name='default')
            >>> bool(re.search(r'^\d+.\d+.\d+$', dm.client_version))
            True
        """
        proc = DockerProcess(self, ['version', '-f', '{{.Client.Version}}'], stdout=PIPE)
        if proc.wait() != 0:
            raise ExternalProcessError("Error requesting version", proc)
        # NOTE(review): proc.stdout yields bytes on Python 3 (no text mode is
        # set), so rstrip(os.linesep) presumes Python 2 semantics — confirm.
        return proc.stdout.read().rstrip(os.linesep)
@property
@reify
    def platform(self):
        """Architecture of the engine host (e.g. 'x86_64').
        Taken from the docker-machine when available, otherwise parsed out
        of ``docker info`` output.
        Example:
            >>> dm = DockerEngine(machine_name='default')
            >>> dm.platform in (
            ...     'arm', 'armv4', 'armv4t', 'armv5te', 'armv6j', 'armv7a',
            ...     'hppa', 'hppa1.1', 'hppa2.0', 'hppa64',
            ...     'i386', 'i486', 'i586', 'i686',
            ...     'ia64',
            ...     'm68k',
            ...     'mips', 'mips64',
            ...     'powerpc', 'powerpc64',
            ...     's390',
            ...     'sh', 'sh4', 'sh64',
            ...     'sparc', 'sparc64',
            ...     'x86_64')
            True
        """
        if self.machine is not None:
            return self.machine.platform
        proc = DockerProcess(self, ['info'], stdout=PIPE)
        if proc.wait() != 0:
            raise ExternalProcessError("Error requesting info", proc)
        result = proc.stdout.read().splitlines()
        # NOTE(review): lines are bytes on Python 3, so startswith() with a
        # str prefix would raise there — presumably runs under Python 2; confirm.
        return [l for l in result if l.startswith('Architecture: ')][0].split(': ')[1]
@property
@reify
def url(self):
"""
Example:
>>> dm = DockerEngine(machine_name='default')
>>> dm.url.startswith('tcp://')
True
"""
if self._url is not None:
return self._url
elif self.machine is not None:
return self.machine.url
else:
return None
@property
@reify
def tlsverify(self):
if self._tlsverify is not None:
return self._tlsverify
elif self.machine is not None:
return self.machine.inspect['HostOptions']['EngineOptions']['TlsVerify']
else:
return None
@property
@reify
def tlscertpath(self):
if self._tlscertpath is not None:
return self._tlscertpath
elif self.machine is not None:
return self.machine.inspect['HostOptions']['AuthOptions']['StorePath'].encode('utf-8')
else:
return None
def build_dockerfile(self, tag, path, **kwargs):
self.logger.info("Building Dockerfile from context \"%s\"", path)
args = ['build', '-t', tag]
for key, value in kwargs.items():
args += ['--build-arg', '{}={}'.format(key, value)]
args.append(path)
proc = DockerProcess(self, args)
if proc.wait() != 0:
raise ExternalProcessError(
"Error building Dockerfile from context \"{}\"".format(path), proc)
def clean_stale_images(self):
for image in self.images(dangling='true'):
self.remove_image(image['image'])
for image in self.images('<none>'):
self.remove_image(image['image'])
def commit_container(self, container, image, command=None, user=None,
labels=None, expose=None, volumes=None):
self.logger.info(
"Committing container \"%s\" to image \"%s\"", container, image)
args = ['commit']
if command:
args.append(
"--change='CMD [{}]'".format(', '.join(
['"{}"'.format(x) for x in command.split()])))
if user:
args.append("--change='USER \"{}\"'".format(user))
for key, value in (labels or {}).items():
args.append("--change='LABEL \"{}\"=\"{}\"".format(key, value))
for port in expose or []:
args.append("--change='EXPOSE {}'".format(port))
for volume in volumes or []:
args.append("--change='VOLUME {}'".format(volume))
args += [container, image]
proc = DockerProcess(self, args, stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError(
"Error committing container \"{}\"".format(container), proc)
@listify
    def containers(self, include_stopped=False, **filters):
        """Yield one dict per container known to the engine.
        Runs ``docker ps`` with a pipe-separated ``--format`` template and
        parses each output line back into a dict keyed by snake_case field
        names (labels/ports become lists of tuples, status is normalized,
        created_at becomes a naive local datetime).
        :param include_stopped: also list non-running containers (``-a``).
        :param filters: forwarded as ``-f key=value`` options.
        """
        params = ['ID', 'Image', 'Command', 'CreatedAt', 'RunningFor',
                  'Ports', 'Status', 'Size', 'Names', 'Labels', 'Mounts']
        args = ['ps', '--format',
                SEPARATOR.join(['{{{{.{}}}}}'.format(x) for x in params])]
        if include_stopped:
            args.append('-a')
        for key, value in filters.items():
            args += ['-f', '{}={}'.format(key, value)]
        proc = DockerProcess(self, args, stdout=PIPE)
        for line in proc.stderr.read().splitlines():
            self.logger.error(line)
        proc.wait()
        # CamelCase template field name -> snake_case dict key.
        params_map = dict([(x, re.sub(
            '((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', x).lower()) for x in params])
        for line in proc.stdout.read().splitlines():
            container = {}
            values = line.split(SEPARATOR)
            for num, param in enumerate(params):
                if param in ['Labels', 'Mounts', 'Names', 'Ports']:
                    container[params_map[param]] = values[num].split(',') if values[num] else []
                    if param == 'Labels':
                        # "k=v" strings -> (k, v) tuples.
                        container[params_map[param]] = [tuple(
                            x.split('=')) for x in container[params_map[param]]]
                    elif param == 'Ports':
                        # "host->container" strings -> (host, container) tuples.
                        container[params_map[param]] = [tuple(
                            x.split('->')) for x in container[params_map[param]]]
                elif param == 'Status':
                    # Normalize the leading status word; "up" maps to "running".
                    container[params_map[param]] = {
                        'created': 'created',
                        'dead': 'dead',
                        'exited': 'exited',
                        'paused': 'paused',
                        'restarting': 'restarting',
                        'up': 'running',
                    }[values[num].split(' ')[0].lower()]
                elif param == 'CreatedAt':
                    container[params_map[param]] = parse_datetime(values[num]).astimezone(
                        tzlocal.get_localzone()).replace(tzinfo=None)
                else:
                    container[params_map[param]] = values[num] if values[num] else None
            yield container
def copy_image_to_container(self, image, container, src, dst):
tmp = random_name()
self.create_container(tmp, image)
self.copy_path(tmp, container, src, dst=dst, dst_exec=True)
self.remove_container(tmp)
    def copy_layout(self, src, dst):
        """Recursively copy host directory *src* onto *dst*; returns the list
        of files copied (distutils.dir_util.copy_tree semantics)."""
        self.logger.info("Copying layout \"%s\" on \"%s\"", src, dst)
        return copy_tree(src, dst)
    def copy_path(self, container_src, container_dst, src, dst=None, dst_exec=False, processor=None):
        """Stream files from one container to another through piped tar archives.
        :param src: source path inside *container_src*; a trailing '/' copies
            the directory's contents rather than the directory itself.
        :param dst: destination path inside *container_dst*; defaults to the
            dirname of *src* (relative form).
        :param dst_exec: unpack via ``docker exec ... tar -xpf - -C /``
            instead of ``docker cp - ...``.
        :param processor: optional callable applied to every TarInfo; return
            None to drop the member.
        :raises ExternalProcessError: if either docker process exits non-zero.
        """
        if dst is None:
            dst = os.path.join(*os.path.dirname(src).split(os.sep))
        if processor is None:
            processor = lambda x: x
        self.logger.info("Copying files from container \"%s:%s\" to container \"%s:%s\"",
                         container_src, src, container_dst, dst)
        if src.endswith('/'):
            src_prefix = os.path.dirname(src).split(os.sep)[-1] + '/'
        else:
            src_prefix = ''
        def layout_filter(obj):
            # Rewrite member names (and hard-link targets) from src-relative
            # to dst-relative paths.
            if dst is not None:
                if len(obj.name) > len(src_prefix):
                    obj.name = os.path.join(dst, obj.name[len(src_prefix):])
                else:
                    obj.name = dst
                if obj.type == tarfile.LNKTYPE:
                    if len(obj.linkname) > len(src_prefix):
                        obj.linkname = os.path.join(
                            dst, obj.linkname[len(src_prefix):])
                    else:
                        obj.linkname = dst
            return obj
        # Source side: "docker cp CONTAINER:SRC -" emits a tar stream.
        p_in = DockerProcess(self, ['cp', "{}:{}".format(
            container_src, src), "-"], stdout=PIPE)
        if dst_exec:
            p_out = DockerProcess(self, ['exec', '-i', container_dst, "tar", "-xpf",
                                         "-", "-C", "/"], stdin=PIPE)
        else:
            p_out = DockerProcess(self, ['cp', "-", "{}:/".format(container_dst)], stdin=PIPE)
        # Re-write the stream member by member between the two processes.
        tar_in = tarfile.open(fileobj=p_in.stdout, mode='r|')
        tar_out = tarfile.open(fileobj=p_out.stdin, mode='w|')
        for tarinfo in tar_in:
            tarinfo = processor(layout_filter(tarinfo))
            if tarinfo is None:
                continue
            if tarinfo.isreg():
                tar_out.addfile(tarinfo, fileobj=tar_in.extractfile(tarinfo))
            else:
                tar_out.addfile(tarinfo)
        tar_in.close()
        tar_out.close()
        p_in.stdout.close()
        p_out.stdin.close()
        if p_in.wait() != 0:
            raise ExternalProcessError(
                "Error processing path on container \"{}\"".format(container_src), p_in)
        if p_out.wait() != 0:
            raise ExternalProcessError(
                "Error processing path on container \"{}\"".format(container_dst), p_out)
    def create_container(self, container, image, command=None, privileged=False, run=False, # pylint: disable=too-many-arguments,too-many-locals,too-many-branches
                         tty=False, volumes=None, volumes_from=None, user=None, networks=None,
                         links=None, network_aliases=None, env=None, ports=None):
        """Create container *container* from *image* unless it already exists.
        :param command: command string appended to the image's entrypoint
            (split on single spaces).
        :param run: start the container after creation (always attempted,
            even when the container already existed).
        :param volumes: iterable of (host_path, container_path) pairs.
        :param env/ports/links: mappings turned into -e/-p/--link options.
        :raises ExternalProcessError: if ``docker create`` exits non-zero.
        """
        if not any([x for x in self.containers(include_stopped=True) if container in x['names']]):
            self.logger.info("Creating container \"%s\"", container)
            args = ['create', '--name="{}"'.format(container)]
            for key, value in (env or {}).items():
                args += ['-e', "{}={}".format(key, value)]
            for key, value in (ports or {}).items():
                args += ['-p', "{}:{}".format(key, value)]
            for key, value in (links or {}).items():
                args += ['--link', "{}:{}".format(key, value)]
            for network in networks or []:
                args += ['--network', network]
            for network_alias in network_aliases or []:
                args += ['--network-alias', network_alias]
            if privileged:
                args.append('--privileged')
            if tty:
                args.append('--tty')
            if user:
                args += ['-u', user]
            if volumes:
                args += ["--volume={}:{}".format(key, value) for key, value in volumes]
            if volumes_from:
                args.append("--volumes-from={}".format(volumes_from))
            args.append(image)
            if command:
                args += command.split(" ")
            proc = DockerProcess(self, args, stdout=FNULL)
            if proc.wait() != 0:
                raise ExternalProcessError(
                    "Error creating container \"{}\"".format(container), proc)
        if run:
            self.start_container(container)
def create_network(self, network, driver='bridge', gateway=None, subnet=None,
ip_range=None, ipv6=False, internal=False):
self.logger.info("Creating network \"%s\"", network)
args = ['network', 'create', '-d', driver]
if gateway is not None:
args += ['--gateway', gateway]
if ip_range is not None:
args += ['--ip-range', ip_range]
if subnet is not None:
args += ['--subnet', subnet]
if ipv6:
args.append('--ipv6')
if internal:
args.append('--internal')
args.append(network)
proc = DockerProcess(self, args)
if proc.wait() != 0:
raise ExternalProcessError("Error creating network \"{}\"".format(network), proc)
def create_volume(self, volume):
if self.volumes(name=volume):
return
self.logger.info("Creating volume \"%s\"", volume)
args = ['volume', 'create', '--name="{}"'.format(volume)]
proc = DockerProcess(self, args, stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError("Error creating volume \"{}\"".format(volume), proc)
def export_files(self, container, src, dst):
self.logger.info(
"Export files from \"%s:%s\" to path \"%s\"", container, src, dst)
args = ['cp', "{}:{}".format(container, src), "-"]
proc = DockerProcess(self, args, stdout=PIPE)
tar = tarfile.open(fileobj=proc.stdout, mode='r|')
for fin in tar:
if not fin.isreg():
continue
fout = open(os.path.join(dst, os.path.basename(fin.name)), 'w')
fout.write(tar.extractfile(fin).read())
fout.close()
tar.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error exporting files from container \"{}\"".format(container), proc)
def get_container_ip_address(self, container):
args = ['inspect', '--format="{{.NetworkSettings.IPAddress}}"', container]
proc = DockerProcess(self, args, stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error requesting \"docker inspect {}\"".format(container), proc)
return proc.stdout.read().rstrip(os.linesep)
    def images(self, name=None, **filters):
        """Yield one dict per image known to the engine.
        Runs ``docker images`` with a pipe-separated ``--format`` template
        and parses each line into a dict keyed by snake_case field names.
        ``'<none>'`` values are normalized to None; an 'image' key holds
        "repository:tag" when both exist, else the image id.
        :param name: optional repository[:tag] to restrict the listing.
        :param filters: forwarded as ``-f key=value`` options.
        """
        params = ['ID', 'Repository', 'Tag', 'Digest',
                  'CreatedSince', 'CreatedAt', 'Size']
        args = ['images', '--format',
                SEPARATOR.join(['{{{{.{}}}}}'.format(x) for x in params])]
        for key, value in filters.items():
            args += ['-f', '{}={}'.format(key, value)]
        if name is not None:
            args.append(name)
        proc = DockerProcess(self, args, stdout=PIPE)
        for line in proc.stderr.read().splitlines():
            self.logger.error(line)
        proc.wait()
        # CamelCase template field name -> snake_case dict key.
        params_map = dict([(x, re.sub(
            '((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', x).lower()) for x in params])
        for line in proc.stdout.read().splitlines():
            image = {}
            values = line.split(SEPARATOR)
            for num, param in enumerate(params):
                if param == 'CreatedAt':
                    image[params_map[param]] = parse_datetime(values[num]).astimezone(
                        tzlocal.get_localzone()).replace(tzinfo=None)
                else:
                    image[params_map[param]] = values[num] \
                        if values[num] and values[num] != '<none>' else None
            if image['repository'] and image['tag']:
                image['image'] = "{}:{}".format(image['repository'], image['tag'])
            else:
                image['image'] = image['id']
            yield image
    def import_archives(self, image, *archives):
        """Build image *image* by streaming one or more Archives into
        ``docker import``.
        Members are re-rooted under each archive's ``prefix`` (missing
        prefix directories are synthesized); duplicate paths across
        archives are skipped, first occurrence wins.
        :raises ExternalProcessError: if ``docker import`` exits non-zero.
        """
        paths = set()
        args = ['import', '-', image]
        proc = DockerProcess(self, args, stdin=PIPE)
        tar_out = tarfile.open(fileobj=proc.stdin, mode='w|')
        def layout_filter(obj, arc):
            # Normalize member (and hard-link) names to absolute paths and
            # re-root them under the archive's prefix.
            if not obj.name.startswith(os.sep):
                obj.name = "/{}".format(obj.name)
            obj.name = os.path.normpath(obj.name)
            if arc.prefix:
                obj.name = os.path.join(
                    arc.prefix, obj.name.lstrip(os.sep))
            if obj.name.endswith('/') and len(obj.name) > 1:
                obj.name = obj.name[:-1]
            if obj.type == tarfile.LNKTYPE and obj.linkname:
                if obj.linkname.startswith(".{}".format(os.sep)):
                    obj.linkname = obj.linkname[1:]
                if obj.linkname.startswith(os.sep) and arc.prefix:
                    obj.linkname = os.path.join(
                        arc.prefix, obj.linkname.lstrip(os.sep))
                if obj.linkname.endswith('/') and len(obj.linkname) > 1:
                    obj.linkname = obj.linkname[:-1]
            return obj
        for archive in archives:
            self.logger.info("Importing archive \"%s\" into image \"%s:%s\"",
                             archive, image, archive.prefix or '/')
            tar_in = tarfile.open(name=archive.path, mode='r')
            if archive.prefix:
                # Emit root-owned directory entries for each prefix segment
                # that has not been written yet.
                segments = [os.sep]
                for segment in os.path.dirname(archive.prefix).split(os.sep):
                    segments.append(segment)
                    path = os.path.join(*segments)
                    if path in paths:
                        continue
                    tarinfo = tarfile.TarInfo(path)
                    tarinfo.mode = 0o755
                    tarinfo.uid = 0
                    tarinfo.gid = 0
                    tarinfo.type = tarfile.DIRTYPE
                    tar_out.addfile(tarinfo)
                    paths.add(tarinfo.name)
            for tarinfo in [layout_filter(obj, archive) for obj in tar_in]: # pylint: disable=cell-var-from-loop
                if tarinfo.name in paths:
                    continue
                paths.add(tarinfo.name)
                if tarinfo.isreg():
                    tar_out.addfile(
                        tarinfo, fileobj=tar_in.extractfile(tarinfo))
                else:
                    tar_out.addfile(tarinfo)
        tar_out.close()
        proc.stdin.close()
        if proc.wait() != 0:
            raise ExternalProcessError(
                "Error importing archives \"{}\" in image \"{}\"".format(archives, image), proc)
def import_path(self, path, image):
"""
Example:
>>> import shutil
>>> dm = DockerEngine(machine_name='default')
>>> root = tempfile.mkdtemp()
>>> image = "test_import_path:latest"
>>> dm.import_path(root, image)
>>> bool(dm.images(name=image))
True
>>> dm.remove_image(image)
>>> shutil.rmtree(root)
"""
self.logger.info(
"Importing path \"%s\" into image \"%s\"", path, image)
def layout_filter(obj):
obj.uid = 0
obj.gid = 0
return obj
args = ['import', '-', image]
proc = DockerProcess(self, args, stdin=PIPE, stdout=FNULL)
tar = tarfile.open(fileobj=proc.stdin, mode='w|')
tar.add(path, arcname=".", filter=layout_filter)
tar.close()
proc.stdin.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error importing archive \"{}\" in image \"{}\"".format(path, image), proc)
def install_freeze(self, container, arch=None):
self.logger.info("Installing freeze on container \"%s\"", container)
def layout_filter(obj):
obj.uid = 0
obj.gid = 0
return obj
if arch is None:
arch = self.platform
args = ['cp', '-', "{}:/".format(container)]
proc = DockerProcess(self, args, stdin=PIPE)
tar = tarfile.open(fileobj=proc.stdin, mode='w|')
bindir = tarfile.TarInfo(name="bin")
bindir.uid = 0
bindir.gid = 0
bindir.mode = 0o0755
bindir.type = tarfile.DIRTYPE
tar.addfile(bindir)
tar.add(os.path.join(os.path.dirname(__file__), 'freeze', 'freeze_{}'.format(
arch)), arcname="bin/freeze", filter=layout_filter)
tar.close()
proc.stdin.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error installing freeze on container \"{}\"".format(container), proc)
    def load_archive(self, container, name, fileobj, root="/", uid=None, gid=None):
        """Stream the tar archive in *fileobj* into *container* under *root*.

        Optionally re-owns every member to *uid*/*gid*.  As a special case,
        when the archive contains a ``./lib`` or ``./usr/lib`` directory,
        a real ``lib64``/``usr/lib64`` directory is emitted instead and the
        original path becomes a symlink to it (multilib-style layout).

        :param name: archive label, used only for logging.
        :raises ExternalProcessError: if ``docker cp`` exits non-zero.
        """
        self.logger.info(
            "Loading archive \"%s\" on container \"%s\"", name, container)
        def layout_filter(obj):
            # Re-own the member only when an explicit uid/gid was requested.
            if uid is not None:
                obj.uid = uid
            if gid is not None:
                obj.gid = gid
            return obj
        args = ['cp', '-', "{}:{}".format(container, root)]
        proc = DockerProcess(self, args, stdin=PIPE)
        # 'r|*' reads the (possibly compressed) input as a non-seekable stream.
        tar_in = tarfile.open(fileobj=fileobj, mode='r|*')
        tar_out = tarfile.open(fileobj=proc.stdin, mode='w|')
        for tarinfo in tar_in:
            tarinfo = layout_filter(tarinfo)
            if tarinfo.name in ['./lib', './usr/lib'] and tarinfo.isdir():
                # Emit <dir>64 as the real directory...
                lib64_tarinfo = deepcopy(tarinfo)
                lib64_tarinfo.name = "{}64".format(lib64_tarinfo.name)
                tar_out.addfile(lib64_tarinfo)
                # ...and turn the original path into a symlink pointing at it.
                tarinfo.type = tarfile.SYMTYPE
                tarinfo.linkname = os.path.basename(lib64_tarinfo.name)
            if tarinfo.isreg():
                tar_out.addfile(tarinfo, fileobj=tar_in.extractfile(tarinfo))
            else:
                tar_out.addfile(tarinfo)
        tar_out.close()
        proc.stdin.close()
        if proc.wait() != 0:
            raise ExternalProcessError(
                "Error loading archive on container \"{}\"".format(container), proc)
    def load_image(self, image, path):
        """Load *image* from the tar archive at *path* via ``docker load``.

        :raises ExternalProcessError: if ``docker load`` exits non-zero.
        """
        # NOTE(review): modern ``docker load`` accepts no positional image
        # argument (only ``-i``); verify the trailing *image* arg is
        # tolerated by the Docker CLI version this targets.
        args = ['load', '-i', path, image]
        proc = DockerProcess(self, args, stdout=FNULL)
        if proc.wait() != 0:
            raise ExternalProcessError(
                "Error loading image \"{}\"".format(image), proc)
def load_layout(self, container, path, root="/", uid=0, gid=0):
self.logger.info(
"Loading layout \"%s\" on container \"%s\"", path, container)
def layout_filter(obj):
obj.uid = uid
obj.gid = gid
return obj
args = ['cp', '-', "{}:{}".format(container, root)]
proc = DockerProcess(self, args, stdin=PIPE)
tar = tarfile.open(fileobj=proc.stdin, mode='w|')
tar.add(path, arcname=".", filter=layout_filter)
tar.close()
proc.stdin.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error loading layout on container \"{}\"".format(container), proc)
@listify
def networks(self, **filters):
params = ['id', 'name', 'driver']
args = ['network', 'ls']
for key, value in filters.items():
args += ['--filter', '{}={}'.format(key, value)]
proc = DockerProcess(self, args, stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error requesting \"docker {}\"".format(' '.join(args)), proc)
for line in proc.stdout.read().splitlines()[1:]:
network = {}
values = line.split()
for num, param in enumerate(params):
network[param] = values[num] if values[num] else None
yield network
def process_path(self, container, path, func):
self.logger.info(
"Processing path \"%s\" on container \"%s\"", path, container)
args = ['cp', "{}:{}".format(container, path), "-"]
proc = DockerProcess(self, args, stdout=PIPE)
tar = tarfile.open(fileobj=proc.stdout, mode='r|')
for tarinfo in tar:
func(tar, tarinfo)
if proc.wait() != 0:
raise ExternalProcessError(
"Error processing path on container \"{}\"".format(container), proc)
def push_image(self, image, username, password, registry='index.docker.io'):
self.logger.info(
"Pushing image \"%s\" to registry \"%s\"", image, registry)
full_image_name = '{}/{}'.format(registry, image)
args = ['push', full_image_name]
with DockerRegistryLogin(self, registry, username, password) as login:
proc = DockerProcess(self, args, stdout=FNULL, config=login.config_path)
if proc.wait() != 0:
raise ExternalProcessError(
"Error pushing image \"{}\"".format(full_image_name), proc)
def pull_image(self, image, username=None, password=None, registry='index.docker.io'):
self.logger.info(
"Pulling image \"%s\" from registry \"%s\"", image, registry)
full_image_name = '{}/{}'.format(registry, image)
args = ['pull', full_image_name]
if username and password:
with DockerRegistryLogin(self, registry, username, password) as login:
proc = DockerProcess(self, args, stdout=FNULL, config=login.config_path)
if proc.wait() != 0:
raise ExternalProcessError(
"Error pulling image \"{}\"".format(full_image_name), proc)
else:
proc = DockerProcess(self, args, stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError(
"Error pulling image \"{}\"".format(full_image_name), proc)
    def remove_container(self, container):
        """Stop (when running/paused) and remove the container named *container*.

        Does nothing when no container with that name exists.

        :param container: the container *name* (a string, not a dict).
        :raises ExternalProcessError: on ``docker stop``/``docker rm`` failure.
        """
        try:
            # ``containers()`` yields dicts; grab the status of the first match.
            status = list(self.containers(include_stopped=True, name=container))[0]['status']
        except IndexError:
            # No such container -- nothing to stop or remove.
            status = None
        if status in ['running', 'paused']:
            self.logger.info("Stopping container \"%s\"", container)
            proc = DockerProcess(self, ['stop', container], stdout=FNULL)
            if proc.wait() != 0:
                raise ExternalProcessError(
                    "Error stopping container \"{}\"".format(container), proc)
        if status is not None:
            self.logger.info("Removing container \"%s\"", container)
            proc = DockerProcess(self, ['rm', container], stdout=FNULL)
            if proc.wait() != 0:
                raise ExternalProcessError(
                    "Error removing container \"{}\"".format(container), proc)
    def remove_image(self, name):
        """Remove every image matching *name*.

        Containers (running or stopped) based on a matching image are removed
        first, since ``docker rmi`` would otherwise refuse.

        :raises ExternalProcessError: if ``docker rmi`` exits non-zero.
        """
        for container in self.containers(include_stopped=True, ancestor=name):
            # ``containers()`` yields dicts; pass the primary name.
            self.remove_container(container['names'][0])
        for image in self.images(name=name):
            self.logger.info("Removing image \"%s\"", image['image'])
            proc = DockerProcess(self, ['rmi', image['image']], stdout=FNULL)
            if proc.wait() != 0:
                raise ExternalProcessError(
                    "Error removing image \"{}\"".format(image['image']), proc)
def remove_network(self, network):
for container in self.containers(include_stopped=True, network=network):
self.remove_container(container)
self.logger.info("Removing network \"%s\"", network)
proc = DockerProcess(self, ['network', 'rm', network], stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError("Error removing network \"{}\"".format(network), proc)
def remove_volume(self, volume):
for container in self.containers(include_stopped=True, volume=volume):
self.remove_container(container)
self.logger.info("Removing volume \"%s\"", volume)
proc = DockerProcess(self, ['volume', 'rm', volume], stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError("Error removing volume \"{}\"".format(volume), proc)
def run_cmd(self, container, cmd, privileged=False,
quiet=False, return_output=False, user=None):
if not quiet:
self.logger.info(
"Running command \"%s\" on \"%s\"", cmd, container)
args = ['exec']
if privileged:
args.append('--privileged')
if user:
args += ['-u', user]
args += [container] + self.shell.split(' ') + ['-c', cmd]
proc = DockerProcess(self, args, stdout=PIPE if return_output else None)
if proc.wait() != 0:
raise ExternalProcessError(
"Error running command \"{}\" on container \"{}\"".format(cmd, container), proc)
if return_output:
return proc.stdout.read().strip()
def run_script(self, container, script, privileged=False, shell=None, user=None):
self.logger.info("Running script on \"%s\"", container)
args = ['exec', '-i']
if privileged:
args.append('--privileged')
if user:
args += ['-u', user]
if shell is None:
shell = self.shell
args += [container] + shell.split(' ')
proc = DockerProcess(self, args, stdin=PIPE)
proc.stdin.write(script)
proc.stdin.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error running script on container \"{}\"".format(container), proc)
def save_image(self, image, path):
mkdir(os.path.dirname(path))
args = ['save', '-o', path, image]
proc = DockerProcess(self, args, stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError(
"Error saving image \"{}\"".format(image), proc)
def save_layout(self, container, src, dst):
self.logger.info(
"Saving layout \"%s:%s\" on path \"%s\"", container, src, dst)
args = ['cp', "{}:{}".format(container, src), "-"]
proc = DockerProcess(self, args, stdout=PIPE)
tar = tarfile.open(fileobj=proc.stdout, mode='r|')
for member in tar:
member.name = os.path.normpath(member.name.lstrip('/'))
tar.extract(member, dst)
tar.close()
if proc.wait() != 0:
raise ExternalProcessError(
"Error saving layout from container \"{}\"".format(container), proc)
def start_container(self, container):
self.logger.info("Starting container \"%s\"", container)
proc = DockerProcess(self, ['start', container], stdout=FNULL)
if proc.wait() != 0:
raise ExternalProcessError(
"Error creating container \"{}\"".format(container), proc)
@listify
def volumes(self, **filters):
params = ['driver', 'name']
args = ['volume', 'ls']
for key, value in filters.items():
args += ['-f', '{}={}'.format(key, value)]
proc = DockerProcess(self, args, stdout=PIPE)
if proc.wait() != 0:
raise ExternalProcessError(
"Error requesting \"docker {}\"".format(' '.join(args)), proc)
for line in proc.stdout.read().splitlines()[1:]:
volume = {}
values = line.split()
for num, param in enumerate(params):
volume[param] = values[num] if values[num] else None
yield volume
class BaseDockerSubRecipe(BaseSubRecipe):
    """Common base for buildout sub-recipes that drive a Docker engine.

    Builds a configured ``DockerEngine`` from the recipe options and offers
    helpers to decide whether the recipe needs to be re-run.
    """
    def initialize(self):
        """Create ``self.engine`` from the recipe options.

        Unset options are passed as None so ``DockerEngine`` applies its
        own defaults.
        """
        super(BaseDockerSubRecipe, self).initialize()
        self.engine = DockerEngine(
            logger=self.logger,
            machine_name=self.options.get('machine-name', None),
            url=self.options.get('engine-url', None),
            tlsverify=self.options.get('engine-tls-verify', None),
            tlscertpath=self.options.get('engine-tls-cert-path', None),
            shell=self.shell,
            timeout=int(self.options.get(
                'timeout', DEFAULT_TIMEOUT)))
    def is_image_updated(self, name):
        """Return True when an image matching *name* is newer than the
        recipe's completion marker (or the marker does not exist yet)."""
        if not os.path.exists(self.completed):
            return True
        completed_mtime = datetime.fromtimestamp(
            os.stat(self.completed).st_mtime)
        for image in self.engine.images(name=name):
            if image['created_at'] > completed_mtime:
                return True
        return False
    def is_layout_updated(self, layout):
        """Return True when any directory or file under *layout* was
        modified after the completion marker (or the marker is missing)."""
        if not os.path.exists(self.completed):
            return True
        completed_mtime = os.stat(self.completed).st_mtime
        for dirname, _, files in os.walk(layout):
            if os.stat(dirname).st_mtime > completed_mtime:
                return True
            for filename in files:
                # lstat: a dangling symlink must not crash the scan.
                if os.lstat(os.path.join(dirname, filename)).st_mtime > completed_mtime:
                    return True
        return False
| |
#
# 1337ris -- main.py
# Henry Weiss
#
# The backbone of the game. Has many roles and responsibilities, but that is
# the most succinct way of putting it. In more detail:
#
# - Runs the event loop and coordinates communication between other controller
# objects.
# - Manages the game state, and any transitioning/communication between them.
# - Manages the resource pools (images and sounds), allowing access to shared
# resources.
# - Contains a lot of what defines the application functionality (e.g. mini-
# mizing, toggling fullscreen, etc.) -- basically glorified event handling.
# - Has several utility methods, like a time formatter, a fade out
# music function that takes streaming settings into account, and a high scores
# file interface.
# - Executes screen transitions.
#
# Note: 1337ris uses a time-based animation system instead of a frame or
# tick-based update system. The pygame timer really bogs down the game for
# some reason, so I decided to use time-based animation in order to get
# decent response from the keys. (Maybe there's too much blitting? Perhaps
# it was due to pygame overhead? Or maybe SDL's timer just sucks. I'm
# guessing it's pygame overhead, but I don't really know.)
#
from headers import *
# State constants found through trial-and-error (since pygame was so kind as
# to not specify the activate event states...)
INPUTFOCUSCHANGED_MAC = 3 # ACTIVEEVENT state value on OS X
INPUTFOCUSCHANGED_WIN = 2 # ACTIVEEVENT state value on Windows
MINIMIZE_MAC = 4
MINIMIZE_WIN = 6
# Stuff for screen transitions (speeds are in pixels per millisecond)
TRANSITION_POINTS = 7 # How many bars will be used, for instance
TRANSITION_OVERLAP_SPEED = 1.125
TRANSITION_SPLIT_HORIZ_SPEED = 3.125
TRANSITION_SPLIT_VERT_SPEED = 2.125
TRANSITION_CATCHUP_SPEED = 4.75
TRANSITION_CATCHUP_OFFSET = 300 # Pixel difference between bars in catchup
TRANSITION_CROSSFADE_DELAY = 350 # In milliseconds
TRANSITION_BLOCKS_SPEED = 0.45 # Blocks revealed per millisecond
TRANSITION_BLOCK_SIZE = (40, 40)
TRANSITION_QUARTERS_SPEED = 1.125
class Main:
# Initializes pygame and our helper controller objects.
def __init__(self):
# First, check if we can load things other than just BMPs
if not image.get_extended():
print "Fatal Error: extended image support not enabled."
exit() # Bail out -- we can't load our resources
# Otherwise, start up pygame
mixer.pre_init(SOUND_FREQ) # Higher quality sounds
pygame.init()
# And initialize our controller objects
self.prefs_controller = PrefsController(PREFS_FILE, DEFAULTS)
self.sound_controller = SoundController(self.prefs_controller.get(SOUND_VOLUME), self.prefs_controller.get(MUSIC_VOLUME))
# Resource pool to keep track of images
self.image_pool = {}
# Initialize the display
display.set_caption("1337ris")
self.fullscreen = self.prefs_controller.get(RUN_FULLSCREEN)
# Initialize and load the game states
self.states = [MainMenu(self), HighScores(self), TraditionalMode(self), CrossCutMode(self), ConvergenceMode(self), PsychedelicMode(self)]
self.state = STATE_LOADING
# For keeping track of frame rate
self.clock = Clock()
self.fps_font = Font(DEFAULT_FONT, 12)
# For pausing if app is deactivated and such
self.active = True
# Transitions
self.in_transition = False
self.current_transition = 0
# Grab the total number of resources to load
self.resources_loaded = -1
self.total_resources = 0
for state in self.states:
self.total_resources += state.total_resources
# Intercepts attribute changes
def __setattr__(self, attr, value):
# State changer. Notifies the old and new states of the transition.
if attr == "state" and attr in self.__dict__:
if self.state != STATE_LOADING:
# Just choose a random transition
transition = TRANSITION_RANDOM
# Notify old state of change
userdata = self.states[self.state].stop()
else:
# We really want crossfading to open the show
transition = TRANSITION_CROSSFADE
userdata = None # Oh, and we need to declare this too
if value != STATE_LOADING:
# Some states will switch in their start method if something
# failed or if the state wasn't ready to switch, so we need
# to account for that
expected_state = self.state
# Switch states and notify new state of change
self.states[value].start(userdata)
if self.state != expected_state:
return # A different call must have already taken care of this
# Do transition with the new screen
self.transition_screen(transition, self.states[value].draw(None, False))
# Pauses sounds upon activation/deactivation events
elif attr == "active":
if not value and self.state >= STATE_TRADITIONAL:
self.states[self.state].paused = True
self.sound_controller.paused = True
elif self.state < STATE_TRADITIONAL or not self.states[self.state].paused:
self.sound_controller.paused = not value
# Switches display modes
elif attr == "fullscreen":
# Hide the cursor in fullscreen
mouse.set_visible(not value)
# Finally switch the mode
if value:
display.set_mode(DIMENSIONS, FULLSCREEN)
else:
display.set_mode(DIMENSIONS)
# Otherwise default behavior
self.__dict__[attr] = value
# Loads resources for each game state
def load_resources(self):
# Get the loading images ready
self.loading_img = image.load(IMG_PATH + "loading.png")
self.progress_bg = image.load(IMG_PATH + "progress bg.png")
self.progress_complete = image.load(IMG_PATH + "progress complete.png")
self.progress_bar = image.load(IMG_PATH + "progress.png")
# Set up the viewable rect (how much of the progress bar should be shown)
self.progress_rect = self.progress_bar.get_rect()
# Draw the initial loading screen image
self.update_load_progress()
# Now load the resources
for state in self.states:
state.load_resources()
# Delay a bit before we go to the main menu
self.load_wait = 1000
# Updates the resource counter and the progress bar
def update_load_progress(self):
self.resources_loaded += 1
self.progress_rect.width = (float(self.resources_loaded) / self.total_resources) * self.progress_bar.get_width()
# Check if the user is trying to quit (not perfect, but better than nothing...)
events = event.get()
for next_event in events:
if next_event.type == KEYDOWN:
if (next_event.key == K_q and key.get_mods() & KMOD_META) or (next_event.key == K_F4 and key.get_mods() & KMOD_ALT):
self.quit()
# Draw the loading screen to the screen
self.draw_loading_screen(display.get_surface())
# And swap the buffers
display.update()
# Draws the loading screen onto the specified surface
def draw_loading_screen(self, surface):
surface.blit(self.loading_img, (0, 0))
surface.blit(self.progress_bg, (SCREEN_WIDTH * 0.5 - self.progress_bg.get_rect().width * 0.5, 360))
surface.blit(self.progress_bar, (SCREEN_WIDTH * 0.5 - self.progress_bar.get_rect().width * 0.5, 360), self.progress_rect)
# Complete?
if self.resources_loaded >= self.total_resources:
surface.blit(self.progress_complete, (SCREEN_WIDTH * 0.5 - self.progress_complete.get_rect().width * 0.5, 360))
# Runs the main event loop
def run(self):
last_time = time.get_ticks()
while True:
# Keep track of timing
elapsed_time = time.get_ticks() - last_time
last_time += elapsed_time # Make sure we don't "lose" any time
self.clock.tick()
# Grab all waiting events from the queue (unless we're inactive, in which
# case just wait for an event, so we don't hog up the CPU)
if self.active:
events = event.get()
else:
# The huge jump that will occur when waiting for an event might throw
# off some of the time-based animations, so we're gonna prevent that.
old_elapsed = elapsed_time
events = [event.wait()]
elapsed_time = old_elapsed
last_time = time.get_ticks()
for next_event in events:
# Close window?
if next_event.type == QUIT:
self.quit()
# Handle activation events
elif self.is_activation_event(next_event):
self.active = next_event.gain
continue
# Handle key input (mostly special/system key commands first, then handoff to current state)
elif next_event.type == KEYDOWN:
keycode = next_event.key
# "Boss keys", i.e. emergency bailout. Responds to Cmd-Q (Macs) and Alt-F4 (Windows)
if (keycode == K_q and key.get_mods() & KMOD_META) or (keycode == K_F4 and key.get_mods() & KMOD_ALT):
self.quit()
# Minimizing (Windows doesn't have a system shortcut for minimizing, so it doesn't get one here! Mwahaha!)
elif keycode == K_m and key.get_mods() & KMOD_META:
# Exit fullscreen in order to minimize
if self.fullscreen:
self.fullscreen = False
display.iconify()
# For some reason deactivate events aren't always posted...
self.active = False
continue
# Temporary full-screen toggling (Cmd-F for Macs, Alt-Enter/F11 for Windows)
elif (keycode == K_f and key.get_mods() & KMOD_META) or (keycode == K_RETURN and key.get_mods() & KMOD_ALT) or keycode == K_F11:
self.fullscreen = not self.fullscreen
# Otherwise just hand it off to the current game state
elif self.state != STATE_LOADING:
self.states[self.state].key_down(keycode, next_event.unicode)
# Are we still in the middle of the loading screen pause?
if self.state == STATE_LOADING:
if self.load_wait <= 0:
self.state = STATE_MAIN_MENU
else:
self.load_wait -= elapsed_time
# Just blit the loading complete screen and get out of here
self.draw_loading_screen(display.get_surface())
display.update()
continue
# Update and redraw (only if the app is active)
if self.active:
# Check if a screen transition is in place
if self.in_transition:
self.update_transition(elapsed_time)
# Otherwise, update normally
else:
self.states[self.state].update(elapsed_time)
self.states[self.state].draw()
# Draw frame rate
if self.prefs_controller.get(DRAW_FRAMERATE) or (key.get_pressed()[K_r] and
(key.get_mods() & KMOD_META or key.get_mods() & KMOD_CTRL)):
# Draw the frame rate plus a shadow so it can stand out on light backgrounds
shadow = self.fps_font.render("Frame Rate: %.3f fps" % self.clock.get_fps(), True, (0, 0, 0))
text = self.fps_font.render("Frame Rate: %.3f fps" % self.clock.get_fps(), True, (255, 255, 0))
display.get_surface().blit(shadow, (6, 463))
display.get_surface().blit(text, (5, 462))
# And swap the buffers
display.update()
# Formats a millisecond counter into an actual, human-readable time display
def time_to_str(self, millis):
time_str = "%02d:%02d" % (millis / 60000 % 60, millis / 1000 % 60)
if millis / 3600000 > 0:
time_str = str(millis / 3600000) + ":" + time_str # We're into hours now... o_O
return time_str
# Reads the high scores file into a list of tuples. See the HighScores
# class for a description of what the tuple format is.
def read_high_scores(self, filename):
# Check if the file even exists
if not os.access(filename, os.F_OK):
return []
# Otherwise, gather the scores
scores = []
current_score = [] # For constructing the tuple
index = 0
for line in file(filename, 'rb'):
line = line.strip() # Remove newlines
# Decrypt the current line
data = line.split(DELIMITER)
data.reverse()
string = ''
for ch in data:
if ch != '':
string += chr(-~(int(ch) ^ 127))
# Name, at index one, is the only non-int type
if index != ENTRY_NAME:
current_score.append(int(string))
else:
current_score.append(string)
# Finished reading one score entry?
if index == TOTAL_ITEMS - 1:
scores.append((current_score[0], current_score[1], current_score[2], current_score[3]))
current_score = []
index = 0
else:
index += 1
return scores
# Saves a list of high score tuple entries to the high scores file. See the HighScores
# class for a description of what the tuple format is.
def save_high_scores(self, filename, scores):
output = file(filename, 'wb')
z = 1
for entry in scores:
for item in entry:
# Convert to string
item = str(item)
# Save the data in reverse
for i in range(len(item) - 1, -1, -1):
output.write(str(-~(ord(item[i]) ^ 127)) + DELIMITER)
# Add newline
output.write("\n")
# Save the file
output.close()
# For testing if an event is an activation event or not (due to stupid differences
# in SDL between OS X/Windows, and no constants that I'm aware of...)
def is_activation_event(self, next_event):
return (next_event.type == ACTIVEEVENT and
(next_event.state == INPUTFOCUSCHANGED_MAC or next_event.state == INPUTFOCUSCHANGED_WIN or
next_event.state == MINIMIZE_MAC or next_event.state == MINIMIZE_WIN))
# Fades out the sound for (by default) 1 second.
def fadeout_sound(self, delay=1000):
if self.prefs_controller.get(STREAM_MUSIC):
music.fadeout(delay)
while music.get_busy():
pass
else:
mixer.fadeout(delay)
while mixer.get_busy():
pass
# Clear any stray events from the queue
event.clear()
# Initiates a screen transition. The end parameter designates
# the surface that should be drawn at the end of the transition.
# The starting image is, by default, whatever is on the screen
# at the time this is called, although you can pass in a custom
# starting image. To get the end image, use a state's draw()
# function, but make sure it doesn't draw to the screen. You can
# then pass the returned surface to this function.
def transition_screen(self, type, end, start=None):
self.end = end
# Get the starting images
if start is None:
self.start = display.get_surface().copy()
else:
self.start = start
# Choose the transition
if type == TRANSITION_RANDOM:
self.current_transition = randint(0, TOTAL_TRANSITIONS - 1)
else:
self.current_transition = type
# Setup certain variables for the screen transition
if self.current_transition == TRANSITION_OPEN:
self.x_pos = [SCREEN_WIDTH / 2, SCREEN_WIDTH / 2]
elif self.current_transition == TRANSITION_CLOSE:
self.x_pos = [SCREEN_WIDTH, 0] # Start from outer edges of screen
elif self.current_transition == TRANSITION_CATCHUP:
self.x_pos = []
for i in range(TRANSITION_POINTS):
self.x_pos.append(-(i * TRANSITION_CATCHUP_OFFSET))
elif self.current_transition == TRANSITION_CROSSFADE:
self.delay = TRANSITION_CROSSFADE_DELAY
# Make sure the start/end images don't have per-pixel transparency
self.start.set_alpha(None)
self.end.set_alpha(None)
elif self.current_transition == TRANSITION_BLOCKS:
# Break the screen up into tiles
self.screen_blocks = []
for y in range(0, SCREEN_HEIGHT, TRANSITION_BLOCK_SIZE[1]):
for x in range(0, SCREEN_WIDTH, TRANSITION_BLOCK_SIZE[0]):
self.screen_blocks.append((x, y))
# Scramble the list
shuffle(self.screen_blocks)
# For traversing the blocks list
self.current_block = 0
# Ensures the blocks get drawn no matter how fast the frame rate is
self.delay_time = 0
elif self.current_transition == TRANSITION_SPLIT_HORIZ:
# Two bars on opposite sides of the screen
self.x_pos = [-SCREEN_WIDTH, SCREEN_WIDTH]
elif self.current_transition == TRANSITION_SPLIT_VERT:
# Two bars on opposite sides of the screen
self.y_pos = [-SCREEN_HEIGHT, SCREEN_HEIGHT]
elif self.current_transition == TRANSITION_QUARTERS:
self.x_pos = 0
self.y_pos = 0
# And let it start
self.in_transition = True
# Updating method for screen transitions that updates and draws
# the current state of the transition during the event loop.
def update_transition(self, elapsed_time):
if self.current_transition == TRANSITION_OPEN:
# Update shift positions
self.x_pos[0] += elapsed_time * TRANSITION_OVERLAP_SPEED
self.x_pos[1] -= elapsed_time * TRANSITION_OVERLAP_SPEED
# Check if transition finished and correct if necessary
if self.x_pos[0] > SCREEN_WIDTH or self.x_pos[1] < 0:
self.x_pos[0] = SCREEN_WIDTH
self.x_pos[1] = 0
self.in_transition = False
# Construct Rects to draw the portions (ceiling is used
# to make sure the middle is always covered, regardless of
# rounding/truncating).
part1 = Rect(SCREEN_WIDTH / 2, 0, math.ceil(self.x_pos[0] - SCREEN_WIDTH / 2), SCREEN_HEIGHT)
part2 = Rect(self.x_pos[1], 0, math.ceil(SCREEN_WIDTH / 2 - self.x_pos[1]), SCREEN_HEIGHT)
# Draw those portions
display.get_surface().blit(self.start, (0, 0))
display.get_surface().blit(self.end, (SCREEN_WIDTH / 2, 0), part1)
display.get_surface().blit(self.end, (self.x_pos[1], 0), part2)
elif self.current_transition == TRANSITION_CLOSE:
# Update shift positions
self.x_pos[0] -= elapsed_time * TRANSITION_OVERLAP_SPEED
self.x_pos[1] += elapsed_time * TRANSITION_OVERLAP_SPEED
# Check if transition finished and correct if necessary
if self.x_pos[0] < SCREEN_WIDTH / 2 or self.x_pos[1] > SCREEN_WIDTH / 2:
self.x_pos[0] = SCREEN_WIDTH / 2
self.x_pos[1] = SCREEN_WIDTH / 2
self.in_transition = False
# Construct Rects to draw the portions (ceiling is used
# to make sure the middle is always covered, regardless of
# rounding/truncating).
part1 = Rect(SCREEN_WIDTH / 2, 0, math.ceil(self.x_pos[0] - SCREEN_WIDTH / 2), SCREEN_HEIGHT)
part2 = Rect(self.x_pos[1], 0, math.ceil(SCREEN_WIDTH / 2 - self.x_pos[1]), SCREEN_HEIGHT)
# Draw those portions
display.get_surface().blit(self.end, (0, 0))
display.get_surface().blit(self.start, (SCREEN_WIDTH / 2, 0), part1)
display.get_surface().blit(self.start, (self.x_pos[1], 0), part2)
elif self.current_transition == TRANSITION_CATCHUP:
# Blit the start image first
display.get_surface().blit(self.start, (0, 0))
# Update shift positions
for i in range(TRANSITION_POINTS):
self.x_pos[i] += elapsed_time * TRANSITION_CATCHUP_SPEED
# Correct if this bar has "caught up"
if self.x_pos[i] >= SCREEN_WIDTH:
self.x_pos[i] = SCREEN_WIDTH
# Done?
if i == TRANSITION_POINTS - 1:
self.in_transition = False
# Calculate this bar rect
top = SCREEN_HEIGHT * (i / float(TRANSITION_POINTS))
bottom = SCREEN_HEIGHT * ((i + 1) / float(TRANSITION_POINTS))
bar = Rect(0, top, max(self.x_pos[i], 0), math.ceil(bottom - top))
# Blit it
display.get_surface().blit(self.end, (0, top), bar)
elif self.current_transition == TRANSITION_CROSSFADE:
# Update the time remaining
self.delay -= elapsed_time
# Done?
if self.delay <= 0:
self.delay = 0
self.in_transition = False
# Update the transparency values
alpha = 255 * (float(self.delay) / TRANSITION_CROSSFADE_DELAY)
self.start.set_alpha(alpha)
self.end.set_alpha(255 - alpha)
# Blit the two images now
display.get_surface().blit(self.start, (0, 0))
display.get_surface().blit(self.end, (0, 0))
elif self.current_transition == TRANSITION_BLOCKS:
blocks_to_draw = int(round((elapsed_time + self.delay_time) * TRANSITION_BLOCKS_SPEED))
# Too fast?
if blocks_to_draw < 1:
self.delay_time += elapsed_time # Save the elapsed time so we can use it next time
else:
self.delay_time = 0 # We can draw now
display.get_surface().blit(self.start, (0, 0))
# Draw the new screen in portions over the old screen
for i in range(blocks_to_draw):
# Extract the block rectangles and draw them
for j in range(self.current_block + 1):
block = self.screen_blocks[j]
display.get_surface().blit(self.end, block, Rect(block, TRANSITION_BLOCK_SIZE))
# Move onto next block
self.current_block += 1
# Done?
if self.current_block >= len(self.screen_blocks):
self.in_transition = False
break
elif self.current_transition == TRANSITION_SPLIT_HORIZ:
# Update shift positions
self.x_pos[0] += elapsed_time * TRANSITION_SPLIT_HORIZ_SPEED
self.x_pos[1] -= elapsed_time * TRANSITION_SPLIT_HORIZ_SPEED
# Done?
if self.x_pos[1] < 0:
# Correct if necessary
self.x_pos[0] = 0
self.x_pos[1] = 0
self.in_transition = False
# The two bars, are the exact same, except for their y starting location...
bar1 = Rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT / 2)
bar2 = Rect(0, SCREEN_HEIGHT / 2, SCREEN_WIDTH, SCREEN_HEIGHT / 2)
# ...where we blit the bars is more important
display.get_surface().blit(self.start, (0, 0))
display.get_surface().blit(self.end, (self.x_pos[0], 0), bar1)
display.get_surface().blit(self.end, (self.x_pos[1], SCREEN_HEIGHT / 2), bar2)
elif self.current_transition == TRANSITION_SPLIT_VERT:
# Update shift positions
self.y_pos[0] += elapsed_time * TRANSITION_SPLIT_VERT_SPEED
self.y_pos[1] -= elapsed_time * TRANSITION_SPLIT_VERT_SPEED
# Done?
if self.y_pos[1] < 0:
# Correct if necessary
self.y_pos[0] = 0
self.y_pos[1] = 0
self.in_transition = False
# See above comments, except applied to the x start and y location
bar1 = Rect(0, 0, SCREEN_WIDTH / 2, SCREEN_HEIGHT)
bar2 = Rect(SCREEN_WIDTH / 2, 0, SCREEN_WIDTH / 2, SCREEN_HEIGHT)
display.get_surface().blit(self.start, (0, 0))
display.get_surface().blit(self.end, (0, self.y_pos[0]), bar1)
display.get_surface().blit(self.end, (SCREEN_WIDTH / 2, self.y_pos[1]), bar2)
elif self.current_transition == TRANSITION_QUARTERS:
# Update each shift point
self.x_pos += elapsed_time * TRANSITION_QUARTERS_SPEED
# Since the width:height ratio is usually not 1:1, we have to slow
# down the y speed, since it has less distance to travel.
self.y_pos += elapsed_time * (TRANSITION_QUARTERS_SPEED * (float(SCREEN_HEIGHT) / SCREEN_WIDTH))
# Check if the transition is done and correct if necessary
if self.x_pos > SCREEN_WIDTH / 2:
self.x_pos = SCREEN_WIDTH / 2
self.y_pos = SCREEN_HEIGHT / 2
self.in_transition = False
# Extract each portion of the image. The first portion is easy, since it is
# relative to the origin, but the other portions are all relative to the
# corner they started at (pt 1 from top left, pt 2 from top right, pt 3 from
# bottom left, pt 4 from bottom right).
part1 = Rect(0, 0, self.x_pos, self.y_pos)
part2 = Rect(SCREEN_WIDTH - self.x_pos, 0, self.x_pos, self.y_pos)
part3 = Rect(0, SCREEN_HEIGHT - self.y_pos, self.x_pos, self.y_pos)
part4 = Rect(SCREEN_WIDTH - self.x_pos, SCREEN_HEIGHT - self.y_pos, self.x_pos, self.y_pos)
# Now that we have the correct portions of the image, we can just use the
# starting point relative to the image as the blitting location.
display.get_surface().blit(self.start, (0, 0))
display.get_surface().blit(self.end, part1.topleft, part1)
display.get_surface().blit(self.end, part2.topleft, part2)
display.get_surface().blit(self.end, part3.topleft, part3)
display.get_surface().blit(self.end, part4.topleft, part4)
# Self-explanatory
    def quit(self):
        """Uninitialize all pygame modules and exit the process."""
        pygame.quit()
        # sys.exit() raises SystemExit, so nothing after this call runs.
        sys.exit()
| |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import threading
import unittest
import timeit
import IECore
import Gaffer
import GafferTest
class ParallelAlgoTest( GafferTest.TestCase ) :
    """Tests for `Gaffer.ParallelAlgo` UI-thread and background-thread helpers."""

    # Context manager used to test code which uses `ParallelAlgo::callOnUIThread()`.
    # This emulates the call handler that the UI would usually install.
    class UIThreadCallHandler( object ) :

        def __enter__( self ) :
            # Calls forwarded from other threads are queued here and executed
            # on the thread that owns this handler.
            self.__assertDone = False
            self.__queue = six.moves.queue.Queue()
            Gaffer.ParallelAlgo.pushUIThreadCallHandler( self.__callOnUIThread )
            return self

        def __exit__( self, type, value, traceBack ) :
            Gaffer.ParallelAlgo.popUIThreadCallHandler()
            # Drain anything still queued at exit. If `assertDone()` was
            # called, any remaining queued call is a test failure.
            while True :
                try :
                    f = self.__queue.get( block = False )
                except six.moves.queue.Empty:
                    return
                if self.__assertDone :
                    raise AssertionError( "UIThread call queue not empty" )
                f()

        def __callOnUIThread( self, f ) :
            # Handler registered with `pushUIThreadCallHandler()`; may be
            # invoked from any thread, so it only enqueues.
            self.__queue.put( f )

        # Waits for a single use of `callOnUIThread()`, raising
        # a test failure if none arises before `timeout` seconds.
        def assertCalled( self, timeout = 30.0 ) :
            try :
                f = self.__queue.get( block = True, timeout = timeout )
            except six.moves.queue.Empty :
                raise AssertionError( "UIThread call not made within {} seconds".format( timeout ) )
            f()

        # Asserts that no further uses of `callOnUIThread()` will
        # be made with this handler. This is checked on context exit.
        def assertDone( self ) :
            self.__assertDone = True

        # Waits for `time` seconds, processing any calls to
        # `ParallelAlgo::callOnUIThread()` made during that time.
        def waitFor( self, time ) :
            startTime = timeit.default_timer()
            elapsed = 0.0
            # Repeatedly block on the queue for the remaining time budget.
            while elapsed < time:
                try:
                    f = self.__queue.get( block = True, timeout = time - elapsed )
                except six.moves.queue.Empty:
                    return
                f()
                elapsed = timeit.default_timer() - startTime

    def testCallOnUIThread( self ) :
        """A call made from a background thread runs on the handler's thread."""
        s = Gaffer.ScriptNode()

        def uiThreadFunction() :
            s.setName( "test" )
            s.uiThreadId = six.moves._thread.get_ident()

        with self.UIThreadCallHandler() as h :
            t = threading.Thread(
                target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction )
            )
            t.start()
            h.assertCalled()
            t.join()
            h.assertDone()

        # The queued function must have executed on this (the "UI") thread.
        self.assertEqual( s.getName(), "test" )
        self.assertEqual( s.uiThreadId, six.moves._thread.get_ident() )

    def testNestedUIThreadCallHandler( self ) :
        # This is testing our `UIThreadCallHandler` utility
        # class more than it's testing `ParallelAlgo`.
        s = Gaffer.ScriptNode()

        def uiThreadFunction1() :
            s.setName( "test" )
            s.uiThreadId1 = six.moves._thread.get_ident()

        def uiThreadFunction2() :
            s["fileName"].setValue( "test" )
            s.uiThreadId2 = six.moves._thread.get_ident()

        # Handlers push/pop as a stack; the inner handler receives the calls
        # made while it is active.
        with self.UIThreadCallHandler() as h1 :
            t1 = threading.Thread(
                target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction1 )
            )
            t1.start()
            h1.assertCalled()
            h1.assertDone()
            with self.UIThreadCallHandler() as h2 :
                t2 = threading.Thread(
                    target = lambda : Gaffer.ParallelAlgo.callOnUIThread( uiThreadFunction2 )
                )
                t2.start()
                h2.assertCalled()
                h2.assertDone()

        self.assertEqual( s.getName(), "test" )
        self.assertEqual( s.uiThreadId1, six.moves._thread.get_ident() )
        self.assertEqual( s["fileName"].getValue(), "test" )
        self.assertEqual( s.uiThreadId2, six.moves._thread.get_ident() )

        t1.join()
        t2.join()

    def testCallOnBackgroundThread( self ) :
        """Background tasks inherit the launching context and honour cancellation."""
        script = Gaffer.ScriptNode()
        script["n"] = GafferTest.AddNode()

        foregroundContext = Gaffer.Context( script.context() )
        foregroundContext["a"] = "a"

        def f() :
            # The background thread gets a copy of the foreground context,
            # not the same object.
            backgroundContext = Gaffer.Context.current()
            self.assertFalse( backgroundContext.isSame( foregroundContext ) )
            self.assertEqual( backgroundContext, foregroundContext )

            # Spin until one of the three cancellation mechanisms below fires.
            with self.assertRaises( IECore.Cancelled ) :
                while True :
                    script["n"]["sum"].getValue()
                    # We might expect that `script["n"]["sum"].getValue()`
                    # would be guaranteed to throw after cancellation has been
                    # requested. But that is not the case if both the hash and the
                    # value are already cached, because cancellation is only checked
                    # for automatically when a Process is constructed. So we take
                    # a belt and braces approach and perform an explicit check here.
                    #
                    # The alternative would be to move the cancellation check outside
                    # of the Process class, so it is performed before the cache lookup.
                    # This may be the better approach, but we would need to benchmark
                    # it to ensure that performance was not adversely affected. To our
                    # knowledge, this "cache hits avoid cancellation" problem has not
                    # been responsible for unresponsive cancellation in the wild, because
                    # background tasks are typically triggered by `plugDirtiedSignal()`,
                    # and the hash cache is cleared when a plug is dirtied.
                    IECore.Canceller.check( backgroundContext.canceller() )

        # Explicit cancellation
        with foregroundContext :
            backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
        backgroundTask.cancel()

        # Implicit cancellation through graph edit
        with foregroundContext :
            backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
        script["n"]["op1"].setValue( 10 )

        # Cancellation through deletion
        with foregroundContext :
            backgroundTask = Gaffer.ParallelAlgo.callOnBackgroundThread( script["n"]["sum"], f )
        del backgroundTask

    def testBackgroundThreadMonitoring( self ) :
        """An active monitor is transferred to the background thread."""
        s = Gaffer.ScriptNode()
        s["n"] = GafferTest.MultiplyNode()
        s["n"]["op2"].setValue( 1 )
        s["e"] = Gaffer.Expression()
        s["e"].setExpression( """parent["n"]["op1"] = context["op1"]""" )

        def backgroundFunction() :
            with Gaffer.Context() as c :
                for i in range( 0, 10000 ) :
                    c["op1"] = i
                    self.assertEqual( s["n"]["product"].getValue(), i )

        with Gaffer.PerformanceMonitor() as m :
            t = Gaffer.ParallelAlgo.callOnBackgroundThread(
                s["n"]["product"], backgroundFunction
            )
        t.wait()

        # The monitor was active when we launched the background
        # process, so we expect it to have been transferred to the
        # background thread and remained active there for the duration.
        self.assertEqual( m.plugStatistics( s["n"]["product"] ).computeCount, 10000 )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| |
"""
Unit tests of Path types.
"""
import numpy as np
from holoviews import Dataset, Ellipse, Box, Polygons, Path
from holoviews.core.data.interface import DataError
from holoviews.element.comparison import ComparisonTestCase
class PathTests(ComparisonTestCase):
    """Tests of Path construction, casting, splitting and groupby.

    Multi-path data is stored flattened, with NaN separators between
    consecutive sub-paths — hence the np.nan entries in the expected arrays.
    """

    def test_multi_path_list_constructor(self):
        path = Path([[(0, 1), (1, 2)], [(2, 3), (3, 4)]])
        self.assertTrue(path.interface.multi)
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, np.nan, 2, 3]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, np.nan, 3, 4]))

    def test_multi_path_cast_path(self):
        # Casting a multi-path Path to Path preserves the multi interface.
        path = Path([[(0, 1), (1, 2)], [(2, 3), (3, 4)]])
        path2 = Path(path)
        self.assertTrue(path2.interface.multi)
        self.assertEqual(path2.dimension_values(0), np.array([
            0, 1, np.nan, 2, 3]))
        self.assertEqual(path2.dimension_values(1), np.array([
            1, 2, np.nan, 3, 4]))

    def test_multi_path_tuple(self):
        # Tuple form: shared xs, one column of ys per sub-path.
        path = Path(([0, 1], [[1, 3], [2, 4]]))
        self.assertTrue(path.interface.multi)
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, np.nan, 0, 1]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, np.nan, 3, 4]))

    def test_multi_path_unpack_single_paths(self):
        path = Path([Path([(0, 1), (1, 2)]), Path([(2, 3), (3, 4)])])
        self.assertTrue(path.interface.multi)
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, np.nan, 2, 3]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, np.nan, 3, 4]))

    def test_multi_path_unpack_multi_paths(self):
        path = Path([Path([[(0, 1), (1, 2)]]),
                     Path([[(2, 3), (3, 4)], [(4, 5), (5, 6)]])])
        self.assertTrue(path.interface.multi)
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, np.nan, 2, 3, np.nan, 4, 5]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, np.nan, 3, 4, np.nan, 5, 6]))

    def test_single_path_list_constructor(self):
        path = Path([(0, 1), (1, 2), (2, 3), (3, 4)])
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, 2, 3]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, 3, 4]))

    def test_single_path_tuple_constructor(self):
        path = Path(([0, 1, 2, 3], [1, 2, 3, 4]))
        self.assertEqual(path.dimension_values(0), np.array([
            0, 1, 2, 3]))
        self.assertEqual(path.dimension_values(1), np.array([
            1, 2, 3, 4]))

    def test_multi_path_list_split(self):
        path = Path([[(0, 1), (1, 2)], [(2, 3), (3, 4)]])
        subpaths = path.split()
        self.assertEqual(len(subpaths), 2)
        self.assertEqual(subpaths[0], Path([(0, 1), (1, 2)]))
        self.assertEqual(subpaths[1], Path([(2, 3), (3, 4)]))

    def test_single_path_split(self):
        # Splitting a single path yields the path itself.
        path = Path(([0, 1, 2, 3], [1, 2, 3, 4]))
        self.assertEqual(path, path.split()[0])

    def test_dataset_groupby_path(self):
        ds = Dataset([(0, 0, 1), (0, 1, 2), (1, 2, 3), (1, 3, 4)], ['group', 'x', 'y'])
        subpaths = ds.groupby('group', group_type=Path)
        self.assertEqual(len(subpaths), 2)
        self.assertEqual(subpaths[0], Path([(0, 1), (1, 2)]))
        self.assertEqual(subpaths[1], Path([(2, 3), (3, 4)]))
class PolygonsTests(ComparisonTestCase):
    """Tests of Polygons hole handling and hole-spec validation.

    Holes are nested per geometry, then per polygon, then per hole ring.
    """

    def setUp(self):
        xs = [1, 2, 3]
        ys = [2, 0, 7]
        # One geometry containing one polygon with two interior hole rings.
        holes = [[[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]]]
        self.single_poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
        # NaN separators split these arrays into two polygons.
        xs = [1, 2, 3, np.nan, 6, 7, 3]
        ys = [2, 0, 7, np.nan, 7, 5, 2]
        holes = [
            [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],
            []
        ]
        self.multi_poly = Polygons([{'x': xs, 'y': ys, 'holes': holes}])
        self.multi_poly_no_hole = Polygons([{'x': xs, 'y': ys}])
        self.distinct_polys = Polygons([
            {'x': xs, 'y': ys, 'holes': holes, 'value': 0},
            {'x': [4, 6, 6], 'y': [0, 2, 1], 'value': 1}], vdims='value')

    def test_single_poly_holes_match(self):
        self.assertTrue(self.single_poly.interface.has_holes(self.single_poly))
        paths = self.single_poly.split(datatype='array')
        holes = self.single_poly.interface.holes(self.single_poly)
        self.assertEqual(len(paths), len(holes))
        self.assertEqual(len(holes), 1)
        self.assertEqual(len(holes[0]), 1)
        self.assertEqual(len(holes[0][0]), 2)

    def test_multi_poly_holes_match(self):
        self.assertTrue(self.multi_poly.interface.has_holes(self.multi_poly))
        paths = self.multi_poly.split(datatype='array')
        holes = self.multi_poly.interface.holes(self.multi_poly)
        self.assertEqual(len(paths), len(holes))
        self.assertEqual(len(holes), 1)
        self.assertEqual(len(holes[0]), 2)
        self.assertEqual(len(holes[0][0]), 2)
        self.assertEqual(len(holes[0][1]), 0)

    def test_multi_poly_empty_holes(self):
        poly = Polygons([])
        self.assertFalse(poly.interface.has_holes(poly))
        self.assertEqual(poly.interface.holes(poly), [])

    def test_multi_poly_no_holes_match(self):
        # Without an explicit hole spec, each polygon gets an empty hole list.
        self.assertFalse(self.multi_poly_no_hole.interface.has_holes(self.multi_poly_no_hole))
        paths = self.multi_poly_no_hole.split(datatype='array')
        holes = self.multi_poly_no_hole.interface.holes(self.multi_poly_no_hole)
        self.assertEqual(len(paths), len(holes))
        self.assertEqual(len(holes), 1)
        self.assertEqual(len(holes[0]), 2)
        self.assertEqual(len(holes[0][0]), 0)
        self.assertEqual(len(holes[0][1]), 0)

    def test_distinct_multi_poly_holes_match(self):
        self.assertTrue(self.distinct_polys.interface.has_holes(self.distinct_polys))
        paths = self.distinct_polys.split(datatype='array')
        holes = self.distinct_polys.interface.holes(self.distinct_polys)
        self.assertEqual(len(paths), len(holes))
        self.assertEqual(len(holes), 2)
        self.assertEqual(len(holes[0]), 2)
        self.assertEqual(len(holes[0][0]), 2)
        self.assertEqual(len(holes[0][1]), 0)
        self.assertEqual(len(holes[1]), 1)
        self.assertEqual(len(holes[1][0]), 0)

    def test_single_poly_hole_validation(self):
        # Hole spec declares two polygons but the data contains one.
        xs = [1, 2, 3]
        ys = [2, 0, 7]
        with self.assertRaises(DataError):
            Polygons([{'x': xs, 'y': ys, 'holes': [[], []]}])

    def test_multi_poly_hole_validation(self):
        # Hole spec declares one polygon but the data contains two.
        xs = [1, 2, 3, np.nan, 6, 7, 3]
        ys = [2, 0, 7, np.nan, 7, 5, 2]
        with self.assertRaises(DataError):
            Polygons([{'x': xs, 'y': ys, 'holes': [[]]}])
class EllipseTests(ComparisonTestCase):
    """Tests of Ellipse sampling against pre-computed vertex arrays."""

    def setUp(self):
        # Circle sampled at 6 points: 5 distinct vertices plus the start
        # point repeated to close the path (hence "pentagon").
        self.pentagon = np.array([[ 0.00000000e+00, 5.00000000e-01],
                                  [ 4.75528258e-01, 1.54508497e-01],
                                  [ 2.93892626e-01, -4.04508497e-01],
                                  [ -2.93892626e-01, -4.04508497e-01],
                                  [ -4.75528258e-01, 1.54508497e-01],
                                  [ -1.22464680e-16, 5.00000000e-01]])
        # Same sampling with the y extent doubled.
        self.squashed = np.array([[ 0.00000000e+00, 1.00000000e+00],
                                  [ 4.75528258e-01, 3.09016994e-01],
                                  [ 2.93892626e-01, -8.09016994e-01],
                                  [ -2.93892626e-01, -8.09016994e-01],
                                  [ -4.75528258e-01, 3.09016994e-01],
                                  [ -1.22464680e-16, 1.00000000e+00]])

    def test_ellipse_simple_constructor(self):
        ellipse = Ellipse(0,0,1, samples=100)
        self.assertEqual(len(ellipse.data[0]), 100)

    def test_ellipse_simple_constructor_pentagon(self):
        ellipse = Ellipse(0,0,1, samples=6)
        self.assertEqual(np.allclose(ellipse.data[0], self.pentagon), True)

    def test_ellipse_tuple_constructor_squashed(self):
        # (width, height) tuple form.
        ellipse = Ellipse(0,0,(1,2), samples=6)
        self.assertEqual(np.allclose(ellipse.data[0], self.squashed), True)

    def test_ellipse_simple_constructor_squashed_aspect(self):
        # Equivalent shape expressed via size + aspect ratio.
        ellipse = Ellipse(0,0,2, aspect=0.5, samples=6)
        self.assertEqual(np.allclose(ellipse.data[0], self.squashed), True)
class BoxTests(ComparisonTestCase):
    """Tests of Box construction with rotation, against pre-computed vertices."""

    def setUp(self):
        # Unit square rotated by pi/8; first vertex repeated to close the path.
        self.rotated_square = np.array([[-0.27059805, -0.65328148],
                                        [-0.65328148, 0.27059805],
                                        [ 0.27059805, 0.65328148],
                                        [ 0.65328148, -0.27059805],
                                        [-0.27059805, -0.65328148]])
        # 2x1 rectangle rotated by pi/8.
        self.rotated_rect = np.array([[-0.73253782, -0.8446232 ],
                                      [-1.11522125, 0.07925633],
                                      [ 0.73253782, 0.8446232 ],
                                      [ 1.11522125, -0.07925633],
                                      [-0.73253782, -0.8446232 ]])

    def test_box_simple_constructor_rotated(self):
        box = Box(0,0,1, orientation=np.pi/8)
        self.assertEqual(np.allclose(box.data[0], self.rotated_square), True)

    def test_box_tuple_constructor_rotated(self):
        # (width, height) tuple form.
        box = Box(0,0,(2,1), orientation=np.pi/8)
        self.assertEqual(np.allclose(box.data[0], self.rotated_rect), True)

    def test_box_aspect_constructor_rotated(self):
        # Equivalent shape expressed via size + aspect ratio.
        box = Box(0,0,1, aspect=2, orientation=np.pi/8)
        self.assertEqual(np.allclose(box.data[0], self.rotated_rect), True)
| |
from input import *
import numpy as np
from os import path
import skimage.io as skimgio
import PIL.Image
from PIL import Image
import imghdr
import random
import skimage as sk
import skimage.transform as sktf
from skimage.transform import SimilarityTransform
from skimage.transform import warp as skwarp
from img2d_utils import ImageTransformer2D
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Img2DColumn(ComplexColumn):
    """Schema column describing 2-D image data.

    Wires together a reader (loads images from disk), a ser/de (raw pixels
    or encoded bytes), per-column metadata (running mean image) and optional
    pre/post transforms.
    """

    def __init__(self, name=None, columns_indexes=None, pre_transforms=None, post_transforms=None, is_raw_img=False,
                 is_related_path=False, reader=None, metadata=None):
        # BUG FIX: the transform lists previously defaulted to mutable `[]`
        # literals, which Python shares between all calls — transforms
        # appended to one column's default list would leak into every other
        # column. Default to None and create a fresh list per instance.
        if pre_transforms is None:
            pre_transforms = []
        if post_transforms is None:
            post_transforms = []
        super(Img2DColumn, self).__init__(name=name,
                                          type=Column.Type.IMG_2D,
                                          columns_indexes=columns_indexes,
                                          ser_de=Img2DSerDe(is_raw_img),
                                          reader=reader,
                                          metadata=metadata,
                                          pre_transforms=pre_transforms,
                                          post_transforms=post_transforms)
        # Provide defaults when no explicit reader/metadata were supplied.
        if reader is None:
            self._reader = Img2DReader(self, is_related_path)
        if metadata is None:
            self._metadata = Img2DColumnMetadata(self._name)

    def csv_file_path(self, csv_file_path):
        # Forward the CSV location to the reader so relative image paths resolve.
        self._reader.csv_file_path(csv_file_path)

    def process_on_write(self, record):
        """Read one record's image, update metadata, apply pre-transforms and serialize."""
        img_array, img_fmt = self.reader.read(record)
        if self._metadata is not None:
            self._metadata.aggregate(img_array)
        for transform in self._pre_transforms:
            img_array = transform.apply(img_array)
        return self.ser_de.serialize((img_array, img_fmt))

    @classmethod
    def from_schema(cls, column_schema):
        """Build an Img2DColumn from its serialized schema dict."""
        pre_transforms = []
        post_transforms = []
        for pre_transform in column_schema["pre_transforms"]:
            pre_transforms.append(cls._build_transform(pre_transform))
        for post_transform in column_schema["post_transforms"]:
            post_transforms.append(cls._build_transform(post_transform))
        indexes = None
        if 'index' in column_schema:
            indexes = column_schema['index']
        metadata = None
        if 'metadata' in column_schema:
            metadata = Img2DColumnMetadata.deserialize(column_schema['metadata'])
        is_related_path = False
        if 'is_related_path' in column_schema:
            is_related_path = column_schema['is_related_path']
        img2d = Img2DColumn(name=str(column_schema['name']), columns_indexes=indexes, is_related_path=is_related_path,
                            pre_transforms=pre_transforms, post_transforms=post_transforms, metadata=metadata)
        return img2d

    @staticmethod
    def _build_transform(transform):
        """Instantiate a transform from its {'type': ..., 'params': ...} dict."""
        # Renamed local (was `type`) so it no longer shadows the builtin.
        transform_type = transform["type"]
        params = transform["params"]
        if transform_type == ImgCropTransform.type():
            return ImgCropTransform.from_params(params)
        if transform_type == ImgResizeTransform.type():
            return ImgResizeTransform(params)
        if transform_type == ImgNormalizationTransform.type():
            return ImgNormalizationTransform.from_params(params)
        raise TypeError("Unsupported column transform type: %s" % transform)
class ImgCropTransform(ColumnTransform):
    """Center-crops (or pads) an image to a fixed (height, width) shape."""

    def __init__(self, shape):
        super(ImgCropTransform, self).__init__()
        # Target output shape as (height, width).
        self._out_shape = shape

    @staticmethod
    def type():
        # Schema identifier for this transform.
        return "imgCrop"

    def apply(self, img):
        """Return `img` center-cropped/padded to the configured shape."""
        return crop_image(img, self._out_shape)

    @staticmethod
    def from_params(params):
        """Build from a schema params dict with 'height' and 'width' keys."""
        return ImgCropTransform((int(params['height']), int(params['width'])))

    @property
    def serialize(self):
        """Schema dict round-trippable via from_params()."""
        return {"type": ImgCropTransform.type(), "params": {"height": self._out_shape[0], "width": self._out_shape[1]}}
class ImgResizeTransform(ColumnTransform):
    """Resize transform stub.

    NOTE(review): currently a no-op — `params` is ignored and `apply()`
    returns its input unchanged; `serialize` is empty. Presumably the real
    resize is still to be implemented; confirm before relying on it.
    """

    def __init__(self, params):
        # `params` intentionally unused (see class note).
        super(ImgResizeTransform, self).__init__()

    @staticmethod
    def type():
        # Schema identifier for this transform.
        return "imgResize"

    def apply(self, data):
        # Identity: returns the image unchanged.
        return data

    @staticmethod
    def config():
        return {'type': 'input'}

    @property
    def serialize(self):
        return {}
class ImgNormalizationTransform(ColumnTransform):
    """Normalizes image pixel values to zero mean / unit variance.

    In "global" mode a pre-computed dataset-wide mean/std pair is used;
    otherwise each image is normalized by its own statistics.
    """

    def __init__(self, is_global, mean=None, std=None):
        super(ImgNormalizationTransform, self).__init__()
        self._is_global = is_global
        if self._is_global:
            # Dataset-wide statistics supplied by the caller.
            self._mean = mean
            self._std = std
        else:
            # Unused in per-image mode; kept so serialize() stays well-formed.
            self._mean = 0.
            self._std = 1.

    @staticmethod
    def type():
        # Schema identifier for this transform.
        return "imgNormalization"

    def apply(self, data):
        """Return the normalized copy of `data`."""
        if self._is_global:
            # BUG FIX: was `(data - self._mean) / data.std`, which divided by
            # the ndarray.std *method object* (never called) instead of the
            # configured global standard deviation.
            return (data - self._mean) / self._std
        else:
            mean = data.mean()
            std = data.std()
            return (data - mean) / std

    @property
    def serialize(self):
        """Schema dict round-trippable via from_params()."""
        params = {"is_global": self._is_global, "mean": self._mean, "std": self._std}
        # BUG FIX: was ImgCropTransform.type(), which serialized this
        # transform as an "imgCrop" entry and broke schema round-tripping.
        return {"type": ImgNormalizationTransform.type(), "params": params}

    @staticmethod
    def from_params(params):
        """Build from a schema params dict ('is_global' is a string flag)."""
        is_global = params['is_global'] == "True"
        mean = None
        std = None
        if is_global:
            mean = float(params['mean'])
            std = float(params['std'])
        return ImgNormalizationTransform(is_global, mean, std)
class Img2DReader(ColumnReader):
    """Reads an image referenced by a CSV cell from disk."""

    def __init__(self, column, is_related_path=False):
        super(Img2DReader, self).__init__(column)
        # When True, cell values are paths relative to the CSV's directory.
        self._is_related_path = is_related_path
        self._data_path = None

    def csv_file_path(self, data_path):
        # Base directory used to resolve relative image paths.
        self._data_path = data_path

    def read(self, csv_row):
        """Return (image_array, image_format) for the path stored in `csv_row`."""
        img_path = str(csv_row[self._column.columns_indexes[0]])
        if self._is_related_path:
            # BUG FIX: used `os.path.join`, but this module only does
            # `from os import path` (no visible `import os`), so this raised
            # NameError. The local variable (was `path`) is also renamed so
            # it no longer shadows the imported `path` module.
            img_path = path.join(self._data_path, img_path)
        img_data = skimgio.imread(img_path)
        img_fmt = imghdr.what(img_path)
        return img_data, img_fmt
class Img2DSerDe(ColumnSerDe):
    """Serializes images either as raw pixel buffers or encoded image bytes."""

    def __init__(self, is_raw_img):
        # True: store raw pixels plus shape; False: store encoded bytes
        # (the image's original format, e.g. PNG/JPEG).
        self._is_raw_img = is_raw_img

    def serialize(self, img):
        """Serialize an (array, format) pair into a dict of primitives."""
        img_data = img[0]
        img_fmt = img[1]
        img_ser = {}
        if self._is_raw_img:
            img_ser['rows'] = img_data.shape[0]
            img_ser['cols'] = img_data.shape[1]
            img_ser['ch_num'] = 1
            if len(img_data.shape) > 2:
                img_ser['ch_num'] = img_data.shape[2]
            # NOTE(review): the raw bytes keep img_data's original dtype, but
            # deserialize() always reads them back as uint8 — confirm callers
            # only pass uint8 arrays, otherwise shapes/values get corrupted.
            img_ser['data'] = img_data.tostring()
        else:
            # Re-encode via PIL in the image's original format.
            img_array = PIL.Image.fromarray(img_data.astype(np.uint8))
            img_buffer = StringIO()
            img_array.save(img_buffer, format=img_fmt)
            img_ser['data'] = img_buffer.getvalue()
        return img_ser

    def deserialize(self, img):
        """Inverse of serialize(): rebuild a numpy array from the dict."""
        if 'cols' in img:
            # Raw branch: reconstruct the array from the stored shape.
            rows = img['rows']
            cols = img['cols']
            ch_num = img['ch_num']
            img = np.frombuffer(img['data'], dtype=np.uint8)
            if ch_num == 1:
                img = img.reshape((rows, cols))
            else:
                img = img.reshape((rows, cols, ch_num))
        else:
            # Encoded branch: decode the image bytes.
            img = skimgio.imread(StringIO(img['data']))
        return img
class Img2DColumnMetadata(ColumnMetadata):
    """Accumulates a per-column mean image over all processed records."""

    def __init__(self, column_name=None):
        self._column_name = column_name
        # Directory where the mean image file is written/read.
        self._path = None
        # Running sum of images; becomes the mean after merge().
        self._img = None
        # Number of images aggregated so far.
        self._img_num = 0

    @property
    def img(self):
        return self._img

    @img.setter
    def img(self, img):
        self._img = img

    @property
    def img_num(self):
        return self._img_num

    def aggregate(self, img):
        """Add one image to the running sum."""
        # BUG FIX: `np.float` was a deprecated alias removed in NumPy 1.20;
        # the builtin `float` is the documented equivalent (float64).
        img = img.astype(float)
        if self._img is None:
            self._img = img
        else:
            self._img += img
        self._img_num += 1

    def merge(self, agg_metadata):
        """Fold in partial sums from other metadata objects, then finalize the mean."""
        for metadata in agg_metadata:
            if self._img is None:
                self._img = metadata.img
            else:
                self._img += metadata.img
            self._img_num = self._img_num + metadata.img_num
        self._img = self._img / self._img_num

    def path(self, path):
        # Sets the output directory (parameter shadows the module-level
        # `path` only inside this method).
        self._path = path

    def serialize(self):
        """Write the mean image to disk and return its schema reference."""
        img = Image.fromarray(np.uint8(self._img)).convert('RGB')
        img_path_prefix = self._column_name
        if self._column_name is None:
            # Anonymous column: pick a random file-name prefix.
            img_path_prefix = str(random.getrandbits(16))
        mean_img_path = path.join(self._path, img_path_prefix + '-mean-img.jpg')
        img.save(mean_img_path)
        return {'mean-img-path': mean_img_path}

    @classmethod
    def deserialize(cls, schema):
        """Load the mean image referenced by a schema dict."""
        mean_img_path = path.join(schema['mean-img-path'])
        img = skimgio.imread(mean_img_path)
        metadata = Img2DColumnMetadata()
        # BUG FIX: was `os.path.dirname`, but `os` is never visibly imported
        # in this module (only `from os import path`), so this raised
        # NameError; serialize() above already uses the bare `path` module.
        metadata.path(path.dirname(mean_img_path))
        metadata.img = img
        return metadata
# Util methods for img2d
def squash_image(img, output_shape):
    """Resize `img` to exactly `output_shape`, ignoring aspect ratio."""
    return sktf.resize(img, output_shape=output_shape)
def fill_image(img, output_shape):
    """Resize `img` to fit inside `output_shape` preserving aspect ratio,
    centering the result on a zero (black) background ("letterboxing").
    """
    n_rows = img.shape[0]
    n_cols = img.shape[1]
    # Already the right size: return a copy unchanged.
    if n_rows == output_shape[0] and n_cols == output_shape[1]:
        return img.copy()
    # Despite the names, these are aspect ratios (rows/cols), not sizes.
    size_out = float(output_shape[0]) / float(output_shape[1])
    size_in = float(n_rows) / float(n_cols)
    # Scale so the limiting dimension exactly matches the output frame.
    if size_out > size_in:
        new_shape = (int(n_rows * float(output_shape[1]) / n_cols), output_shape[1])
    else:
        new_shape = (output_shape[0], int(n_cols * float(output_shape[0]) / n_rows))
    timg = sktf.resize(img, new_shape, preserve_range=True)
    timgShape = timg.shape[:2]
    nch = 1 if timg.ndim < 3 else timg.shape[-1]
    # Top-left corner that centers the resized image in the output frame.
    p0 = (int((output_shape[0] - timgShape[0]) / 2.), int((output_shape[1] - timgShape[1]) / 2.))
    if nch == 1:
        tret = np.zeros(output_shape, dtype=img.dtype)
        tret[p0[0]:p0[0] + timg.shape[0], p0[1]:p0[1] + timg.shape[1]] = timg
    else:
        tret = np.zeros((output_shape[0], output_shape[1], nch), dtype=img.dtype)
        tret[p0[0]:p0[0] + timg.shape[0], p0[1]:p0[1] + timg.shape[1], :] = timg
    # NOTE(review): resize() returns floats; assigning into an integer-dtype
    # tret truncates fractional values — confirm that is acceptable.
    return tret
def crop_image(img, output_shape):
    """Center-crop (or zero-pad) `img` to `output_shape` via an affine warp."""
    # TODO: check performance: code is really clean, but warping may cost
    # more than plain array slicing.
    # Sizes expressed as (width, height) to match the transform convention.
    size_in = (img.shape[1], img.shape[0])
    size_out = (output_shape[1], output_shape[0])
    # Translate so the input's center maps onto the output's center.
    transform = SimilarityTransform(translation=(-0.5 * (size_out[0] - size_in[0]), -0.5 * (size_out[1] - size_in[1])))
    return skwarp(img, transform, output_shape=output_shape)
| |
"""Example training a memory neural net on the bAbI dataset.
References Keras and is based off of https://keras.io/examples/babi_memnn/.
"""
from __future__ import print_function
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import (Input, Activation, Dense, Permute,
Dropout)
from tensorflow.keras.layers import add, dot, concatenate
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing.sequence import pad_sequences
from filelock import FileLock
import os
import argparse
import tarfile
import numpy as np
import re
from ray import tune
def tokenize(sent):
    """Return the tokens of a sentence including punctuation.

    >>> tokenize("Bob dropped the apple. Where is the apple?")
    ["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
    """
    # BUG FIX: the original pattern was r"(\W+)?", which can match the empty
    # string. Since Python 3.7, re.split() splits on such zero-width matches,
    # so the original tokenized every sentence into single characters.
    # Splitting on r"(\W+)" (one-or-more non-word chars, captured so the
    # punctuation is kept) restores the documented behavior on all versions.
    return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
def parse_stories(lines, only_supporting=False):
    """Parse stories provided in the bAbI tasks format.

    Each line starts with a sentence id; id 1 starts a new story. Question
    lines are tab-separated (question, answer, supporting sentence ids).
    If `only_supporting` is true, only the sentences that support the
    answer are kept in each story.
    """
    parsed = []
    current_story = []
    for raw_line in lines:
        text = raw_line.decode("utf-8").strip()
        sent_id, text = text.split(" ", 1)
        if int(sent_id) == 1:
            # Sentence id 1 marks the start of a fresh story.
            current_story = []
        if "\t" not in text:
            # Plain statement line: tokenize and append to the story so far.
            current_story.append(tokenize(text))
            continue
        # Question line: question \t answer \t supporting ids.
        question, answer, supporting = text.split("\t")
        question = tokenize(question)
        if only_supporting:
            # Only keep the sentences referenced by the supporting ids.
            indices = map(int, supporting.split())
            substory = [current_story[i - 1] for i in indices]
        else:
            # Keep every non-empty sentence seen so far.
            substory = [sent for sent in current_story if sent]
        parsed.append((substory, question, answer))
        # Placeholder keeps 1-based supporting indices aligned.
        current_story.append("")
    return parsed
def get_stories(f, only_supporting=False, max_length=None):
    """Given a file object, read and parse its stories, then flatten each
    story's sentences into a single token list.

    If `max_length` is supplied, any story longer than `max_length` tokens
    is discarded.
    """
    def _flatten(nested):
        # Concatenate a list of token lists into one token list.
        return sum(nested, [])

    stories = parse_stories(f.readlines(), only_supporting=only_supporting)
    result = []
    for story, question, answer in stories:
        flat_story = _flatten(story)
        if not max_length or len(flat_story) < max_length:
            result.append((flat_story, question, answer))
    return result
def vectorize_stories(word_idx, story_maxlen, query_maxlen, data):
    """Map (story, query, answer) token triples to padded index arrays.

    Returns (padded story indices, padded query indices, answer index array).
    """
    story_vecs = [[word_idx[w] for w in story] for story, _, _ in data]
    query_vecs = [[word_idx[w] for w in query] for _, query, _ in data]
    answer_vecs = [word_idx[answer] for _, _, answer in data]
    return (pad_sequences(story_vecs, maxlen=story_maxlen),
            pad_sequences(query_vecs, maxlen=query_maxlen),
            np.array(answer_vecs))
def read_data(finish_fast=False):
    """Download the bAbI archive if needed and parse train/test stories.

    :param finish_fast: when True, truncate both splits to 64 stories for
        quick smoke testing.
    :return: (train_stories, test_stories).
    """
    # Fetch the dataset archive (or reuse Keras' cached copy).
    try:
        archive_path = get_file(
            "babi-tasks-v1-2.tar.gz",
            origin="https://s3.amazonaws.com/text-datasets/"
            "babi_tasks_1-20_v1-2.tar.gz")
    except Exception:
        print(
            "Error downloading dataset, please download it manually:\n"
            "$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2"  # noqa: E501
            ".tar.gz\n"
            "$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz"  # noqa: E501
        )
        raise

    # Path templates for the supported challenges within the archive.
    challenges = {
        # QA1 with 10,000 samples
        "single_supporting_fact_10k": "tasks_1-20_v1-2/en-10k/qa1_"
        "single-supporting-fact_{}.txt",
        # QA2 with 10,000 samples
        "two_supporting_facts_10k": "tasks_1-20_v1-2/en-10k/qa2_"
        "two-supporting-facts_{}.txt",
    }
    challenge = challenges["single_supporting_fact_10k"]

    # Extract and parse the train/test splits directly from the tarball.
    with tarfile.open(archive_path) as tar:
        train_stories = get_stories(tar.extractfile(challenge.format("train")))
        test_stories = get_stories(tar.extractfile(challenge.format("test")))
    if finish_fast:
        train_stories = train_stories[:64]
        test_stories = test_stories[:64]
    return train_stories, test_stories
class MemNNModel(tune.Trainable):
    """Tune Trainable wrapping a Keras end-to-end memory network for bAbI QA."""

    def build_model(self):
        """Helper method for creating the model"""
        # Build the vocabulary from every token in both splits.
        vocab = set()
        for story, q, answer in self.train_stories + self.test_stories:
            vocab |= set(story + q + [answer])
        vocab = sorted(vocab)

        # Reserve 0 for masking via pad_sequences
        vocab_size = len(vocab) + 1
        story_maxlen = max(
            len(x) for x, _, _ in self.train_stories + self.test_stories)
        query_maxlen = max(
            len(x) for _, x, _ in self.train_stories + self.test_stories)

        # Vectorized splits are cached on self for use by step().
        word_idx = {c: i + 1 for i, c in enumerate(vocab)}
        self.inputs_train, self.queries_train, self.answers_train = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.train_stories))
        self.inputs_test, self.queries_test, self.answers_test = (
            vectorize_stories(word_idx, story_maxlen, query_maxlen,
                              self.test_stories))

        # placeholders
        input_sequence = Input((story_maxlen, ))
        question = Input((query_maxlen, ))

        # encoders
        # embed the input sequence into a sequence of vectors
        input_encoder_m = Sequential()
        input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
        input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, embedding_dim)

        # embed the input into a sequence of vectors of size query_maxlen
        input_encoder_c = Sequential()
        input_encoder_c.add(
            Embedding(input_dim=vocab_size, output_dim=query_maxlen))
        input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, query_maxlen)

        # embed the question into a sequence of vectors
        question_encoder = Sequential()
        question_encoder.add(
            Embedding(
                input_dim=vocab_size, output_dim=64,
                input_length=query_maxlen))
        question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, query_maxlen, embedding_dim)

        # encode input sequence and questions (which are indices)
        # to sequences of dense vectors
        input_encoded_m = input_encoder_m(input_sequence)
        input_encoded_c = input_encoder_c(input_sequence)
        question_encoded = question_encoder(question)

        # compute a "match" between the first input vector sequence
        # and the question vector sequence
        # shape: `(samples, story_maxlen, query_maxlen)`
        match = dot([input_encoded_m, question_encoded], axes=(2, 2))
        match = Activation("softmax")(match)

        # add the match matrix with the second input vector sequence
        response = add(
            [match, input_encoded_c])  # (samples, story_maxlen, query_maxlen)
        response = Permute(
            (2, 1))(response)  # (samples, query_maxlen, story_maxlen)

        # concatenate the match matrix with the question vector sequence
        answer = concatenate([response, question_encoded])

        # the original paper uses a matrix multiplication.
        # we choose to use a RNN instead.
        answer = LSTM(32)(answer)  # (samples, 32)

        # one regularization layer -- more would probably be needed.
        answer = Dropout(self.config.get("dropout", 0.3))(answer)
        answer = Dense(vocab_size)(answer)  # (samples, vocab_size)
        # we output a probability distribution over the vocabulary
        answer = Activation("softmax")(answer)

        # build the final model
        model = Model([input_sequence, question], answer)
        return model

    def setup(self, config):
        """Load the data and build/compile the model for this trial."""
        # The file lock serializes dataset download/extraction across
        # concurrently starting trials.
        with FileLock(os.path.expanduser("~/.tune.lock")):
            self.train_stories, self.test_stories = read_data(
                config["finish_fast"])
        model = self.build_model()
        rmsprop = RMSprop(
            lr=self.config.get("lr", 1e-3), rho=self.config.get("rho", 0.9))
        model.compile(
            optimizer=rmsprop,
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"])
        self.model = model

    def step(self):
        """One tuning iteration: fit, then report accuracy on the train split."""
        # train
        self.model.fit(
            [self.inputs_train, self.queries_train],
            self.answers_train,
            batch_size=self.config.get("batch_size", 32),
            epochs=self.config.get("epochs", 1),
            validation_data=([self.inputs_test, self.queries_test],
                             self.answers_test),
            verbose=0)
        _, accuracy = self.model.evaluate(
            [self.inputs_train, self.queries_train],
            self.answers_train,
            verbose=0)
        return {"mean_accuracy": accuracy}

    def save_checkpoint(self, checkpoint_dir):
        """Persist the Keras model; returns the checkpoint file path."""
        file_path = checkpoint_dir + "/model"
        self.model.save(file_path)
        return file_path

    def load_checkpoint(self, path):
        """Restore the Keras model from `path`."""
        # See https://stackoverflow.com/a/42763323
        del self.model
        self.model = load_model(path)
if __name__ == "__main__":
    import ray
    from ray.tune.schedulers import PopulationBasedTraining

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()

    ray.init(num_cpus=2)

    # Run once up front — presumably to warm the dataset cache before any
    # trial starts (trials also guard downloads with a FileLock in setup()).
    read_data()

    # PBT mutates dropout/lr/rho at each perturbation interval.
    pbt = PopulationBasedTraining(
        perturbation_interval=2,
        hyperparam_mutations={
            "dropout": lambda: np.random.uniform(0, 1),
            "lr": lambda: 10**np.random.randint(-10, 0),
            "rho": lambda: np.random.uniform(0, 1)
        })

    results = tune.run(
        MemNNModel,
        name="pbt_babi_memnn",
        scheduler=pbt,
        metric="mean_accuracy",
        mode="max",
        # Short run under --smoke-test, full run otherwise.
        stop={"training_iteration": 4 if args.smoke_test else 100},
        num_samples=2,
        config={
            "finish_fast": args.smoke_test,
            "batch_size": 32,
            "epochs": 1,
            "dropout": 0.3,
            "lr": 0.01,
            "rho": 0.9
        })
| |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility: json.JSONDecodeError was added in Python 3.5;
# older versions raise a plain ValueError on malformed JSON.
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
# Seconds to wait for a bitcoind process to exit before giving up
# (see TestNode.wait_until_stopped).
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
    """Raised when a node fails to start correctly.

    Raised by TestNode.wait_for_rpc_connection when the bitcoind process
    exits during initialization.
    """
class ErrorMatch(Enum):
    """How assert_start_raises_init_error compares expected_msg to stderr."""
    FULL_TEXT = 1      # exact string equality
    FULL_REGEX = 2     # re.fullmatch against the whole stderr
    PARTIAL_REGEX = 3  # re.search anywhere in stderr
class TestNode():
    """A class for representing a bitcoind node under test.

    This class contains:

    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node

    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""

    def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
        self.index = i
        self.datadir = datadir
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-mocktime=" + str(mocktime),
            "-uacomment=testnode%d" % i
        ]

        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli

        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True  # Whether to kill the node when this object goes away

        self.p2ps = []

    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        PRIV_KEYS = [
            # address, privkey
            ('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
            ('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
            ('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
            ('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
            ('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
            ('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
            ('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
            ('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
            ('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
        ]
        return PRIV_KEYS[self.index]

    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)

    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))

    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()

    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(self.cli, name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(self.rpc, name)

    def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args

        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout

        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir)

        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")

        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)

        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")

    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process.

        Raises FailedToStartError if the process exits during initialization
        and an AssertionError if no connection could be established before
        the RPC timeout elapses."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                raise FailedToStartError(self._node_msg(
                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
                self.rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                self.rpc_connected = True
                self.url = self.rpc.url
                self.log.debug("RPC successfully started")
                return
            except IOError as e:
                if e.errno != errno.ECONNREFUSED:  # Port not yet open?
                    raise  # unknown IO error
            except JSONRPCException as e:  # Initialization phase
                if e.error['code'] != -28:  # RPC in warmup?
                    raise  # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)

        self._raise_assertion_error("Unable to connect to bitcoind")

    def get_wallet_rpc(self, wallet_name):
        """Return an RPC/CLI handle scoped to the named wallet."""
        if self.use_cli:
            return self.cli("-rpcwallet={}".format(wallet_name))
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            return self.rpc / wallet_path

    def stop_node(self, expected_stderr=''):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")

        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))

        self.stdout.close()
        self.stderr.close()

        del self.p2ps[:]

    def is_node_stopped(self):
        """Checks whether the node has stopped.

        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False

        # process has stopped. Assert that it didn't return an error code.
        assert return_code == 0, self._node_msg(
            "Node returned non-zero exit code (%d) when stopping" % return_code)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True

    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        """Block until the node process has exited (or raise on timeout)."""
        wait_until(self.is_node_stopped, timeout=timeout)

    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs):
        """Context manager asserting that each expected message is written to
        debug.log while the with-block runs."""
        debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            dl.seek(0, 2)
            prev_size = dl.tell()
        try:
            yield
        finally:
            # Only examine log output produced inside the with-block.
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))

    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stderr."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                self.log.debug('bitcoind failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                if expected_msg is None:
                    assert_msg = "bitcoind should have exited with an error"
                else:
                    assert_msg = "bitcoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)

    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add a p2p connection to the node.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'

        p2p_conn.peer_connect(**kwargs)()
        self.p2ps.append(p2p_conn)
        if wait_for_verack:
            p2p_conn.wait_for_verack()

        return p2p_conn

    @property
    def p2p(self):
        """Return the first p2p connection

        Convenience property - most tests only use a single p2p connection to each
        node, so this saves having to write node.p2ps[0] many times."""
        assert self.p2ps, self._node_msg("No p2p connection")
        return self.p2ps[0]

    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]
class TestNodeCLIAttr:
    """Callable proxy for a single bitcoin-cli command.

    Created by TestNodeCLI.__getattr__ so that node.cli.getblockcount(...)
    forwards to TestNodeCLI.send_cli("getblockcount", ...)."""

    def __init__(self, cli, command):
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        """Execute the command immediately through the owning CLI."""
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a deferred zero-argument callable, e.g. for batch()."""
        def request():
            return self(*args, **kwargs)
        return request
class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""

    def __init__(self, binary, datadir):
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')

    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options.
        # Return a fresh instance so the base object's options stay unchanged.
        cli = TestNodeCLI(self.binary, self.datadir)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli

    def __getattr__(self, command):
        # Unknown attributes are treated as bitcoin-cli command names.
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        """Run a list of zero-argument request callables, collecting each
        result (or JSONRPCException) into a list of dicts."""
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results

    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object.

        Raises JSONRPCException when bitcoin-cli reports an RPC error, and
        subprocess.CalledProcessError for any other non-zero exit."""
        # bitcoin-cli expects booleans as lowercase "true"/"false"
        # (isinstance is safe here: bool cannot be subclassed).
        pos_args = [str(arg).lower() if isinstance(arg, bool) else str(arg) for arg in args]
        named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        # Lazy %-style args avoid formatting when debug logging is disabled.
        self.log.debug("Running bitcoin-cli command: %s", command)
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except JSONDecodeError:
            # Not JSON (e.g. help text) -- return the raw string.
            return cli_stdout.rstrip("\n")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.