gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
import itertools
from numba.core import types
from numba.core.typeconv.typeconv import TypeManager, TypeCastingRules
from numba.core.typeconv import rules
from numba.core.typeconv import castgraph, Conversion
import unittest
class CompatibilityTestMixin(unittest.TestCase):
    """Mixin asserting the expected numeric casting behaviour.

    Subclasses supply a ``check_compatible(from_type, to_type)`` callable
    (e.g. a ``TypeManager.check_compatible`` bound method) and this mixin
    verifies that it returns the expected ``Conversion`` kind for a matrix
    of Numba numeric type pairs.
    """

    def check_number_compatibility(self, check_compatible):
        """Assert ``check_compatible``'s verdict for many numeric pairs."""
        # Short aliases for the Numba scalar types under test.
        b = types.boolean
        i8 = types.int8
        i16 = types.int16
        i32 = types.int32
        i64 = types.int64
        u8 = types.uint8
        u16 = types.uint16
        u32 = types.uint32
        u64 = types.uint64
        f16 = types.float16
        f32 = types.float32
        f64 = types.float64
        c64 = types.complex64
        c128 = types.complex128
        # Identical types convert exactly.
        self.assertEqual(check_compatible(i32, i32), Conversion.exact)
        # Boolean widens safely to integers; the reverse is unsafe.
        self.assertEqual(check_compatible(b, i8), Conversion.safe)
        self.assertEqual(check_compatible(b, u8), Conversion.safe)
        self.assertEqual(check_compatible(i8, b), Conversion.unsafe)
        self.assertEqual(check_compatible(u8, b), Conversion.unsafe)
        # Widening within signed integers is a promotion.
        self.assertEqual(check_compatible(i32, i64), Conversion.promote)
        # Same-width signed <-> unsigned can change the value: unsafe.
        self.assertEqual(check_compatible(i32, u32), Conversion.unsafe)
        self.assertEqual(check_compatible(u32, i32), Conversion.unsafe)
        self.assertEqual(check_compatible(u32, i64), Conversion.safe)
        # Integer -> same-width float loses precision: unsafe.
        self.assertEqual(check_compatible(i16, f16), Conversion.unsafe)
        self.assertEqual(check_compatible(i32, f32), Conversion.unsafe)
        self.assertEqual(check_compatible(u32, f32), Conversion.unsafe)
        # 32-bit integers fit exactly in a float64 mantissa: safe.
        self.assertEqual(check_compatible(i32, f64), Conversion.safe)
        self.assertEqual(check_compatible(u32, f64), Conversion.safe)
        # Note this is inconsistent with i32 -> f32...
        self.assertEqual(check_compatible(i64, f64), Conversion.safe)
        self.assertEqual(check_compatible(u64, f64), Conversion.safe)
        # Float -> same-width complex is safe; narrowing complex is not.
        self.assertEqual(check_compatible(f32, c64), Conversion.safe)
        self.assertEqual(check_compatible(f64, c128), Conversion.safe)
        self.assertEqual(check_compatible(f64, c64), Conversion.unsafe)
        # Propagated compatibility relationships
        self.assertEqual(check_compatible(i16, f64), Conversion.safe)
        self.assertEqual(check_compatible(i16, i64), Conversion.promote)
        self.assertEqual(check_compatible(i32, c64), Conversion.unsafe)
        self.assertEqual(check_compatible(i32, c128), Conversion.safe)
        self.assertEqual(check_compatible(i32, u64), Conversion.unsafe)
        # Every numeric pair must have *some* conversion, except that
        # complex never converts down to a non-complex type.
        for ta, tb in itertools.product(types.number_domain,
                                        types.number_domain):
            if ta in types.complex_domain and tb not in types.complex_domain:
                continue
            self.assertTrue(check_compatible(ta, tb) is not None,
                            msg="No cast from %s to %s" % (ta, tb))
class TestTypeConv(CompatibilityTestMixin, unittest.TestCase):
    """Tests for TypeManager, TypeCastingRules and the cast graph."""

    def test_typeconv(self):
        """Overload selection on a bare TypeManager with two hand rules."""
        tm = TypeManager()
        i32 = types.int32
        i64 = types.int64
        f32 = types.float32
        tm.set_promote(i32, i64)
        tm.set_unsafe_convert(i32, f32)
        sig = (i32, f32)
        ovs = [
            (i32, i32),
            (f32, f32),
            (i64, i64),
        ]
        # allow_unsafe = True => a conversion from i32 to f32 is chosen
        sel = tm.select_overload(sig, ovs, True, False)
        self.assertEqual(sel, 1)
        # allow_unsafe = False => no overload available
        with self.assertRaises(TypeError):
            sel = tm.select_overload(sig, ovs, False, False)

    def test_default_rules(self):
        """The default type manager implements the documented numeric lattice."""
        tm = rules.default_type_manager
        self.check_number_compatibility(tm.check_compatible)

    def test_overload1(self):
        tm = rules.default_type_manager
        i32 = types.int32
        i64 = types.int64
        sig = (i64, i32, i32)
        ovs = [
            (i32, i32, i32),
            (i64, i64, i64),
        ]
        # The first overload is unsafe, the second is safe => the second
        # is always chosen, regardless of allow_unsafe.
        self.assertEqual(tm.select_overload(sig, ovs, True, False), 1)
        self.assertEqual(tm.select_overload(sig, ovs, False, False), 1)

    def test_overload2(self):
        tm = rules.default_type_manager
        i16 = types.int16
        i32 = types.int32
        i64 = types.int64
        sig = (i32, i16, i32)
        ovs = [
            # Three promotes
            (i64, i64, i64),
            # One promotes, two exact types
            (i32, i32, i32),
            # Two unsafe converts, one exact type
            (i16, i16, i16),
        ]
        # The mostly-exact overload wins whether or not unsafe casts are allowed.
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False,
                                            exact_match_required=False), 1)
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True,
                                            exact_match_required=False), 1)
        # The same in reverse order
        ovs.reverse()
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False,
                                            exact_match_required=False), 1)
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True,
                                            exact_match_required=False), 1)

    def test_overload3(self):
        # Promotes should be preferred over safe converts
        tm = rules.default_type_manager
        i32 = types.int32
        i64 = types.int64
        f64 = types.float64
        sig = (i32, i32)
        ovs = [
            # Two promotes
            (i64, i64),
            # Two safe converts
            (f64, f64),
        ]
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False,
                                            exact_match_required=False), 0)
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True,
                                            exact_match_required=False), 0)
        # The same in reverse order
        ovs.reverse()
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=False,
                                            exact_match_required=False), 1)
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True,
                                            exact_match_required=False), 1)

    def test_overload4(self):
        tm = rules.default_type_manager
        i16 = types.int16
        i32 = types.int32
        i64 = types.int64
        f16 = types.float16
        f32 = types.float32
        sig = (i16, f16, f16)
        ovs = [
            # One unsafe, one promote, one exact
            (f16, f32, f16),
            # Two unsafe, one exact types
            (f32, i32, f16),
        ]
        # Fewer unsafe conversions wins.
        self.assertEqual(tm.select_overload(sig, ovs, allow_unsafe=True,
                                            exact_match_required=False), 0)

    def test_type_casting_rules(self):
        """TypeCastingRules propagates transitive casts, even for new types."""
        tm = TypeManager()
        tcr = TypeCastingRules(tm)
        i16 = types.int16
        i32 = types.int32
        i64 = types.int64
        f64 = types.float64
        f32 = types.float32
        f16 = types.float16
        made_up = types.Dummy("made_up")
        tcr.promote_unsafe(i32, i64)
        tcr.safe_unsafe(i32, f64)
        tcr.promote_unsafe(f32, f64)
        tcr.promote_unsafe(f16, f32)
        tcr.unsafe_unsafe(i16, f16)

        def base_test():
            # As declared
            self.assertEqual(tm.check_compatible(i32, i64), Conversion.promote)
            self.assertEqual(tm.check_compatible(i32, f64), Conversion.safe)
            self.assertEqual(tm.check_compatible(f16, f32), Conversion.promote)
            self.assertEqual(tm.check_compatible(f32, f64), Conversion.promote)
            self.assertEqual(tm.check_compatible(i64, i32), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(f64, i32), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(f64, f32), Conversion.unsafe)
            # Propagated
            self.assertEqual(tm.check_compatible(i64, f64), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(f64, i64), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(i64, f32), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(i32, f32), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(f32, i32), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(i16, f16), Conversion.unsafe)
            self.assertEqual(tm.check_compatible(f16, i16), Conversion.unsafe)

        # Test base graph
        base_test()

        # "made_up" is unknown to the graph so far: no cast either way.
        self.assertIsNone(tm.check_compatible(i64, made_up))
        self.assertIsNone(tm.check_compatible(i32, made_up))
        self.assertIsNone(tm.check_compatible(f32, made_up))
        self.assertIsNone(tm.check_compatible(made_up, f64))
        self.assertIsNone(tm.check_compatible(made_up, i64))

        # Add new test
        tcr.promote(f64, made_up)
        tcr.unsafe(made_up, i32)

        # Ensure the graph did not change by adding the new type
        base_test()

        # To "made up" type
        self.assertEqual(tm.check_compatible(i64, made_up), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(i32, made_up), Conversion.safe)
        self.assertEqual(tm.check_compatible(f32, made_up), Conversion.promote)
        self.assertEqual(tm.check_compatible(made_up, f64), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(made_up, i64), Conversion.unsafe)

    def test_castgraph_propagate(self):
        """Each insert_rule triggers callbacks for all newly derived casts."""
        saved = []

        def callback(src, dst, rel):
            saved.append((src, dst, rel))

        tg = castgraph.TypeGraph(callback)
        i32 = types.int32
        i64 = types.int64
        f64 = types.float64
        f32 = types.float32
        tg.insert_rule(i32, i64, Conversion.promote)
        tg.insert_rule(i64, i32, Conversion.unsafe)
        saved.append(None)  # marker separating the insertion batches
        tg.insert_rule(i32, f64, Conversion.safe)
        tg.insert_rule(f64, i32, Conversion.unsafe)
        saved.append(None)
        tg.insert_rule(f32, f64, Conversion.promote)
        tg.insert_rule(f64, f32, Conversion.unsafe)
        # First batch: only the two explicit i32 <-> i64 rules.
        self.assertIn((i32, i64, Conversion.promote), saved[0:2])
        self.assertIn((i64, i32, Conversion.unsafe), saved[0:2])
        self.assertIs(saved[2], None)
        # Second batch: the two explicit rules plus the casts derived
        # through i32 (i64 -> f64 via i32, f64 -> i64 via i32).
        self.assertIn((i32, f64, Conversion.safe), saved[3:7])
        self.assertIn((f64, i32, Conversion.unsafe), saved[3:7])
        self.assertIn((i64, f64, Conversion.unsafe), saved[3:7])
        # BUGFIX: this previously duplicated the (i64, f64) check; the
        # fourth derived cast in this batch is f64 -> i64 (unsafe).
        self.assertIn((f64, i64, Conversion.unsafe), saved[3:7])
        self.assertIs(saved[7], None)
        # Third batch: two explicit rules plus f32 <-> {i32, i64} via f64.
        self.assertIn((f32, f64, Conversion.promote), saved[8:14])
        self.assertIn((f64, f32, Conversion.unsafe), saved[8:14])
        self.assertIn((f32, i32, Conversion.unsafe), saved[8:14])
        self.assertIn((i32, f32, Conversion.unsafe), saved[8:14])
        self.assertIn((f32, i64, Conversion.unsafe), saved[8:14])
        self.assertIn((i64, f32, Conversion.unsafe), saved[8:14])
        # No further callbacks were fired.
        self.assertEqual(len(saved[14:]), 0)
# Allow running this test module directly: `python test_typeconv.py`.
if __name__ == '__main__':
    unittest.main()
|
|
# -*- coding: utf-8 -*-
from flask import abort, url_for, flash, redirect, g, session, render_template, request
from coaster.utils import valid_username
from coaster.views import get_next_url
from baseframe import _, csrf
from baseframe.signals import exception_catchall
from lastuser_core import login_registry
from lastuser_core.models import db, getextid, merge_users, User, UserEmail, UserExternalId, UserEmailClaim
from lastuser_core.registry import LoginInitError, LoginCallbackError
from lastuser_core.signals import user_data_changed
from .. import lastuser_oauth
from ..forms.profile import ProfileMergeForm
from ..mailclient import send_email_verify_link
from ..views.helpers import login_internal, register_internal, set_loginmethod_cookie, requires_login
@csrf.exempt
@lastuser_oauth.route('/login/<service>', methods=['GET', 'POST'])
def login_service(service):
    """
    Handle login with a registered service.

    Looks up the provider by name, builds the post-login callback URL
    (carrying the eventual redirect target in ``next``) and hands control
    to the provider, which typically redirects to the external service.
    """
    if service not in login_registry:
        # Unknown provider name in the URL.
        abort(404)
    provider = login_registry[service]
    # Where to send the user after a successful login; embedded into the
    # callback URL so it survives the round-trip to the external service.
    next_url = get_next_url(referrer=False, default=None)
    callback_url = url_for('.login_service_callback', service=service, next=next_url, _external=True)
    try:
        return provider.do(callback_url=callback_url)
    except (LoginInitError, LoginCallbackError) as e:
        # NOTE(review): `unicode` is Python 2 only; this module predates Python 3.
        msg = _(u"{service} login failed: {error}").format(service=provider.title, error=unicode(e))
        # Report to exception monitors, then show the user a friendly message.
        exception_catchall.send(e, message=msg)
        flash(msg, category='danger')
        return redirect(next_url or get_next_url(referrer=True))
@csrf.exempt
@lastuser_oauth.route('/login/<service>/callback', methods=['GET', 'POST'])
def login_service_callback(service):
    """
    Callback handler for a login service.

    The external provider redirects here after authentication; the
    provider object translates the callback into a ``userdata`` dict,
    which is then processed by :func:`login_service_postcallback`.
    """
    if service not in login_registry:
        abort(404)
    provider = login_registry[service]
    try:
        userdata = provider.callback()
    except (LoginInitError, LoginCallbackError) as e:
        # NOTE(review): `unicode` is Python 2 only; this module predates Python 3.
        msg = _(u"{service} login failed: {error}").format(service=provider.title, error=unicode(e))
        exception_catchall.send(e, message=msg)
        flash(msg, category='danger')
        if g.user:
            # Logged-in user tried to attach an external id and failed:
            # send them on their way without forcing a re-login.
            return redirect(get_next_url(referrer=False))
        else:
            return redirect(url_for('.login'))
    return login_service_postcallback(service, userdata)
def get_user_extid(service, userdata):
    """
    Retrieves a 'user', 'extid' and 'useremail' from the given service and userdata.

    Lookup order for the user: (1) an existing external id for this
    service, (2) a known email address from the provider's data,
    (3) an external id from a sibling provider of the same class.
    Any of the three returned values may be None.
    """
    provider = login_registry[service]
    extid = getextid(service=service, userid=userdata['userid'])

    useremail = None
    if userdata.get('email'):
        useremail = UserEmail.get(email=userdata['email'])

    user = None
    if extid is not None:
        # Known external id: it is authoritative for the user.
        user = extid.user
    elif useremail is not None and useremail.user is not None:
        # No external id, but the email address already belongs to a user.
        user = useremail.user
    else:
        # Cross-check with all other instances of the same LoginProvider (if we don't have a user)
        # This is (for eg) for when we have two Twitter services with different access levels.
        for other_service, other_provider in login_registry.items():
            if other_service != service and other_provider.__class__ == provider.__class__:
                other_extid = getextid(service=other_service, userid=userdata['userid'])
                if other_extid is not None:
                    user = other_extid.user
                    break

    # TODO: Make this work when we have multiple confirmed email addresses available
    return user, extid, useremail
def login_service_postcallback(service, userdata):
    """
    Process a provider's userdata after a successful external login.

    Creates or updates the external id record, finds or registers the
    matching user, attaches new email addresses, flags account mergers
    when the external identity belongs to a different user, logs the
    user in if needed, and finally redirects with a loginmethod cookie.
    """
    user, extid, useremail = get_user_extid(service, userdata)

    if extid is not None:
        # Existing external id: refresh the stored OAuth credentials.
        extid.oauth_token = userdata.get('oauth_token')
        extid.oauth_token_secret = userdata.get('oauth_token_secret')
        extid.oauth_token_type = userdata.get('oauth_token_type')
        extid.username = userdata.get('username')
        # TODO: Save refresh token and expiry date where present
        extid.oauth_refresh_token = userdata.get('oauth_refresh_token')
        extid.oauth_expiry_date = userdata.get('oauth_expiry_date')
        extid.oauth_refresh_expiry = userdata.get('oauth_refresh_expiry')  # TODO: Check this
    else:
        # New external id. Register it.
        extid = UserExternalId(
            user=user,  # This may be None right now. Will be handled below
            service=service,
            userid=userdata['userid'],
            username=userdata.get('username'),
            oauth_token=userdata.get('oauth_token'),
            oauth_token_secret=userdata.get('oauth_token_secret'),
            oauth_token_type=userdata.get('oauth_token_type')
            # TODO: Save refresh token
        )
        db.session.add(extid)

    if user is None:
        if g.user:
            # Attach this id to currently logged-in user
            user = g.user
            extid.user = user
        else:
            # Register a new user
            user = register_internal(None, userdata.get('fullname'), None)
            extid.user = user
            if userdata.get('username'):
                if valid_username(userdata['username']) and user.is_valid_username(userdata['username']):
                    # Set a username for this user if it's available
                    user.username = userdata['username']
    else:  # This id is attached to a user
        if g.user and g.user != user:
            # Woah! Account merger handler required
            # Always confirm with user before doing an account merger
            session['merge_userid'] = user.userid

    # Check for new email addresses
    if userdata.get('email') and not useremail:
        user.add_email(userdata['email'])

    # If there are multiple email addresses, add any that are not already claimed.
    # If they are already claimed by another user, this calls for an account merge
    # request, but we can only merge two users at a time. Ask for a merge if there
    # isn't already one pending
    if userdata.get('emails'):
        for email in userdata['emails']:
            existing = UserEmail.get(email)
            if existing:
                if existing.user != user and 'merge_userid' not in session:
                    session['merge_userid'] = existing.user.userid
            else:
                user.add_email(email)

    if userdata.get('emailclaim'):
        # Unverified address: record a claim and send a verification link.
        emailclaim = UserEmailClaim(user=user, email=userdata['emailclaim'])
        db.session.add(emailclaim)
        send_email_verify_link(emailclaim)

    # Is the user's fullname missing? Populate it.
    if not user.fullname and userdata.get('fullname'):
        user.fullname = userdata['fullname']

    if not g.user:  # If a user isn't already logged in, login now.
        login_internal(user)
        flash(_(u"You have logged in via {service}").format(service=login_registry[service].title), 'success')
    next_url = get_next_url(session=True)

    db.session.commit()

    # Finally: set a login method cookie and send user on their way
    if not user.is_profile_complete():
        # Incomplete profile: route through the profile form first.
        login_next = url_for('.profile_new', next=next_url)
    else:
        login_next = next_url

    if 'merge_userid' in session:
        # A pending merge takes priority over the normal redirect.
        return set_loginmethod_cookie(redirect(url_for('.profile_merge', next=login_next), code=303), service)
    else:
        return set_loginmethod_cookie(redirect(login_next, code=303), service)
@lastuser_oauth.route('/profile/merge', methods=['GET', 'POST'])
@requires_login
def profile_merge():
    """
    Ask the logged-in user to confirm (or decline) merging their account
    with the user stored in ``session['merge_userid']``.
    """
    if 'merge_userid' not in session:
        # Nothing to merge; bounce back to wherever the user came from.
        return redirect(get_next_url(), code=302)
    other_user = User.get(userid=session['merge_userid'])
    if other_user is None:
        # Stale session entry (user deleted or id invalid); clear it.
        session.pop('merge_userid', None)
        return redirect(get_next_url(), code=302)
    form = ProfileMergeForm()
    if form.validate_on_submit():
        if 'merge' in request.form:
            # User confirmed: merge and log in as the surviving account.
            new_user = merge_users(g.user, other_user)
            login_internal(new_user)
            user_data_changed.send(new_user, changes=['merge'])
            flash(_("Your accounts have been merged"), 'success')
            session.pop('merge_userid', None)
            db.session.commit()
            return redirect(get_next_url(), code=303)
        else:
            # User declined: drop the pending merge request.
            session.pop('merge_userid', None)
            return redirect(get_next_url(), code=303)
    return render_template('merge.html', form=form, user=g.user, other_user=other_user,
                           login_registry=login_registry)
|
|
import os
import random
import pickle
import math
import tensorflow as tf
import cv2
import numpy as np
import matplotlib.pyplot as plt
def random_crop(src, size):
    """Return a random crop of *src*.

    Parameters
    ----------
    src : ndarray of shape (H, W[, C])
        Source image.
    size : tuple
        ``(width, height)`` of the crop.

    Returns
    -------
    ndarray view of shape ``(size[1], size[0][, C])``.

    Raises
    ------
    ValueError
        If the requested crop is larger than the image.
    """
    height = src.shape[0]
    width = src.shape[1]
    # A crop of width size[0] may start anywhere in [0, width - size[0]]
    # inclusive. The original subtracted an extra 1, which both discarded
    # the right/bottom-most crop position and crashed (randint on an empty
    # range) when the crop exactly matched the image size.
    max_x = width - size[0]
    max_y = height - size[1]
    if max_x < 0 or max_y < 0:
        raise ValueError("crop size {} exceeds image size {}".format(size, (width, height)))
    x = random.randint(0, max_x)
    y = random.randint(0, max_y)
    return src[y: y + size[1], x: x + size[0]]
def extract_patch_list(src, size, stride):
    """Slide a window over *src* and collect the patches.

    Parameters
    ----------
    src : ndarray of shape (H, W[, C])
    size : tuple
        ``(width, height)`` of each patch.
    stride : tuple
        ``(horizontal, vertical)`` step between patch origins.

    Returns
    -------
    list of ndarray views, in row-major (top-to-bottom, left-to-right) order.
    """
    patch_list = []
    height = src.shape[0]
    width = src.shape[1]
    size_w, size_h = size
    stride_w, stride_h = stride
    # Number of valid window positions per axis. The `+ 1` counts the
    # window anchored at offset 0; the original omitted it, silently
    # dropping the last row/column of patches and returning an empty
    # list when the image exactly matched the patch size.
    w_q = (width - size_w) // stride_w + 1
    h_q = (height - size_h) // stride_h + 1
    for h in range(h_q):
        for w in range(w_q):
            top = h * stride_h
            left = w * stride_w
            patch_list.append(src[top: top + size_h, left: left + size_w])
    return patch_list
def load_img_list(dir_path):
    """Load every file in *dir_path* with OpenCV and return RGB arrays."""
    images = []
    for entry in os.listdir(dir_path):
        # cv2.imread returns BGR; convert to RGB for matplotlib/training.
        bgr = cv2.imread("{}/{}".format(dir_path, entry))
        images.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
    return images
def load_img_list_and_extract_patch_list(dir_path, size, stride):
    """Load all images under *dir_path* and return their patches, concatenated.

    *size* and *stride* are forwarded to :func:`extract_patch_list`.
    """
    all_patches = []
    for image in load_img_list(dir_path):
        all_patches.extend(extract_patch_list(image, size, stride))
    return all_patches
def blur_img_list(img_list, scale=2):
    """Simulate low-resolution inputs.

    Each image is bicubically downscaled by *scale* and then upscaled
    back to its original size, producing a blurred version with the
    same dimensions.
    """
    blurred = []
    for img in img_list:
        orig_h = img.shape[0]
        orig_w = img.shape[1]
        small = cv2.resize(img, (round(orig_w / scale), round(orig_h / scale)),
                           interpolation=cv2.INTER_CUBIC)
        restored = cv2.resize(small, (orig_w, orig_h), interpolation=cv2.INTER_CUBIC)
        blurred.append(restored)
    return blurred
"""
Model
"""
def conv2d(X, n_input, n_output, filter_size, activation=None, name=None, W=None, b=None):
    """2-D convolution layer with optional shared weights (TF1 graph mode).

    Parameters
    ----------
    X : Tensor of shape (batch, H, W, n_input)
    n_input, n_output : int
        Channel counts of input and output.
    filter_size : tuple
        ``(filter_h, filter_w)``.
    activation : callable or None
        Nonlinearity applied after the bias add.
    name : str
        Variable scope name for this layer.
    W, b : Variable or None
        Pass existing variables to share weights (used by the recursive
        inference layers); when None, fresh variables are created.

    Returns
    -------
    (h, W, b) -- the layer output and its (possibly shared) variables.
    """
    with tf.variable_scope(name):
        if W is None:
            W = tf.get_variable(
                name='W_1',
                shape=[filter_size[0], filter_size[1], n_input, n_output],
                initializer=tf.contrib.layers.xavier_initializer_conv2d())
        if b is None:
            b = tf.get_variable(
                name='b_1',
                shape=[n_output],
                initializer=tf.constant_initializer(0.))
        h = tf.nn.conv2d(X,
                         W,
                         strides=[1, 1, 1, 1],
                         padding='SAME'
                         )
        # Identity comparison is the idiomatic (and correct) None test;
        # `!= None` invokes __ne__, which on callables happens to work but
        # is fragile. NOTE(review): when activation is None the bias is
        # never added either -- preserved as-is since the reconstruction
        # layer relies on this exact behaviour.
        if activation is not None:
            h = activation(tf.nn.bias_add(h, b))
        return h, W, b
class USRCNN(object):
    """Deeply-recursive super-resolution CNN (TF1 graph mode).

    Builds an embedding network, a weight-shared recursive inference
    network, and per-recursion reconstruction outputs whose residual
    predictions are averaged into the final image.
    """

    def __init__(self, sess):
        # TF session used for all graph execution.
        self.sess = sess
        # (height, width, channels) of a single training patch.
        self.shape = (41, 41, 3)
        # Per-pixel normalization statistics; populated by train() or load().
        self.mean_img = None
        self.std_img = None
        # Best validation loss seen so far; used as the save threshold.
        self.min_loss = None
        self.build_model()
        sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()

    def load(self, sess, weights_path, meta_path):
        """Restore checkpointed weights and the pickled normalization metadata."""
        self.saver.restore(sess, weights_path)
        with open(meta_path, "rb") as f:
            meta = pickle.load(f)
        self.mean_img = meta['mean_img']
        self.std_img = meta['std_img']
        self.min_loss = meta['min_loss']

    def save(self, sess, weights_path, meta_path, min_loss, flag_export_graph=False, graph_path=None):
        """Checkpoint the weights and pickle normalization metadata.

        `min_loss` is stored so training can resume with the same save
        threshold. NOTE(review): `graph_path` is accepted but never used.
        """
        meta = {
            "mean_img": self.mean_img,
            "std_img": self.std_img,
            "shape": self.shape,
            "min_loss": min_loss
        }
        with open(meta_path, "wb") as f:
            pickle.dump(meta, f)
        self.saver.save(sess, weights_path, latest_filename="recent.ckpt", write_meta_graph=flag_export_graph)

    def build_model(self):
        """Construct the TF1 graph: placeholders, layers, loss, optimizer."""
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        # X: low-resolution (blurred) input; Y: high-resolution target.
        X = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='X')
        Y = tf.placeholder(tf.float32, shape=[None, height, width, channel], name='Y')
        start_learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        global_step = tf.Variable(0, trainable=False)
        # Two 3x3 conv layers lift the RGB input into 128 feature channels.
        embeding_layer_info_list = [
            {'name': 'embed/conv_1',
             'n_input': 3,
             'n_output': 128,
             'filter_size': (3, 3),
             'activation': tf.nn.relu},
            {'name': 'embed/conv_2',
             'n_input': 128,
             'n_output': 128,
             'filter_size': (3, 3),
             'activation': tf.nn.relu},
        ]
        inference_layer_info = {'name': 'inference/conv_1',
                                'n_input': 128,
                                'n_output': 128,
                                'filter_size': (3, 3),
                                'activation': tf.nn.relu}
        # Final projection back to 3 channels, no activation (residual output).
        reconstruction_layer_info = {'name': 'reconstruction/conv_1',
                                     'n_input': 128,
                                     'n_output': 3,
                                     'filter_size': (3, 3),
                                     'activation': None}
        current_input = X
        # embedding network
        for info in embeding_layer_info_list:
            current_input, _, _ = conv2d(X=current_input,
                                         n_input=info['n_input'],
                                         n_output=info['n_output'],
                                         filter_size=info['filter_size'],
                                         activation=info['activation'],
                                         name=info['name'],
                                         )
        # inference network
        inference_layer_output_list = []
        info = inference_layer_info
        recursion = 9
        # First application creates the W/b variables...
        current_input, W, b = conv2d(X=current_input,
                                     n_input=info['n_input'],
                                     n_output=info['n_output'],
                                     filter_size=info['filter_size'],
                                     activation=info['activation'],
                                     name=info['name'] + '/first',
                                     )
        # ...which are then re-used (shared weights) for every recursion;
        # each recursion's output feeds a reconstruction head below.
        for i in range(recursion):
            current_input, _, _ = conv2d(X=current_input,
                                         n_input=info['n_input'],
                                         n_output=info['n_output'],
                                         filter_size=info['filter_size'],
                                         activation=info['activation'],
                                         name=info['name'] + '/' + str(i),
                                         W=W,
                                         b=b)
            inference_layer_output_list.append(current_input)
        # reconstruction network
        local_output_list = []
        info = reconstruction_layer_info
        for i, inference in enumerate(inference_layer_output_list):
            local_output, _, _ = conv2d(X=inference,
                                        n_input=info['n_input'],
                                        n_output=info['n_output'],
                                        filter_size=info['filter_size'],
                                        activation=info['activation'],
                                        name=info['name'] + "/inference_{}".format(i), )
            # Residual connection: each head predicts a correction to X.
            local_output = tf.add(local_output, X)
            # Stack along a new leading axis so the heads can be averaged.
            local_output_5d = tf.expand_dims(local_output, 0)
            local_output_list.append(local_output_5d)
        local_output_concat = tf.concat(local_output_list, 0)
        print("local_output_concat shape : {}".format(local_output_concat.get_shape().as_list()))
        # Final prediction is the mean of all per-recursion outputs.
        average_img = tf.reduce_mean(local_output_concat, axis=0, name='average_output')
        print("average_image shape : {}".format(average_img.get_shape().as_list()))
        Y_pred = average_img
        print("Y_pred shape : {}".format(Y_pred.get_shape().as_list()))
        print("Y shape : {}".format(Y.get_shape().as_list()))
        # Sum of squared errors per image, averaged over the batch.
        cost = tf.reduce_mean(tf.reduce_sum(tf.square(Y_pred - Y), axis=[1, 2, 3]), axis=0, name="reduce_mean_cost")
        learning_rate = tf.train.exponential_decay(start_learning_rate, global_step,
                                                   10000, 0.96, staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        self.X = X
        self.Y = Y
        self.Y_pred = Y_pred
        self.cost = cost
        self.optimizer = optimizer
        self.start_learning_rate = start_learning_rate
        # NOTE(review): attribute name is misspelled ('gloabal'); kept as-is
        # since external code may already reference it.
        self.gloabal_step = global_step

    def train(self, X_train, Y_train, batch_size, n_epoch, start_learning_rate, save_dir_path, X_valid=None,
              Y_valid=None):
        """Run mini-batch training, checkpointing on validation improvement.

        NOTE(review): the per-epoch loss is computed on X_valid/Y_valid
        unconditionally, so despite the defaults this method fails if the
        validation set is omitted -- confirm with callers.
        """
        fig, axs = plt.subplots(1, 4, figsize=(20, 6))
        if self.min_loss is None:
            # Sentinel "worst" loss so the first epoch always checkpoints.
            self.min_loss = 999999999
        # figure
        epoch_list = []
        loss_list = []
        if self.mean_img is None or self.std_img is None:
            # Compute normalization statistics from the HR training targets.
            self.mean_img = np.mean(Y_train, axis=0)
            self.std_img = np.std(Y_train, axis=0)
            print("make mean_img and std_img")
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        # Pick one sample to visualize every 10 epochs.
        # NOTE(review): randint's upper bound here is len of the 2-tuple /
        # is inclusive -- looks like an off-by-one vs. the image count;
        # verify against the intended behaviour.
        test_img_source = (X_train, Y_train) if Y_valid is None else (X_valid, Y_valid)
        test_img_idx = random.randint(0, len(test_img_source))
        for epoch_i in range(n_epoch):
            print("epoh_i : {}".format(epoch_i))
            # Fresh random batch order each epoch.
            rand_idx_list = np.random.permutation(range(len(X_train)))
            n_batch = len(rand_idx_list) // batch_size
            for batch_i in range(n_batch):
                rand_idx = rand_idx_list[batch_i * batch_size: (batch_i + 1) * batch_size]
                batch_x = X_train[rand_idx]
                batch_y = Y_train[rand_idx]
                # Inputs and targets are normalized with the same statistics.
                self.sess.run(self.optimizer,
                              feed_dict={self.X: (batch_x - self.mean_img) / self.std_img,
                                         self.Y: (batch_y - self.mean_img) / self.std_img,
                                         self.start_learning_rate: start_learning_rate})
            # Validation loss once per epoch.
            loss = self.sess.run(self.cost, feed_dict={self.X: (X_valid - self.mean_img) / self.std_img,
                                                       self.Y: (Y_valid - self.mean_img) / self.std_img})
            print("loss : {}".format(loss))
            epoch_list.append(epoch_i)
            loss_list.append(loss)
            if loss < self.min_loss:
                # New best: checkpoint weights and metadata.
                self.min_loss = loss
                weights_path = "{}/weights".format(save_dir_path)
                meta_path = "{}/meta_data.pickle".format(save_dir_path)
                self.save(self.sess, weights_path=weights_path, meta_path=meta_path, min_loss=self.min_loss)
                print("-" * 30)
                print("Saved!")
                print("weights_path : {}".format(weights_path))
                print("meta_data_path : {}".format(meta_path))
                print("-" * 30)
            if epoch_i % 10 == 0:
                # Periodic visualization: original, degraded query,
                # current reconstruction, and the loss curve.
                test_img_origin = test_img_source[1][test_img_idx]
                test_img_query = test_img_source[0][test_img_idx]
                test_img_recon = np.reshape(test_img_query, [-1, height, width, channel])
                test_img_recon = self.Y_pred.eval(feed_dict={self.X: (test_img_recon - self.mean_img) / self.std_img},
                                                  session=self.sess)
                test_img_recon = np.reshape(test_img_recon, [height, width, channel])
                # Undo normalization and clamp to displayable pixel range.
                test_img_recon = test_img_recon * self.std_img + self.mean_img
                test_img_recon = np.clip(test_img_recon, 0, 255)
                test_img_recon = test_img_recon.astype(np.uint8)
                axs[0].imshow(test_img_origin)
                axs[0].set_title("origin")
                axs[1].imshow(test_img_query)
                axs[1].set_title("query")
                axs[2].imshow(test_img_recon)
                axs[2].set_title("reconstructed image_{}".format(epoch_i))
                axs[3].plot(epoch_list, loss_list)
                axs[3].set_xlabel("epoch_i")
                axs[3].set_ylabel("loss")
                axs[3].set_title("loss_{}".format(epoch_i))
                plt.pause(0.05)
        return self.sess

    def run(self, src):
        """Progressively upscale *src*, enhancing after each resize step."""
        expand_ratio = 1.2
        times = 8
        target = src
        target = self.enhance_resolution(target)
        for i in range(times):
            shape = target.shape
            height_resize = round(shape[0] * expand_ratio)
            width_resize = round(shape[1] * expand_ratio)
            target = cv2.resize(target, (width_resize, height_resize))
            target = self.enhance_resolution(target)
        return target

    def enhance_resolution(self, src):
        """Run *src* through the network patch-by-patch and reassemble it.

        The image is zero-padded to a multiple of the patch size, each
        patch is normalized, predicted, denormalized, and the result is
        cropped back to the source dimensions.
        """
        height = self.shape[0]
        width = self.shape[1]
        channel = self.shape[2]
        mean_img = self.mean_img
        std_img = self.std_img
        patch_list, shape = self.divide_img_to_patch(src, (width, height))
        patch_recon_list = []
        for patch in patch_list:
            patch_normalized = (patch - mean_img) / std_img
            patch_normalized = patch_normalized.reshape([1, height, width, channel])
            patch_recon = self.sess.run(self.Y_pred, feed_dict={self.X: patch_normalized})
            patch_recon = np.reshape(patch_recon, [height, width, channel])
            patch_recon = patch_recon * std_img + mean_img
            patch_recon = np.clip(patch_recon, 0, 255)
            patch_recon = patch_recon.astype(np.uint8)
            patch_recon_list.append(patch_recon)
        # Stitch patches back into rows, then rows into the full image.
        row_list = []
        for row in range(shape[0]):
            col_list = []
            for col in range(shape[1]):
                col_list.append(patch_recon_list[row * shape[1] + col])
            # NOTE(review): `row` (the loop index) is rebound to the row
            # image here; harmless since the index is no longer needed.
            row = np.concatenate(col_list, axis=1)
            row_list.append(row)
        recon_img = np.concatenate(row_list, axis=0)
        # Drop the zero padding added by divide_img_to_patch.
        recon_img = recon_img[:src.shape[0], :src.shape[1]]
        return recon_img

    def divide_img_to_patch(self, src, size):
        """Zero-pad *src* to a multiple of *size* (width, height) and split
        it into non-overlapping patches.

        Returns ``(patch_list, (rows, cols))`` in row-major order.
        """
        patch_list = []
        img_h = src.shape[0]
        img_w = src.shape[1]
        size_h = size[1]
        size_w = size[0]
        # Round the grid up so the whole image is covered.
        width_q = math.ceil(img_w / size_w)
        height_q = math.ceil(img_h / size_h)
        background = np.zeros(shape=(height_q * size_h, width_q * size_w, 3), dtype=src.dtype)
        background[:img_h, :img_w] = src
        src_with_background = background
        shape = (height_q, width_q)
        for h_i in range(height_q):
            for w_i in range(width_q):
                patch = src_with_background[h_i * size_h:(h_i + 1) * size_h, w_i * size_w: (w_i + 1) * size_w]
                patch_list.append(patch)
        return patch_list, shape
def run():
    """Build patch datasets from ./data/urban_hr, train a USRCNN, return it.

    Splits the shuffled patches 80/10/10 into train/valid/test, restores
    any previous checkpoint from ./model and resumes training.
    """
    height = 50
    width = 50
    size = (width, height)
    # Non-overlapping patches: stride equals the patch size.
    # BUGFIX: the original called load_img_list_and_extract_patch_list
    # without its required `stride` argument and passed a tuple as
    # blur_img_list's numeric `scale` -- both raised TypeError at runtime.
    img_list = load_img_list_and_extract_patch_list("./data/urban_hr", size, size)
    X_all = np.array(blur_img_list(img_list, scale=2))
    Y_all = np.array(img_list)
    # NOTE(review): USRCNN.shape is (41, 41, 3) while these patches are
    # 50x50 -- confirm the intended patch size before training.
    # data: shuffle inputs and targets with the same permutation.
    rand_idx = np.random.permutation(range(len(X_all)))
    X_all = X_all[rand_idx]
    Y_all = Y_all[rand_idx]
    train_ratio = 0.8
    valid_ratio = 0.1
    test_ratio = 0.1
    data_num = len(X_all)
    train_data_num = round(data_num * train_ratio)
    valid_data_num = round(data_num * valid_ratio)
    test_data_num = round(data_num * test_ratio)
    X_train = X_all[:train_data_num]
    Y_train = Y_all[:train_data_num]
    X_valid = X_all[train_data_num:train_data_num + valid_data_num]
    Y_valid = Y_all[train_data_num:train_data_num + valid_data_num]
    # Held-out test split (not used during training).
    X_test = X_all[train_data_num + valid_data_num:train_data_num + valid_data_num + test_data_num]
    Y_test = Y_all[train_data_num + valid_data_num:train_data_num + valid_data_num + test_data_num]
    sess = tf.Session()
    usrcnn = USRCNN(sess)
    usrcnn.load(sess, './model/weights', './model/meta_data.pickle')
    # BUGFIX: train() requires start_learning_rate; the original call
    # omitted it and raised TypeError.
    usrcnn.train(X_train, Y_train, X_valid=X_valid, Y_valid=Y_valid,
                 batch_size=64, n_epoch=3000, start_learning_rate=0.001,
                 save_dir_path='./model')
    return usrcnn
def test():
    """Degrade one celeba image 3x via bicubic resampling, restore a
    checkpointed USRCNN and display its enhanced reconstruction."""
    def _load_images(dir_path):
        # Load every file in the directory as an RGB array.
        images = []
        for name in os.listdir(dir_path):
            bgr = cv2.imread("{}/{}".format(dir_path, name))
            images.append(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
        return images

    originals = _load_images("./data/celeba")
    sample = originals[10]
    full_h = sample.shape[0]
    full_w = sample.shape[1]
    # Downscale by 3, then upscale back: a blurred same-size input.
    degraded = cv2.resize(sample, (full_w // 3, full_h // 3),
                          interpolation=cv2.INTER_CUBIC)
    degraded = cv2.resize(degraded, (full_w, full_h),
                          interpolation=cv2.INTER_CUBIC)
    with tf.Session() as sess:
        usrcnn = USRCNN(sess)
        usrcnn.load(sess, './model/weights', "./model/meta_data.pickle")
        result = usrcnn.enhance_resolution(degraded)
        plt.imshow(result)
"""
def load_img_list_and_extract_patch_list(dir_path):
name_list = os.listdir(dir_path)
img_list = []
for name in name_list:
img_path = "{}/{}".format(dir_path, name)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_list.append(img)
return img_list
test_img_list = load_img_list_and_extract_patch_list("./data/celeba")
test_img = test_img_list[10]
test_img_resized = cv2.resize(test_img, (test_img.shape[1]//3, test_img.shape[0]//3), interpolation = cv2.INTER_CUBIC)
test_img_resized = cv2.resize(test_img_resized, (test_img.shape[1],test_img.shape[0]), interpolation = cv2.INTER_CUBIC)
sess = tf.Session():
usrcnn = USRCNN(sess)
usrcnn.load(sess, './model/weights', "./model/meta_data.pickle")
result = usrcnn.enhance_resolution(test_img_resized)
plt.imshow(result)
"""
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountCredentialsOperations(object):
"""StorageAccountCredentialsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # Serializer/deserializer for request parameters and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, credentials, ...).
        self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.StorageAccountCredentialList"]
"""Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
Gets all the storage account credentials in a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountCredentialList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredentialList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredentialList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountCredentialList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageAccountCredential"
"""Gets the properties of the specified storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountCredential, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.StorageAccountCredential"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccountCredential"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(storage_account_credential, 'StorageAccountCredential')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_create_or_update(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
storage_account_credential, # type: "_models.StorageAccountCredential"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.StorageAccountCredential"]
"""Creates or updates the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param storage_account_credential: The storage account credential.
:type storage_account_credential: ~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either StorageAccountCredential or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01_preview.models.StorageAccountCredential]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountCredential"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
storage_account_credential=storage_account_credential,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccountCredential', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def _delete_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
def begin_delete(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the storage account credential.
:param device_name: The device name.
:type device_name: str
:param name: The storage account credential name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
device_name=device_name,
name=name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'} # type: ignore
|
|
import urllib
from urlparse import urlparse, urlunparse, urlsplit
import sys
import os
import re
import mimetypes
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.db import transaction, close_connection
from django.test.utils import ContextList
# Names exported by ``from django.test.client import *``.
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
# Fixed MIME boundary used for every multipart request the test client
# builds; a constant value keeps generated payloads reproducible in tests.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
# Extracts the "charset" parameter from a Content-Type header.
# Fix: use a raw string. The original non-raw literal relied on Python
# passing the unknown escapes "\w" and "\d" through unchanged, which is
# deprecated behaviour; the compiled pattern is identical.
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
class FakePayload(object):
    """
    A StringIO wrapper that mimics reading from a network socket: the
    payload cannot be seeked and cannot be read past its declared content
    length. This makes sure that views can't do anything under the test
    client that wouldn't work in Real Life.
    """
    def __init__(self, content):
        self.__content = StringIO(content)
        self.__len = len(content)

    def read(self, num_bytes=None):
        # With no explicit size, drain whatever is still unread.
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        chunk = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return chunk
class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object rather than a WSGI byte stream.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        # When False, CsrfViewMiddleware is bypassed via the
        # _dont_enforce_csrf_checks flag set on each request in __call__.
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        # Imported here rather than at module level because settings may not
        # be configured when this module is first imported.
        from django.conf import settings
        from django.core import signals

        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        signals.request_started.send(sender=self.__class__)
        try:
            request = WSGIRequest(environ)
            # sneaky little hack so that we can easily get round
            # CsrfViewMiddleware.  This makes life easier, and is probably
            # required for backwards compatibility with external tests against
            # admin views.
            request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
            response = self.get_response(request)
        finally:
            # Temporarily disconnect close_connection so the test database
            # connection survives the request_finished signal, then restore
            # it. The order of these three lines is deliberate.
            signals.request_finished.disconnect(close_connection)
            signals.request_finished.send(sender=self.__class__)
            signals.request_finished.connect(close_connection)

        return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Signal receiver that records every rendered template and its context
    into ``store`` under the 'templates' and 'context' keys.
    """
    templates = store.setdefault('templates', [])
    templates.append(template)
    contexts = store.setdefault('context', ContextList())
    contexts.append(context)
def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    Each key is used as the form data name. File-like values (anything with
    a callable ``read``) are embedded via encode_file(); iterable non-string
    values contribute one field per item (HTTP field names may be
    duplicated); any other value is transmitted as str(value).
    """
    to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
    # Not by any means perfect, but good enough for our purposes.
    is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)

    def field(key, value):
        # One plain (non-file) multipart section.
        return [
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % to_str(key),
            '',
            to_str(value)
        ]

    lines = []
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, basestring) and is_iterable(value):
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend(field(key, item))
        else:
            lines.extend(field(key, value))

    # Closing boundary plus trailing CRLF.
    lines.extend([
        '--' + boundary + '--',
        '',
    ])
    return '\r\n'.join(lines)
def encode_file(boundary, key, file):
    """
    Renders one uploaded file as the lines of a multipart/form-data section.
    """
    to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
    # Prefer an explicit content_type attribute; otherwise guess from the
    # file name, falling back to a generic binary type.
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    else:
        content_type = mimetypes.guess_type(file.name)[0]
    if content_type is None:
        content_type = 'application/octet-stream'
    disposition = 'Content-Disposition: form-data; name="%s"; filename="%s"' \
        % (to_str(key), to_str(os.path.basename(file.name)))
    return [
        '--' + boundary,
        disposition,
        'Content-Type: %s' % content_type,
        '',
        file.read()
    ]
class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        # ``defaults`` entries are merged into every generated WSGI environ.
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = StringIO()

    def _base_environ(self, **request):
        """
        The base environment for a request.

        Keyword arguments override both the factory-wide defaults and the
        baseline values below.
        """
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REMOTE_ADDR': '127.0.0.1',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'http',
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        environ.update(self.defaults)
        environ.update(request)
        return environ

    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))

    def _get_path(self, parsed):
        # If there are URL parameters (the ';'-delimited component), keep
        # them attached to the path before unquoting.
        if parsed[3]:
            return urllib.unquote(parsed[2] + ";" + parsed[3])
        else:
            return urllib.unquote(parsed[2])

    def get(self, path, data=None, **extra):
        "Construct a GET request."
        # Fix: default was a mutable ``{}`` shared between calls; use None
        # as the sentinel instead (behaviour is unchanged for all callers).
        data = {} if data is None else data
        parsed = urlparse(path)
        r = {
            'CONTENT_TYPE': 'text/html; charset=utf-8',
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'GET',
            'wsgi.input': FakePayload('')
        }
        r.update(extra)
        return self.request(**r)

    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             **extra):
        "Construct a POST request."
        data = {} if data is None else data
        if content_type is MULTIPART_CONTENT:
            post_data = encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            post_data = smart_str(data, encoding=charset)

        parsed = urlparse(path)
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': parsed[4],
            'REQUEST_METHOD': 'POST',
            'wsgi.input': FakePayload(post_data),
        }
        r.update(extra)
        return self.request(**r)

    def head(self, path, data=None, **extra):
        "Construct a HEAD request."
        data = {} if data is None else data
        parsed = urlparse(path)
        r = {
            'CONTENT_TYPE': 'text/html; charset=utf-8',
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'HEAD',
            'wsgi.input': FakePayload('')
        }
        r.update(extra)
        return self.request(**r)

    def options(self, path, data=None, **extra):
        "Construct an OPTIONS request."
        data = {} if data is None else data
        parsed = urlparse(path)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'OPTIONS',
            'wsgi.input': FakePayload('')
        }
        r.update(extra)
        return self.request(**r)

    def put(self, path, data=None, content_type=MULTIPART_CONTENT,
            **extra):
        "Construct a PUT request."
        data = {} if data is None else data
        if content_type is MULTIPART_CONTENT:
            post_data = encode_multipart(BOUNDARY, data)
        else:
            post_data = data

        # Make `data` into a querystring only if it's not already a string. If
        # it is a string, we'll assume that the caller has already encoded it.
        query_string = None
        if not isinstance(data, basestring):
            query_string = urlencode(data, doseq=True)

        parsed = urlparse(path)
        r = {
            'CONTENT_LENGTH': len(post_data),
            'CONTENT_TYPE': content_type,
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': query_string or parsed[4],
            'REQUEST_METHOD': 'PUT',
            'wsgi.input': FakePayload(post_data),
        }
        r.update(extra)
        return self.request(**r)

    def delete(self, path, data=None, **extra):
        "Construct a DELETE request."
        data = {} if data is None else data
        parsed = urlparse(path)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
            'REQUEST_METHOD': 'DELETE',
            'wsgi.input': FakePayload('')
        }
        r.update(extra)
        return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
The master request method. Composes the environment dictionary
and passes to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Provide a backwards-compatible (but pending deprecation) response.template
def _get_template(self):
warnings.warn("response.template is deprecated; use response.templates instead (which is always a list)",
PendingDeprecationWarning, stacklevel=2)
if not self.templates:
return None
elif len(self.templates) == 1:
return self.templates[0]
return self.templates
response.__class__.template = property(_get_template)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data={}, follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
session = import_module(settings.SESSION_ENGINE).SessionStore()
session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if session_cookie:
session.delete(session_key=session_cookie.value)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
    "Follows any redirects by requesting responses from the server using GET."
    # Accumulate (url, status_code) pairs for every hop taken.
    response.redirect_chain = []
    while response.status_code in (301, 302, 303, 307):
        url = response['Location']
        scheme, netloc, path, query, fragment = urlsplit(url)

        redirect_chain = response.redirect_chain
        redirect_chain.append((url, response.status_code))

        if scheme:
            extra['wsgi.url_scheme'] = scheme

        # The test client doesn't handle external links,
        # but since the situation is simulated in test_client,
        # we fake things here by ignoring the netloc portion of the
        # redirected URL.
        response = self.get(path, QueryDict(query), follow=False, **extra)
        # Carry the accumulated chain over onto the new response object.
        response.redirect_chain = redirect_chain

        # Prevent loops: stop as soon as the latest (url, status) pair
        # has already appeared earlier in the chain.
        if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
            break
    return response
# ---------------------------------------------------------------------------
# (file boundary: the content below is a separate module, test_mongo_client)
# ---------------------------------------------------------------------------
# Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the mongo_client module."""
import contextlib
import datetime
import os
import socket
import struct
import sys
import time
import traceback
import warnings
sys.path[0:0] = [""]
from bson import BSON
from bson.codec_options import CodecOptions
from bson.py3compat import thread, u
from bson.son import SON
from bson.tz_util import utc
from pymongo import auth, message
from pymongo.cursor import CursorType
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure,
CursorNotFound,
NetworkTimeout,
InvalidURI)
from pymongo.mongo_client import MongoClient
from pymongo.pool import SocketInfo
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test import (client_context,
client_knobs,
host,
pair,
port,
SkipTest,
unittest,
IntegrationTest,
db_pwd,
db_user,
MockClientTest)
from test.pymongo_mocks import MockClient
from test.utils import (assertRaisesExactly,
delay,
remove_all_users,
server_is_master_with_slave,
get_pool,
one,
connected,
wait_until,
rs_or_single_client,
rs_or_single_client_noauth,
lazy_client_trial,
NTHREADS)
class ClientUnitTest(unittest.TestCase):
    """MongoClient tests that don't require a server."""

    @classmethod
    def setUpClass(cls):
        # connect=False keeps the client from reaching out to a server;
        # the short selection timeout keeps any accidental waits brief.
        cls.client = MongoClient(host, port, connect=False,
                                 serverSelectionTimeoutMS=100)

    def test_keyword_arg_defaults(self):
        """Keyword options are parsed into the client's pool options."""
        client = MongoClient(socketTimeoutMS=None,
                             connectTimeoutMS=20000,
                             waitQueueTimeoutMS=None,
                             waitQueueMultiple=None,
                             socketKeepAlive=False,
                             replicaSet=None,
                             read_preference=ReadPreference.PRIMARY,
                             ssl=False,
                             ssl_keyfile=None,
                             ssl_certfile=None,
                             ssl_cert_reqs=0,  # ssl.CERT_NONE
                             ssl_ca_certs=None,
                             connect=False,
                             serverSelectionTimeoutMS=12000)

        options = client._MongoClient__options
        pool_opts = options.pool_options
        self.assertEqual(None, pool_opts.socket_timeout)
        # socket.Socket.settimeout takes a float in seconds
        self.assertEqual(20.0, pool_opts.connect_timeout)
        self.assertEqual(None, pool_opts.wait_queue_timeout)
        self.assertEqual(None, pool_opts.wait_queue_multiple)
        self.assertFalse(pool_opts.socket_keepalive)
        self.assertEqual(None, pool_opts.ssl_context)
        self.assertEqual(None, options.replica_set_name)
        self.assertEqual(ReadPreference.PRIMARY, client.read_preference)
        # Millisecond options become seconds on the client.
        self.assertAlmostEqual(12, client.server_selection_timeout)

    def test_types(self):
        # Host must be a string and port an int.
        self.assertRaises(TypeError, MongoClient, 1)
        self.assertRaises(TypeError, MongoClient, 1.14)
        self.assertRaises(TypeError, MongoClient, "localhost", "27017")
        self.assertRaises(TypeError, MongoClient, "localhost", 1.14)
        self.assertRaises(TypeError, MongoClient, "localhost", [])

        self.assertRaises(ConfigurationError, MongoClient, [])

    def test_max_pool_size_zero(self):
        with self.assertRaises(ValueError):
            MongoClient(maxPoolSize=0)

    def test_get_db(self):
        """Invalid database names raise InvalidName; valid ones work."""
        def make_db(base, name):
            return base[name]

        # Names containing $, ., \, /, a space, or nothing are rejected.
        self.assertRaises(InvalidName, make_db, self.client, "")
        self.assertRaises(InvalidName, make_db, self.client, "te$t")
        self.assertRaises(InvalidName, make_db, self.client, "te.t")
        self.assertRaises(InvalidName, make_db, self.client, "te\\t")
        self.assertRaises(InvalidName, make_db, self.client, "te/t")
        self.assertRaises(InvalidName, make_db, self.client, "te st")

        # Attribute access, item access, and the constructor all agree.
        self.assertTrue(isinstance(self.client.test, Database))
        self.assertEqual(self.client.test, self.client["test"])
        self.assertEqual(self.client.test, Database(self.client, "test"))

    def test_get_database(self):
        # get_database propagates codec options, read preference, and
        # write concern onto the returned Database.
        codec_options = CodecOptions(tz_aware=True)
        write_concern = WriteConcern(w=2, j=True)
        db = self.client.get_database(
            'foo', codec_options, ReadPreference.SECONDARY, write_concern)
        self.assertEqual('foo', db.name)
        self.assertEqual(codec_options, db.codec_options)
        self.assertEqual(ReadPreference.SECONDARY, db.read_preference)
        self.assertEqual(write_concern, db.write_concern)

    def test_getattr(self):
        # Item access works for underscore-prefixed names...
        self.assertTrue(isinstance(self.client['_does_not_exist'], Database))

        # ...but attribute access raises, with a helpful message.
        with self.assertRaises(AttributeError) as context:
            self.client._does_not_exist

        # Message should be:
        # "AttributeError: MongoClient has no attribute '_does_not_exist'. To
        # access the _does_not_exist database, use client['_does_not_exist']".
        self.assertIn("has no attribute '_does_not_exist'",
                      str(context.exception))

    def test_iteration(self):
        # A MongoClient is not iterable.
        def iterate():
            [a for a in self.client]

        self.assertRaises(TypeError, iterate)

    def test_get_default_database(self):
        c = MongoClient("mongodb://%s:%d/foo" % (host, port), connect=False)
        self.assertEqual(Database(c, 'foo'), c.get_default_database())

    def test_get_default_database_error(self):
        # URI with no database.
        c = MongoClient("mongodb://%s:%d/" % (host, port), connect=False)
        self.assertRaises(ConfigurationError, c.get_default_database)

    def test_get_default_database_with_authsource(self):
        # Ensure we distinguish database name from authSource.
        uri = "mongodb://%s:%d/foo?authSource=src" % (host, port)
        c = MongoClient(uri, connect=False)
        self.assertEqual(Database(c, 'foo'), c.get_default_database())
class TestClient(IntegrationTest):
def test_constants(self):
# Set bad defaults.
MongoClient.HOST = "somedomainthatdoesntexist.org"
MongoClient.PORT = 123456789
with self.assertRaises(AutoReconnect):
connected(MongoClient(serverSelectionTimeoutMS=10))
# Override the defaults. No error.
connected(MongoClient(host, port))
# Set good defaults.
MongoClient.HOST = host
MongoClient.PORT = port
# No error.
connected(MongoClient())
def test_init_disconnected(self):
c = rs_or_single_client(connect=False)
self.assertIsInstance(c.is_primary, bool)
self.assertIsInstance(c.is_mongos, bool)
self.assertIsInstance(c.max_pool_size, int)
self.assertIsInstance(c.nodes, frozenset)
self.assertEqual(c.codec_options, CodecOptions())
self.assertIsInstance(c.max_bson_size, int)
self.assertIsInstance(c.max_write_batch_size, int)
self.assertFalse(c.primary)
self.assertFalse(c.secondaries)
c.pymongo_test.command('ismaster') # Auto-connect.
if client_context.is_rs:
# The primary's host and port are from the replica set config.
self.assertIsNotNone(c.address)
else:
self.assertEqual(c.address, (host, port))
bad_host = "somedomainthatdoesntexist.org"
c = MongoClient(bad_host, port, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_init_disconnected_with_auth(self):
uri = "mongodb://user:pass@somedomainthatdoesntexist"
c = MongoClient(uri, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_equality(self):
c = connected(rs_or_single_client())
self.assertEqual(client_context.rs_or_standalone_client, c)
# Explicitly test inequality
self.assertFalse(client_context.rs_or_standalone_client != c)
def test_host_w_port(self):
with self.assertRaises(AutoReconnect):
connected(MongoClient("%s:1234567" % host, connectTimeoutMS=1,
serverSelectionTimeoutMS=10))
def test_repr(self):
# Making host a str avoids the 'u' prefix in Python 2, so the repr is
# the same in Python 2 and 3.
self.assertEqual(repr(MongoClient(str(host), port)),
"MongoClient('%s', %d)" % (host, port))
@client_context.require_replica_set
def test_repr_replica_set(self):
# Like MongoClient(["localhost:27017", "localhost:27018"]).
self.assertIn("MongoClient([", repr(self.client))
for node in client_context.nodes:
self.assertIn("%s:%d" % node, repr(self.client))
def test_getters(self):
self.assertEqual(client_context.client.address, (host, port))
self.assertEqual(client_context.nodes, self.client.nodes)
def test_database_names(self):
self.client.pymongo_test.test.insert_one({"dummy": u("object")})
self.client.pymongo_test_mike.test.insert_one({"dummy": u("object")})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
    # drop_database accepts only a database name or Database instance.
    self.assertRaises(TypeError, self.client.drop_database, 5)
    self.assertRaises(TypeError, self.client.drop_database, None)

    self.client.pymongo_test.test.insert_one({"dummy": u("object")})
    self.client.pymongo_test2.test.insert_one({"dummy": u("object")})
    dbs = self.client.database_names()
    self.assertIn("pymongo_test", dbs)
    self.assertIn("pymongo_test2", dbs)
    # Exercise both accepted argument forms: name and Database instance.
    self.client.drop_database("pymongo_test")
    self.client.drop_database(self.client.pymongo_test2)

    # NOTE(review): the unconditional SkipTest below makes the remaining
    # assertions unreachable; they appear to be kept deliberately for when
    # SERVER-2329 is fixed — confirm before removing.
    raise SkipTest("This test often fails due to SERVER-2329")

    dbs = self.client.database_names()
    self.assertNotIn("pymongo_test", dbs)
    self.assertNotIn("pymongo_test2", dbs)
def test_close(self):
    """close() is idempotent and the client reconnects on next use."""
    collection = self.client.pymongo_test.bar
    # Close twice, then run a query; repeating the cycle proves the
    # client can be closed and revived any number of times.
    for _ in range(2):
        self.client.close()
        self.client.close()
        collection.count()
def test_bad_uri(self):
with self.assertRaises(InvalidURI):
MongoClient("http://localhost")
@client_context.require_auth
def test_auth_from_uri(self):
self.client.admin.add_user("admin", "pass", roles=["root"])
self.addCleanup(self.client.admin.remove_user, 'admin')
self.addCleanup(remove_all_users, self.client.pymongo_test)
self.client.pymongo_test.add_user(
"user", "pass", roles=['userAdmin', 'readWrite'])
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(
"mongodb://a:b@%s:%d" % (host, port)))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://admin:pass@%s:%d" % (host, port)))
# Wrong database.
uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port)
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(uri))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port)))
# Auth with lazy connection.
rs_or_single_client(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port),
connect=False).pymongo_test.test.find_one()
# Wrong password.
bad_client = rs_or_single_client(
"mongodb://user:wrong@%s:%d/pymongo_test" % (host, port),
connect=False)
self.assertRaises(OperationFailure,
bad_client.pymongo_test.test.find_one)
@client_context.require_auth
def test_multiple_logins(self):
self.client.pymongo_test.add_user('user1', 'pass', roles=['readWrite'])
self.client.pymongo_test.add_user('user2', 'pass', roles=['readWrite'])
self.addCleanup(remove_all_users, self.client.pymongo_test)
client = rs_or_single_client_noauth(
"mongodb://user1:pass@%s:%d/pymongo_test" % (host, port))
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
# Can't log in to the same database with multiple users.
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
client.pymongo_test.logout()
with self.assertRaises(OperationFailure):
client.pymongo_test.test.find_one()
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
client.pymongo_test.authenticate('user1', 'pass')
client.pymongo_test.test.find_one()
@client_context.require_auth
def test_lazy_auth_raises_operation_failure(self):
lazy_client = rs_or_single_client(
"mongodb://user:wrong@%s/pymongo_test" % host, connect=False)
assertRaisesExactly(
OperationFailure, lazy_client.test.collection.find_one)
def test_unix_socket(self):
if not hasattr(socket, "AF_UNIX"):
raise SkipTest("UNIX-sockets are not supported on this system")
mongodb_socket = '/tmp/mongodb-27017.sock'
if not os.access(mongodb_socket, os.R_OK):
raise SkipTest("Socket file is not accessible")
if client_context.auth_enabled:
uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, mongodb_socket)
else:
uri = "mongodb://%s" % mongodb_socket
# Confirm we can do operations via the socket.
client = MongoClient(uri)
client.pymongo_test.test.insert_one({"dummy": "object"})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
# Confirm it fails with a missing socket.
self.assertRaises(
ConnectionFailure,
connected, MongoClient("mongodb:///tmp/non-existent.sock",
serverSelectionTimeoutMS=100))
def test_fork(self):
# Test using a client before and after a fork.
if sys.platform == "win32":
raise SkipTest("Can't fork on windows")
try:
import multiprocessing
except ImportError:
raise SkipTest("No multiprocessing module")
db = self.client.pymongo_test
# Ensure a socket is opened before the fork.
db.test.find_one()
def f(pipe):
try:
kill_cursors_executor = self.client._kill_cursors_executor
servers = self.client._topology.select_servers(
any_server_selector)
# In child, only the thread that called fork() is alive.
# The first operation should revive the rest.
db.test.find_one()
wait_until(
lambda: all(s._monitor._executor._thread.is_alive()
for s in servers),
"restart monitor threads")
wait_until(lambda: kill_cursors_executor._thread.is_alive(),
"restart kill-cursors executor")
except:
traceback.print_exc() # Aid debugging.
pipe.send(True)
parent_pipe, child_pipe = multiprocessing.Pipe()
p = multiprocessing.Process(target=f, args=(child_pipe,))
p.start()
p.join(10)
child_pipe.close()
# Pipe will only have data if the child process failed.
try:
parent_pipe.recv()
self.fail()
except EOFError:
pass
def test_document_class(self):
c = self.client
db = c.pymongo_test
db.test.insert_one({"x": 1})
self.assertEqual(dict, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c = rs_or_single_client(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
def test_timeouts(self):
client = rs_or_single_client(connectTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.connect_timeout)
client = rs_or_single_client(socketTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.socket_timeout)
def test_socket_timeout_ms_validation(self):
c = rs_or_single_client(socketTimeoutMS=10 * 1000)
self.assertEqual(10, get_pool(c).opts.socket_timeout)
c = connected(rs_or_single_client(socketTimeoutMS=None))
self.assertEqual(None, get_pool(c).opts.socket_timeout)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=0)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=-1)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=1e10)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS='foo')
def test_socket_timeout(self):
no_timeout = self.client
timeout_sec = 1
timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert_one({"x": 1})
# A $where clause that takes a second longer than the timeout
where_func = delay(timeout_sec + 1)
def get_x(db):
doc = next(db.test.find().where(where_func))
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test)
def test_server_selection_timeout(self):
client = MongoClient(serverSelectionTimeoutMS=100, connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(serverSelectionTimeoutMS=0, connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS="foo", connect=False)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS=-1, connect=False)
self.assertRaises(ConfigurationError, MongoClient,
serverSelectionTimeoutMS=None, connect=False)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
self.assertRaises(ValueError, MongoClient,
'mongodb://localhost/?serverSelectionTimeoutMS=-1',
connect=False)
self.assertRaises(ValueError, MongoClient,
'mongodb://localhost/?serverSelectionTimeoutMS=',
connect=False)
def test_waitQueueTimeoutMS(self):
client = rs_or_single_client(waitQueueTimeoutMS=2000)
self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2)
def test_waitQueueMultiple(self):
client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2)
pool = get_pool(client)
self.assertEqual(pool.opts.wait_queue_multiple, 2)
self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6)
def test_socketKeepAlive(self):
client = rs_or_single_client(socketKeepAlive=True)
self.assertTrue(get_pool(client).opts.socket_keepalive)
def test_tz_aware(self):
self.assertRaises(ValueError, MongoClient, tz_aware='foo')
aware = rs_or_single_client(tz_aware=True)
naive = self.client
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert_one({"x": now})
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
@client_context.require_ipv6
def test_ipv6(self):
if client_context.auth_enabled:
auth_str = "%s:%s@" % (db_user, db_pwd)
else:
auth_str = ""
uri = "mongodb://%s[::1]:%d" % (auth_str, port)
if client_context.is_rs:
uri += '/?replicaSet=' + client_context.replica_set_name
client = rs_or_single_client_noauth(uri)
client.pymongo_test.test.insert_one({"dummy": u("object")})
client.pymongo_test_bernie.test.insert_one({"dummy": u("object")})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
@client_context.require_no_mongos
def test_fsync_lock_unlock(self):
    """fsync/lock the server, then unlock and poll until the lock clears."""
    if (server_is_master_with_slave(client_context.client) and
            client_context.version.at_least(2, 3, 0)):
        raise SkipTest('SERVER-7714')

    self.assertFalse(self.client.is_locked)
    # async flushing not supported on windows...
    if sys.platform not in ('cygwin', 'win32'):
        # 'async' became a reserved keyword in Python 3.7, so writing
        # fsync(async=True) is a SyntaxError; pass the same keyword
        # argument via dict expansion instead.
        self.client.fsync(**{'async': True})
        self.assertFalse(self.client.is_locked)
    self.client.fsync(lock=True)
    self.assertTrue(self.client.is_locked)
    locked = True
    self.client.unlock()
    # Unlocking may take effect asynchronously on the server; poll for
    # up to five seconds for the lock to be released.
    for _ in range(5):
        locked = self.client.is_locked
        if not locked:
            break
        time.sleep(1)
    self.assertFalse(locked)
def test_contextlib(self):
client = rs_or_single_client()
client.pymongo_test.drop_collection("test")
client.pymongo_test.test.insert_one({"foo": "bar"})
# The socket used for the previous commands has been returned to the
# pool
self.assertEqual(1, len(get_pool(client).sockets))
with contextlib.closing(client):
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(1, len(get_pool(client).sockets))
self.assertEqual(0, len(get_pool(client).sockets))
with client as client:
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(0, len(get_pool(client).sockets))
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
# We can't figure out how to raise an exception on a thread that's
# blocked on a socket, whether that's the main thread or a worker,
# without simply killing the whole thread in Jython. This suggests
# PYTHON-294 can't actually occur in Jython.
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure MongoClient closes its
# socket if it gets an interrupt while waiting to recv() from it.
db = self.client.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert_one({'_id': 1})
def interrupter():
# Raises KeyboardInterrupt in the main thread
time.sleep(0.25)
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
next(db.foo.find({'$where': where}))
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
next(db.foo.find())
)
def test_operation_failure(self):
# Ensure MongoClient doesn't close socket after it gets an error
# response to getLastError. PYTHON-395.
pool = get_pool(self.client)
socket_count = len(pool.sockets)
self.assertGreaterEqual(socket_count, 1)
old_sock_info = next(iter(pool.sockets))
self.client.pymongo_test.test.drop()
self.client.pymongo_test.test.insert_one({'_id': 'foo'})
self.assertRaises(
OperationFailure,
self.client.pymongo_test.test.insert_one, {'_id': 'foo'})
self.assertEqual(socket_count, len(pool.sockets))
new_sock_info = next(iter(pool.sockets))
self.assertEqual(old_sock_info, new_sock_info)
def test_kill_cursors(self):
if (client_context.is_mongos
and not client_context.version.at_least(2, 4, 7)):
# Old mongos sends incorrectly formatted error response when
# cursor isn't found, see SERVER-9738.
raise SkipTest("Can't test kill_cursors against old mongos")
self.collection = self.client.pymongo_test.test
self.collection.drop()
self.collection.insert_many([{'_id': i} for i in range(200)])
cursor = self.collection.find().batch_size(1)
next(cursor)
self.client.kill_cursors([cursor.cursor_id])
# Prevent killcursors from reaching the server while a getmore is in
# progress -- the server logs "Assertion: 16089:Cannot kill active
# cursor."
time.sleep(2)
def raises_cursor_not_found():
try:
next(cursor)
return False
except CursorNotFound:
return True
wait_until(raises_cursor_not_found, 'close cursor')
def test_kill_cursors_with_server_unavailable(self):
with client_knobs(kill_cursor_frequency=9999999):
client = MongoClient('doesnt exist', connect=False,
serverSelectionTimeoutMS=0)
# Wait for the first tick of the periodic kill-cursors to pass.
time.sleep(1)
# Enqueue a kill-cursors message.
client.close_cursor(1234, ('doesnt-exist', 27017))
with warnings.catch_warnings(record=True) as user_warnings:
client._process_kill_cursors_queue()
self.assertIn("couldn't close cursor on ('doesnt-exist', 27017)",
str(user_warnings[0].message))
def test_lazy_connect_w0(self):
# Ensure that connect-on-demand works when the first operation is
# an unacknowledged write. This exercises _writable_max_wire_version().
# Use a separate collection to avoid races where we're still
# completing an operation on a collection while the next test begins.
client = rs_or_single_client(connect=False, w=0)
client.test_lazy_connect_w0.test.insert_one({})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.delete_one({})
@client_context.require_no_mongos
def test_exhaust_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Ensure a socket.
connected(client)
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
with self.assertRaises(ConnectionFailure):
next(cursor)
self.assertTrue(sock_info.closed)
# The semaphore was decremented despite the error.
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
@client_context.require_auth
def test_auth_network_error(self):
# Make sure there's no semaphore leak if we get a network error
# when authenticating a new socket with cached credentials.
# Get a client with one socket so we detect if it's leaked.
c = connected(rs_or_single_client(maxPoolSize=1,
waitQueueTimeoutMS=1))
# Simulate an authenticate() call on a different socket.
credentials = auth._build_credentials_tuple(
'DEFAULT', 'admin', db_user, db_pwd, {})
c._cache_credentials('test', credentials, connect=False)
# Cause a network error on the actual socket.
pool = get_pool(c)
socket_info = one(pool.sockets)
socket_info.sock.close()
# SocketInfo.check_auth logs in with the new credential, but gets a
# socket.error. Should be reraised as AutoReconnect.
self.assertRaises(AutoReconnect, c.test.collection.find_one)
# No semaphore leak, the pool is allowed to make a new socket.
c.test.collection.find_one()
@client_context.require_no_replica_set
def test_connect_to_standalone_using_replica_set_name(self):
client = MongoClient(pair, replicaSet='anything',
serverSelectionTimeoutMS=100)
with self.assertRaises(AutoReconnect):
client.test.test.find_one()
@client_context.require_replica_set
def test_stale_getmore(self):
# A cursor is created, but its member goes down and is removed from
# the topology before the getMore message is sent. Test that
# MongoClient._send_message_with_response handles the error.
with self.assertRaises(AutoReconnect):
client = MongoClient(host, port, connect=False,
serverSelectionTimeoutMS=100,
replicaSet=client_context.replica_set_name)
client._send_message_with_response(
operation=message._GetMore('collection', 101, 1234),
address=('not-a-member', 27017))
class TestExhaustCursor(IntegrationTest):
"""Test that clients properly handle errors from exhaust cursors."""
def setUp(self):
super(TestExhaustCursor, self).setUp()
if client_context.is_mongos:
raise SkipTest("mongos doesn't support exhaust, SERVER-2627")
# mongod < 2.2.0 closes exhaust socket on error, so it behaves like
# test_exhaust_query_network_error. Here we test that on query error
# the client correctly keeps the socket *open* and checks it in.
@client_context.require_version_min(2, 2, 0)
def test_exhaust_query_server_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
sock_info = one(pool.sockets)
# This will cause OperationFailure in all mongo versions since
# the value for $orderby must be a document.
cursor = collection.find(
SON([('$query', {}), ('$orderby', True)]),
cursor_type=CursorType.EXHAUST)
self.assertRaises(OperationFailure, cursor.next)
self.assertFalse(sock_info.closed)
# The socket was checked in and the semaphore was decremented.
self.assertIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_server_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)])
self.addCleanup(client_context.client.pymongo_test.test.drop)
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
sock_info = one(pool.sockets)
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a server error on getmore.
def receive_message(operation, request_id):
# Discard the actual server response.
SocketInfo.receive_message(sock_info, operation, request_id)
# responseFlags bit 1 is QueryFailure.
msg = struct.pack('<iiiii', 1 << 1, 0, 0, 0, 0)
msg += BSON.encode({'$err': 'mock err', 'code': 0})
return msg
saved = sock_info.receive_message
sock_info.receive_message = receive_message
self.assertRaises(OperationFailure, list, cursor)
sock_info.receive_message = saved
# The socket is returned the pool and it still works.
self.assertEqual(200, collection.count())
self.assertIn(sock_info, pool.sockets)
def test_exhaust_query_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(ConnectionFailure, cursor.next)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_network_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)]) # More than one batch.
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a network error.
sock_info = cursor._Cursor__exhaust_mgr.sock
sock_info.sock.close()
# A getmore fails.
self.assertRaises(ConnectionFailure, list, cursor)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
class TestClientLazyConnect(IntegrationTest):
    """Test concurrent operations on a lazily-connecting MongoClient."""

    def _get_client(self):
        # The client is created without connecting; the first operation
        # performed against it triggers the actual connection.
        return rs_or_single_client(connect=False)

    def test_insert_one(self):
        def prepare(collection):
            collection.drop()

        def do_insert(collection, _):
            collection.insert_one({})

        def verify(collection):
            self.assertEqual(NTHREADS, collection.count())

        lazy_client_trial(prepare, do_insert, verify, self._get_client)

    def test_update_one(self):
        def prepare(collection):
            collection.drop()
            collection.insert_one({'i': 0})

        # Each worker increments the counter once.
        def do_update(collection, _):
            collection.update_one({}, {'$inc': {'i': 1}})

        def verify(collection):
            self.assertEqual(NTHREADS, collection.find_one()['i'])

        lazy_client_trial(prepare, do_update, verify, self._get_client)

    def test_delete_one(self):
        def prepare(collection):
            collection.drop()
            collection.insert_many([{'i': i} for i in range(NTHREADS)])

        def do_delete(collection, i):
            collection.delete_one({'i': i})

        def verify(collection):
            self.assertEqual(0, collection.count())

        lazy_client_trial(prepare, do_delete, verify, self._get_client)

    def test_find_one(self):
        results = []

        def prepare(collection):
            collection.drop()
            collection.insert_one({})
            del results[:]

        def do_find(collection, _):
            results.append(collection.find_one())

        def verify(collection):
            self.assertEqual(NTHREADS, len(results))

        lazy_client_trial(prepare, do_find, verify, self._get_client)

    def test_max_bson_size(self):
        # Client should have sane defaults before connecting, and should
        # update its configuration once connected.
        client = self._get_client()
        self.assertEqual(16 * (1024 ** 2), client.max_bson_size)
        self.assertEqual(2 * client.max_bson_size, client.max_message_size)
        # Force a connection so the client picks up the server's limits.
        ismaster = client.db.command('ismaster')
        self.assertEqual(ismaster['maxBsonObjectSize'], client.max_bson_size)
        if 'maxMessageSizeBytes' in ismaster:
            self.assertEqual(
                ismaster['maxMessageSizeBytes'],
                client.max_message_size)
class TestMongoClientFailover(MockClientTest):
    """Failover behavior of MongoClient against a mocked replica set."""
    def test_discover_primary(self):
        # Disable background refresh so topology changes happen only when we
        # explicitly trigger them.
        with client_knobs(heartbeat_frequency=999999):
            c = MockClient(
                standalones=[],
                members=['a:1', 'b:2', 'c:3'],
                mongoses=[],
                host='b:2',  # Pass a secondary.
                replicaSet='rs')
            wait_until(lambda: len(c.nodes) == 3, 'connect')
            self.assertEqual(c.address, ('a', 1))
            # Fail over.
            c.kill_host('a:1')
            c.mock_primary = 'b:2'
            c.close()
            self.assertEqual(0, len(c.nodes))
            t = c._get_topology()
            t.select_servers(writable_server_selector)  # Reconnect.
            self.assertEqual(c.address, ('b', 2))
            # a:1 no longer in nodes.
            self.assertLess(len(c.nodes), 3)
            # c:3 is rediscovered.
            t.select_server_by_address(('c', 3))
    def test_reconnect(self):
        # Verify the node list isn't forgotten during a network failure.
        c = MockClient(
            standalones=[],
            members=['a:1', 'b:2', 'c:3'],
            mongoses=[],
            host='b:2',  # Pass a secondary.
            replicaSet='rs')
        wait_until(lambda: len(c.nodes) == 3, 'connect')
        # Total failure.
        c.kill_host('a:1')
        c.kill_host('b:2')
        c.kill_host('c:3')
        # MongoClient discovers it's alone.
        self.assertRaises(AutoReconnect, c.db.collection.find_one)
        # But it can reconnect.
        c.revive_host('a:1')
        c._get_topology().select_servers(writable_server_selector)
        self.assertEqual(c.address, ('a', 1))
    def _test_network_error(self, operation_callback):
        # Verify only the disconnected server is reset by a network failure.
        # Disable background refresh.
        with client_knobs(heartbeat_frequency=999999):
            c = MockClient(
                standalones=[],
                members=['a:1', 'b:2'],
                mongoses=[],
                host='a:1',
                replicaSet='rs',
                connect=False)
            # Set host-specific information so we can test whether it is reset.
            c.set_wire_version_range('a:1', 0, 1)
            c.set_wire_version_range('b:2', 0, 2)
            c._get_topology().select_servers(writable_server_selector)
            wait_until(lambda: len(c.nodes) == 2, 'connect')
            c.kill_host('a:1')
            # MongoClient is disconnected from the primary.
            self.assertRaises(AutoReconnect, operation_callback, c)
            # The primary's description is reset to Unknown with default
            # (zeroed) wire versions...
            server_a = c._get_topology().get_server_by_address(('a', 1))
            sd_a = server_a.description
            self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type)
            self.assertEqual(0, sd_a.min_wire_version)
            self.assertEqual(0, sd_a.max_wire_version)
            # ...but not the secondary's.
            server_b = c._get_topology().get_server_by_address(('b', 2))
            sd_b = server_b.description
            self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type)
            self.assertEqual(0, sd_b.min_wire_version)
            self.assertEqual(2, sd_b.max_wire_version)
    def test_network_error_on_query(self):
        callback = lambda client: client.db.collection.find_one()
        self._test_network_error(callback)
    def test_network_error_on_insert(self):
        callback = lambda client: client.db.collection.insert_one({})
        self._test_network_error(callback)
    def test_network_error_on_update(self):
        callback = lambda client: client.db.collection.update_one(
            {}, {'$unset': 'x'})
        self._test_network_error(callback)
    def test_network_error_on_replace(self):
        callback = lambda client: client.db.collection.replace_one({}, {})
        self._test_network_error(callback)
    def test_network_error_on_delete(self):
        callback = lambda client: client.db.collection.delete_many({})
        self._test_network_error(callback)
# Run this test module directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
|
|
import six
import logging
from .base import AbstractCombinedNode
from datetime import timedelta, datetime
from ebu_tt_live.bindings._ebuttdt import LimitedClockTimingType, FullClockTimingType
from ebu_tt_live.bindings import _ebuttm as metadata
from ebu_tt_live.documents import EBUTT3Document
from ebu_tt_live.bindings.pyxb_utils import RecursiveOperation, StopBranchIteration
from ebu_tt_live.bindings.validation.timing import TimingValidationMixin
from ebu_tt_live.errors import UnexpectedSequenceIdentifierError
log = logging.getLogger(__name__)
class RetimingDelayNode(AbstractCombinedNode):
    """Node that applies a fixed delay to the timing of incoming EBU-TT-3
    documents and re-emits them under a new sequence identifier.
    """
    # Sequence identifier stamped onto the delayed output documents.
    _document_sequence = None
    # Delay to apply, in seconds.
    _fixed_delay = None
    _expects = EBUTT3Document
    _provides = EBUTT3Document
    def __init__(self, node_id, fixed_delay, document_sequence, consumer_carriage=None, producer_carriage=None):
        """
        :param node_id: identifier of this node
        :param fixed_delay: delay in seconds applied to document timings
        :param document_sequence: sequence identifier for the output sequence
        :param consumer_carriage: optional input carriage
        :param producer_carriage: optional output carriage
        """
        super(RetimingDelayNode, self).__init__(
            node_id=node_id,
            producer_carriage=producer_carriage,
            consumer_carriage=consumer_carriage
        )
        self._fixed_delay = fixed_delay
        self._document_sequence = document_sequence
    def process_document(self, document, **kwargs):
        """Delay *document*'s timings, record the processing step in its
        metadata and emit it; non-document payloads pass through untouched.

        :raises UnexpectedSequenceIdentifierError: if the incoming document
            already carries the configured output sequence identifier
            (processing it would create a loop).
        """
        if self.is_document(document):
            if document.sequence_identifier == self._document_sequence:
                raise UnexpectedSequenceIdentifierError()
            # NOTE(review): despite the name, a truthy result here means the
            # document should be processed — confirm against
            # AbstractCombinedNode.check_if_document_seen.
            if self.check_if_document_seen(document=document):
                self.limit_sequence_to_one(document)
                # change the sequence identifier
                document.sequence_identifier = self._document_sequence
                # Make sure the metadata containers exist before appending
                # the appliedProcessing record below.
                if document.binding.head.metadata is None:
                    document.binding.head.metadata = metadata.headMetadata_type(
                        metadata.documentMetadata()
                    )
                if document.binding.head.metadata.documentMetadata is None:
                    document.binding.head.metadata.documentMetadata = metadata.documentMetadata()
                # Record this processing step in the document metadata.
                ap_metadata = metadata.appliedProcessing_type(
                    process='retimed by ' + str(self._fixed_delay) + 's',
                    generatedBy='retiming_delay_node_v1.0',
                    sourceId=self.node_id,
                    appliedDateTime=datetime.now()
                )
                document.binding.head.metadata.documentMetadata.appliedProcessing.append(ap_metadata)
                # Apply the delay: if some leaf has no begin time anywhere on
                # its ancestor path, delay the body itself; otherwise shift
                # the explicitly timed children.
                if has_a_leaf_with_no_timing_path(document.binding.body):
                    update_body_timing(document.binding.body, document.time_base, self._fixed_delay)
                else:
                    update_children_timing(document.binding, document.time_base, self._fixed_delay)
                document.validate()
                self.producer_carriage.emit_data(data=document, **kwargs)
            else:
                log.warning(
                    'Ignoring duplicate document: {}__{}'.format(
                        document.sequence_identifier,
                        document.sequence_number
                    )
                )
        else:
            self.producer_carriage.emit_data(data=document, **kwargs)
class BufferDelayNode(AbstractCombinedNode):
    """Pass-through node that forwards serialized documents unchanged while
    asking the producer carriage to hold them back by a fixed delay."""

    _fixed_delay = None
    _expects = six.text_type
    _provides = six.text_type

    def __init__(self, node_id, fixed_delay, consumer_carriage=None, producer_carriage=None):
        """
        :param node_id: identifier of this node
        :param fixed_delay: emission delay, in seconds
        """
        super(BufferDelayNode, self).__init__(
            node_id=node_id,
            consumer_carriage=consumer_carriage,
            producer_carriage=producer_carriage
        )
        self._fixed_delay = fixed_delay

    def process_document(self, document, **kwargs):
        """Emit *document* unchanged, tagged with the configured delay."""
        emit = self.producer_carriage.emit_data
        emit(data=document, delay=self._fixed_delay, **kwargs)
def update_children_timing(element, timebase, delay_int):
    """Recursively shift explicit begin/end times of *element*'s children by
    *delay_int* seconds.

    A child that carries its own begin time is shifted in place and NOT
    recursed into (its descendants' times are relative to it); a child with
    no begin time is recursed into so the delay reaches deeper timed
    elements. End times are shifted wherever they are present.

    :param element: pyxb binding element whose children are inspected
    :param timebase: the document ``timeBase`` ('clock' or 'media'; any
        other value leaves times untouched, as before)
    :param delay_int: delay to apply, in seconds
    """
    # Pick the timing type matching the document timebase once, instead of
    # re-testing the timebase inside every branch.
    timing_type = {'clock': LimitedClockTimingType,
                   'media': FullClockTimingType}.get(timebase)
    delay = None
    if timing_type is not None:
        # Round-trip through the timing type (mirrors the original
        # construction) in case the type normalises the value.
        delay = timing_type(timedelta(seconds=delay_int)).timedelta
    if hasattr(element, 'orderedContent'):
        for child in element.orderedContent():
            value = child.value
            if getattr(value, 'end', None) is not None and timing_type is not None:
                value.end = timing_type(value.end.timedelta + delay)
            if getattr(value, 'begin', None) is not None:
                if timing_type is not None:
                    value.begin = timing_type(value.begin.timedelta + delay)
            else:
                # No explicit begin here: push the delay further down.
                update_children_timing(value, timebase, delay_int)
def update_body_timing(body, timebase, delay_int):
    """Set the body's begin time to *delay_int* seconds and push an existing
    end time back by the same amount.

    The begin attribute is always written (it must not already be set); the
    end attribute is shifted only if present.

    :param body: the tt:body binding element
    :param timebase: the document ``timeBase`` ('clock' or 'media'; any
        other value leaves times untouched, as before)
    :param delay_int: delay to apply, in seconds
    """
    if hasattr(body, 'begin'):
        assert body.begin is None, "The body already has a begin time"
    timing_type = {'clock': LimitedClockTimingType,
                   'media': FullClockTimingType}.get(timebase)
    if timing_type is None:
        return
    # Round-trip through the timing type (mirrors the original construction)
    # in case the type normalises the value.
    delay = timing_type(timedelta(seconds=delay_int)).timedelta
    # We always set the begin attribute, regardless of the presence of a
    # begin or end attribute.
    body.begin = timing_type(delay)
    # If the body has an end attribute, add the delay to it.
    if getattr(body, 'end', None) is not None:
        body.end = timing_type(body.end.timedelta + delay)
def is_explicitly_timed(element):
    """Return True if *element* or any of its descendants carries an explicit
    ``begin`` or ``end`` attribute value, False otherwise.

    Previously the function implicitly returned None for the negative case;
    it now returns False explicitly (both are falsy, so callers see the same
    truthiness).
    """
    if (hasattr(element, 'begin') and element.begin is not None) or \
            (hasattr(element, 'end') and element.end is not None):
        return True
    # Not timed itself: recurse into children, if any.
    if hasattr(element, 'orderedContent'):
        for child in element.orderedContent():
            if is_explicitly_timed(child.value):
                return True
    return False
class UntimedPathFinder(RecursiveOperation):
    """Recursive search for a timed leaf that no explicit begin time applies
    to.

    Walks the timed elements under ``root_element`` keeping a stack of
    ancestors that carry a begin time; if a timed leaf is reached while that
    stack is empty, no begin time applies to the leaf and the search stops
    early.
    """
    # True once a qualifying leaf has been found.
    _path_found = False
    # Stack of ancestor elements carrying an explicit begin time.
    _timed_element_stack = None

    def __init__(self, root_element):
        self._timed_element_stack = []
        super(UntimedPathFinder, self).__init__(
            root_element,
            # Only visit elements that participate in timing validation.
            filter=lambda value, element: isinstance(value, TimingValidationMixin)
        )

    def _is_begin_timed(self, value):
        # An element counts as "timed" for the stack iff it has a begin time.
        return value.begin is not None

    def _before_element(self, value, element=None, parent_binding=None, **kwargs):
        if self._path_found is True:
            # A qualifying path is already known; prune this branch.
            raise StopBranchIteration()
        if self._is_begin_timed(value=value):
            self._timed_element_stack.append(value)

    def _after_element(self, value, element=None, parent_binding=None, **kwargs):
        # Pop the matching entry pushed in _before_element.
        if self._is_begin_timed(value=value):
            self._timed_element_stack.pop()

    def _process_element(self, value, element=None, parent_binding=None, **kwargs):
        # A timed leaf with no begin-timed ancestor on the stack is the
        # path we are looking for.
        if value.is_timed_leaf() and not len(self._timed_element_stack):
            self._path_found = True
            raise StopBranchIteration()

    def _process_non_element(self, value, non_element, parent_binding=None, **kwargs):
        pass

    @property
    def path_found(self):
        # Read-only result of the search.
        return self._path_found
def has_a_leaf_with_no_timing_path(element):
    """Tell whether any leaf under *element* lacks a begin time on its whole
    ancestor path (including the leaf itself).

    :param element: root binding element to search
    :return: True if such a leaf exists, False otherwise
    """
    path_finder = UntimedPathFinder(element)
    path_finder.proceed()
    return path_finder.path_found
|
|
"""Basic feedforward networks.
"""
from functools import partial, wraps;
import yaml;
import numpy as np;
import tensorflow as tf;
from .utils import get_randn_variable, get_zeros_variable, init_uninit;
__all__ = ['FullyConnectedModel', 'Model']

# Activation names that all map to the identity function.
_IDENT_NAMES = {'ident', 'identity', 'linear'}
def get_activation(activation, **kwargs):
    """Return tensorflow activation function from name and keyword
    arguments.

    Parameters
    ----------
    activation : str or None
        Name of an activation in ``tf.nn`` (e.g. ``'relu'``), or one of
        'ident'/'identity'/'linear'/None for the identity function.
    kwargs
        Keyword arguments bound to the returned callable.

    Returns
    -------
    callable
        The activation with ``kwargs`` pre-bound via ``functools.partial``.
    """
    if activation in _IDENT_NAMES or activation is None:
        activation_func = tf.identity
    else:
        activation_func = getattr(tf.nn, activation)
    return partial(activation_func, **kwargs)
class Model(object):
    """Basic model class that wraps a TensorFlow network graph.
    Attributes
    ----------
    x : Tensor
        Input placeholder.
    y : Tensor
        Output placeholder.
    variables : list of tensorflow.Variable
        Trainable variables (weights and biases).
    dtype : str or tensorflow or NumPy dtype
        The dtype for tensors in network.
    input_shape
    output_shape
    """
    dtype = tf.float32

    def __init__(self):
        # Abstract base: subclasses must build the graph and set
        # x, y and variables.
        raise NotImplementedError

    def init_variables(self):
        """Initialize all variables unless inside a variable scope where
        ``reuse=True``.
        """
        if not tf.get_variable_scope().reuse:
            init = tf.initialize_variables(self.variables)
            init.run()

    def get_config(self):
        """Return dictionary of keyword arguments that when passed to
        ``__init__`` will reconstruct the network (modulo values of
        variables).
        """
        raise NotImplementedError

    def fprop(self, x):
        """Propagate input forward through network.
        Parameters
        ----------
        x : ndarray or Tensor, (n_inputs, n_feats)
            Batch of inputs to network.
        Returns
        -------
        y : ndarray, (n_inputs, n_classes)
            Batch of outputs of final layer of network.
        """
        return self.y.eval(feed_dict={self.x: x})

    def predict(self, x):
        """Propagate input forward through network to make predictions.
        Parameters
        ----------
        x : ndarray or Tensor, (n_inputs, ...)
            Batch of inputs to network.
        Returns
        -------
        y : ndarray, (n_inputs,)
            Batch of predictions. A vector of class integer labels.
        """
        return self.fprop(x).argmax(axis=1)

    def save_weights(self, fn):
        """Save variable values to checkpoint file ``fn``."""
        saver = tf.train.Saver(self.variables)
        saver.save(tf.get_default_session(), fn)

    def load_weights(self, fn):
        """Restore variable values from checkpoint file ``fn``."""
        saver = tf.train.Saver(self.variables)
        saver.restore(tf.get_default_session(), fn)

    def __getstate__(self):
        # Graph objects are not picklable: drop them; __setstate__ rebuilds
        # the graph from get_config().
        new_dict = self.__dict__.copy()
        new_dict['x'] = None
        new_dict['y'] = None
        new_dict['variables'] = None
        return new_dict

    def __setstate__(self, state):
        # Parameter renamed from ``dict`` to avoid shadowing the builtin.
        self.__dict__.update(state)
        self.__init__(**self.get_config())

    @property
    def input_shape(self):
        return self.x.get_shape()

    @property
    def output_shape(self):
        return self.y.get_shape()
class FullyConnectedModel(Model):
    """Simple fully connected feedforward neural network.
    Parameters
    ----------
    input_dim : int
        Dimensionality of input features.
    out_dim : int
        Dimensionality of output.
    n_hid : int
        Number of hidden layers.
    hid_dim : int
        Number of units in each hidden layer.
    hid_activation : str, optional
        Name of activation function to use for hidden layers. See also
        ``get_activation``.
        (Default: 'relu')
    out_activation : str, optional
        Name of activation function to use for output layer. See also
        ``get_activation``.
    input_dropout : float, optional
        Proportion of input units to dropout.
        (Default: 0.0)
    hidden_dropout : float, optional
        Proportion of hidden units to dropout.
        (Default: 0.0)
    """
    def __init__(self, input_dim, out_dim, n_hid, hid_dim,
                 hid_activation='relu', out_activation='softmax',
                 input_dropout=0.0, hidden_dropout=0.0):
        # Store all constructor arguments as attributes (read by get_config).
        self.__dict__.update(locals())
        del self.self
        # Determine layer sizes.
        layer_sizes = [input_dim] + [hid_dim]*n_hid + [out_dim]
        # Build input layer.
        with tf.variable_scope('input'):
            input_place = tf.placeholder('float32', [None, input_dim],
                                         name='act')
            acts = [input_place]
            if input_dropout > 0:
                acts.append(tf.nn.dropout(acts[-1], 1 - input_dropout,
                                          name='drop'))
        # Build hidden layers/output layer.
        variables = []
        for ii, (prev_size, curr_size) in enumerate(zip(layer_sizes[:-1],
                                                        layer_sizes[1:])):
            is_hidden_layer = ii < len(layer_sizes) - 2
            layer_name = 'hid_%d' % ii if is_hidden_layer else 'final'
            with tf.variable_scope(layer_name):
                # Initialize weights/biases for affine component using a
                # fan-in/fan-out based scale.
                sigma = np.sqrt(2 / float(prev_size + curr_size))
                W = get_randn_variable('W', [prev_size, curr_size],
                                       tf.float32, 0, sigma)
                B = get_zeros_variable('B', [curr_size],
                                       tf.float32)
                variables.extend([W, B])
                # Add affine component and following nonlinearity.
                if is_hidden_layer:
                    f = get_activation(hid_activation, name='act')
                else:
                    f = get_activation(out_activation, name='act')
                acts.append(f(tf.matmul(acts[-1], W) + B))
                # Apply dropout mask if requested.
                # BUG FIX: keep probability previously used input_dropout
                # here instead of hidden_dropout.
                if is_hidden_layer and hidden_dropout > 0:
                    acts.append(tf.nn.dropout(acts[-1],
                                              1 - hidden_dropout,
                                              name='drop'))
        self.variables = variables
        self.x = input_place
        self.y = acts[-1]
        self.init_variables()

    @wraps(Model.get_config)
    def get_config(self):
        return {'input_dim': self.input_dim,
                'out_dim': self.out_dim,
                'n_hid': self.n_hid,
                'hid_dim': self.hid_dim,
                'hid_activation': self.hid_activation,
                'out_activation': self.out_activation,
                'input_dropout': self.input_dropout,
                'hidden_dropout': self.hidden_dropout,
                }
class ConvModel(Model):
    """Simple convolutional feedforward neural network.
    Parameters
    ----------
    input_shape : tuple
        Shape of input images (height x width x n_channels).
    out_dim : int
        Dimensionality of output.
    n_fc : int, optional
        Number of fully-connected layers.
        (Default: 2)
    n_fc_hid : int, optional
        Number of units in each fully-connected layer.
        (Default: 256)
    fc_activation : str, optional
        Name of activation function to use for fully-connected layers.
        See also ``get_activation``.
        (Default: 'relu')
    out_activation : str, optional
        Name of activation function to use for output layer. See also
        ``get_activation``.
    dropout : float, optional
        Proportion of units from the topmost convolutional layer and
        hidden layers to dropout.
        (Default: 0.5)
    """
    def __init__(self, input_shape, out_dim, n_fc=2, n_fc_hid=256,
                 fc_activation='relu', out_activation='softmax',
                 dropout=0.5):
        # Store all constructor arguments as attributes (read by get_config).
        self.__dict__.update(locals())
        del self.self
        image_height, image_width, n_channels = input_shape
        # Build input layer.
        with tf.variable_scope('input'):
            input_place = tf.placeholder('float32',
                                         [None] + list(input_shape),
                                         name='act')
            acts = [input_place]
        variables = []
        # First convolutional (+pooling) layer.
        with tf.variable_scope('conv1'):
            # Convolutional component. NOTE: kernel weights use a fixed
            # stddev of 1e-4 rather than a fan-in based scale.
            filter_height = filter_width = 5
            n_filters = 32
            stride = 1
            kernel_shape = [filter_height, filter_width, n_channels,
                            n_filters]
            kernel = get_randn_variable('W', kernel_shape, tf.float32, 0,
                                        1e-4)
            conv = tf.nn.conv2d(input_place, kernel, [1, stride, stride, 1],
                                padding='SAME')
            B = get_zeros_variable('B', [n_filters], tf.float32)
            acts.append(tf.nn.relu(conv + B, name='conv'))
            variables.extend([kernel, B])
            # Pooling component.
            pool_height = pool_width = 2
            stride = 2
            pool = tf.nn.max_pool(acts[-1],
                                  ksize=[1, pool_height, pool_width, 1],
                                  strides=[1, stride, stride, 1],
                                  padding='SAME', name='pool')
            acts.append(pool)
            # Local response normalization component.
            norm = tf.nn.lrn(acts[-1], 4, bias=1.0, alpha=0.001 / 9.0,
                             beta=0.75, name='norm')
            acts.append(norm)
        # Second convolutional (+pooling) layer.
        with tf.variable_scope('conv2'):
            # Convolutional component.
            filter_height = filter_width = 5
            n_channels = n_filters
            n_filters = 32
            stride = 1
            kernel_shape = [filter_height, filter_width, n_channels,
                            n_filters]
            kernel = get_randn_variable('W', kernel_shape, tf.float32,
                                        0, 1e-4)
            conv = tf.nn.conv2d(acts[-1], kernel,
                                [1, stride, stride, 1],
                                padding='SAME')
            B = get_zeros_variable('B', [n_filters], tf.float32)
            acts.append(tf.nn.relu(conv + B, name='conv'))
            variables.extend([kernel, B])
            # Pooling component.
            pool_height = pool_width = 2
            stride = 2
            pool = tf.nn.max_pool(acts[-1],
                                  ksize=[1, pool_height, pool_width, 1],
                                  strides=[1, stride, stride, 1],
                                  padding='SAME', name='pool')
            acts.append(pool)
            # Normalization component.
            norm = tf.nn.lrn(acts[-1], 4, bias=1.0, alpha=0.001 / 9.0,
                             beta=0.75, name='norm')
            acts.append(norm)
        # Dropout on the topmost convolutional layer.
        # BUG FIX: the keep probability was hard-coded to 0.5 and ignored
        # the ``dropout`` argument (identical behavior at the default 0.5).
        if dropout > 0.0:
            acts.append(tf.nn.dropout(acts[-1], 1 - dropout, name='drop'))
        # Flatten tensors so the fully-connected layers can use efficient
        # SGEMM calls. BUG FIX: materialize the dims as a list -- on
        # Python 3, np.prod over a ``map`` object does not reduce it.
        n_cols = int(np.prod([int(dim) for dim in acts[-1].get_shape()[1:]]))
        reshape = tf.reshape(acts[-1], [-1, n_cols], name='reshape')
        acts.append(reshape)
        # Fully connected layers.
        layer_sizes = [n_cols] + [n_fc_hid]*n_fc
        for ii, (prev_size, curr_size) in enumerate(zip(layer_sizes[:-1],
                                                        layer_sizes[1:])):
            with tf.variable_scope('fc_%d' % ii):
                # Init weights/biases and add matmul op.
                sigma = np.sqrt(2 / float(prev_size + curr_size))
                W = get_randn_variable('W', [prev_size, curr_size],
                                       tf.float32, 0, sigma)
                B = get_zeros_variable('B', [curr_size], tf.float32)
                variables.extend([W, B])
                net_input = tf.matmul(acts[-1], W) + B
                acts.append(tf.nn.relu(net_input, name='act'))
                if dropout > 0:
                    acts.append(tf.nn.dropout(acts[-1], 1 - dropout,
                                              name='drop'))
        # Output layer.
        with tf.variable_scope('final') as vs:
            f = get_activation(out_activation, name=vs.name)
            # Init weights/biases and add matmul op.
            # (layer_sizes[-1] also covers the n_fc == 0 edge case, where
            # curr_size would otherwise be unbound.)
            prev_size = layer_sizes[-1]
            curr_size = out_dim
            sigma = np.sqrt(2 / float(prev_size + curr_size))
            W = get_randn_variable('W', [prev_size, curr_size],
                                   tf.float32, 0, sigma)
            B = get_zeros_variable('B', [curr_size], tf.float32)
            variables.extend([W, B])
            net_input = tf.matmul(acts[-1], W) + B
            acts.append(f(net_input, name='act'))
        self.variables = variables
        self.x = input_place
        self.y = acts[-1]
        self.init_variables()

    @wraps(Model.get_config)
    def get_config(self):
        # BUG FIX: ``Model.input_shape`` is a property (a data descriptor),
        # so ``self.input_shape`` returns the placeholder's TensorShape, not
        # the constructor argument stored via ``__dict__.update(locals())``.
        # Read the stored value directly so the config round-trips.
        return {'input_shape': self.__dict__['input_shape'],
                'out_dim': self.out_dim,
                'n_fc': self.n_fc,
                'n_fc_hid': self.n_fc_hid,
                'fc_activation': self.fc_activation,
                'out_activation': self.out_activation,
                'dropout': self.dropout,
                }
|
|
"""
Match segments
--------------
Performs a diffs using a tree of matchable segments in order to remain robust
to content moves. This module supports the use of a custom
:class:`~deltas.segmenters.Segmenter`.
.. automethod:: deltas.algorithms.segment_matcher.diff
.. automethod:: deltas.algorithms.segment_matcher.diff_segments
.. automethod:: deltas.algorithms.segment_matcher.process
.. autoclass:: SegmentMatcher
:members:
"""
from collections import defaultdict
from . import sequence_matcher
from ..operations import Delete, Equal, Insert
from ..segmenters import (MatchableSegment, ParagraphsSentencesAndWhitespace,
Segment, Segmenter)
from ..tokenizers import Token, Tokenizer, text_split
from .diff_engine import DiffEngine
# Module-level defaults used when callers do not supply their own
# segmenter/tokenizer.
SEGMENTER = ParagraphsSentencesAndWhitespace()
TOKENIZER = text_split
def diff(a, b, segmenter=None):
    """
    Performs a diff comparison between two sequences of tokens (`a` and `b`)
    using `segmenter` to cluster and match
    :class:`deltas.segmenters.MatchableSegment`.
    :Example:
        >>> from deltas import segment_matcher, text_split
        >>>
        >>> a = text_split.tokenize("This is some text.  This is some other text.")
        >>> b = text_split.tokenize("This is some other text.  This is some text.")
        >>> operations = segment_matcher.diff(a, b)
        >>>
        >>> for op in operations:
        ...     print(op.name, repr(''.join(a[op.a1:op.a2])), repr(''.join(b[op.b1:op.b2])))
        ...
        equal 'This is some other text.' 'This is some other text.'
        insert ' ' ' '
        equal 'This is some text.' 'This is some text.'
        delete ' ' ''
    :Parameters:
        a : `list`(:class:`deltas.tokenizers.Token`)
            Initial sequence
        b : `list`(:class:`deltas.tokenizers.Token`)
            Changed sequence
        segmenter : :class:`deltas.segmenters.Segmenter`
            A segmenter to use on the tokens.
    :Returns:
        An `iterable` of operations.
    """
    segmenter = segmenter or SEGMENTER
    token_list_a = list(a)
    token_list_b = list(b)
    # Cluster each token sequence into a segment tree, then diff the trees.
    return diff_segments(segmenter.segment(token_list_a),
                         segmenter.segment(token_list_b))
def diff_segments(a_segments, b_segments):
    """
    Performs a diff comparison between two pre-clustered
    :class:`deltas.segmenters.Segment` trees. In most cases, segmentation
    takes 100X more time than actually performing the diff.
    :Parameters:
        a_segments : :class:`deltas.segmenters.Segment`
            An initial sequence
        b_segments : :class:`deltas.segmenters.Segment`
            A changed sequence
    :Returns:
        An `iterable` of operations.
    """
    # Match segments between the two trees and re-sequence unmatched tokens.
    a_tokens, b_tokens = _cluster_matching_segments(a_segments, b_segments)
    # Simple LCS over the mixed sequence of unmatched tokens and clusters.
    clustered_ops = sequence_matcher.diff(a_tokens, b_tokens)
    # De-cluster the operations back to flat token offsets.
    expander = SegmentOperationsExpander(clustered_ops, a_tokens, b_tokens)
    return iter(expander.expand())
def process(texts, *args, **kwargs):
    """
    Processes a single sequence of texts with a
    :class:`~diffengine.algorithms.SegmentMatcher`.
    :Parameters:
        texts : `iterable`(`str`)
            sequence of texts
        args : `tuple`
            passed to :class:`~diffengine.algorithms.SegmentMatcher`'s
            constructor
        kwargs : `dict`
            passed to :class:`~diffengine.algorithms.SegmentMatcher`'s
            constructor
    """
    processor = SegmentMatcher.Processor(*args, **kwargs)
    for text in texts:
        yield processor.process(text)
class SegmentMatcher(DiffEngine):
    """
    Constructs a segment matcher diff engine that preserves segmentation state
    and is able to process changes sequentially.  When detecting changes
    across many versions of a text this strategy will be about twice as fast as
    calling :func:`diff` sequentially.
    :Example:
        >>> from deltas.algorithms import SegmentMatcher
        >>> from deltas.tokenizers import text_split
        >>>
        >>> engine = SegmentMatcher(text_split)
        >>>
        >>> processor = engine.processor()
        >>> ops, a, b = processor.process("This is a version.  It has some " +
        ...                               "text in it.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        'This is a version.  It has some text in it.'
        >>> ops, a, b = processor.process("This is a version.  However, it " +
        ...                               "has different.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        'This is a version.  ' '' 'However, it' ' has ' '' 'different' '.'
        >>> ops, a, b = processor.process("Switching it up here.  This is a " +
        ...                               "version.")
        >>> print(" ".join(repr(''.join(b[op.b1:op.b2])) for op in ops))
        '' 'Switching' ' it ' '' 'up' ' ' '' 'here' '.' '  ' 'This is a version.'
    """
    class Processor(DiffEngine.Processor):
        """
        A processor used by the SegmentMatcher difference engine to track the
        history of a single text.
        """
        def __init__(self, tokenizer=None, segmenter=None, last_text=None,
                     last_tokens=None, last_segments=None):
            # Fall back to the module-level defaults when not provided.
            self.tokenizer = tokenizer or TOKENIZER
            self.segmenter = segmenter or SEGMENTER
            self.update(last_text, last_tokens, last_segments)
        def update(self, last_text=None, last_tokens=None, last_segments=None):
            """Reset processor state from the richest representation given
            (segments > tokens > raw text > empty)."""
            if last_segments is not None:
                self.last_segments = last_segments
                self.last_tokens = self.last_segments.tokens()
            elif last_tokens is not None:
                self.last_tokens = last_tokens
                self.last_segments = self.segmenter.segment(last_tokens)
            elif last_text is not None:
                self.last_tokens = self.tokenizer.tokenize(last_text)
                self.last_segments = self.segmenter.segment(self.last_tokens)
            else:
                self.last_tokens = []
                self.last_segments = Segment()
        def process(self, text, token_class=Token):
            """
            Processes a new version of a text and returns the delta.
            :Parameters:
                text : `str`
                    The text to process
            :Returns:
                A tuple of `operations`, `a_tokens`, `b_tokens`
            """
            # Tokenize and segment
            tokens = self.tokenizer.tokenize(text, token_class=token_class)
            segments = self.segmenter.segment(tokens)
            return self.process_segments(segments, tokens=tokens)
        def process_segments(self, segments, tokens=None):
            """Diff pre-segmented content against the stored last version and
            advance the stored state to *segments*."""
            if tokens is None: tokens = segments.tokens()
            # Perform diff
            operations = diff_segments(self.last_segments, segments)
            # Update state
            a = self.last_tokens
            b = tokens
            self.last_tokens = tokens
            self.last_segments = segments
            # Return delta
            return operations, a, b
    def __init__(self, tokenizer=None, segmenter=None):
        # Fall back to the module-level defaults when not provided.
        self.tokenizer = tokenizer or TOKENIZER
        self.segmenter = segmenter or SEGMENTER
    def processor(self, *args, **kwargs):
        """
        Constructs and configures a processor to process versions of a text.
        """
        return self.Processor(self.tokenizer, self.segmenter, *args, **kwargs)
    def process(self, texts, *args, **kwargs):
        """Process a sequence of texts with a dedicated processor; see the
        module-level :func:`process`."""
        return process(texts, self.tokenizer, self.segmenter, *args, **kwargs)
    @classmethod
    def from_config(cls, config, name, section_key="diff_engines"):
        """Construct a SegmentMatcher from a configuration mapping."""
        section = config[section_key][name]
        return cls(
            Tokenizer.from_config(config, section['tokenizer']),
            Segmenter.from_config(config, section['segmenter'])
        )
def _cluster_matching_segments(a_segments, b_segments):
    """Match 'b' content against 'a' and expand whatever goes unmatched."""
    # Look-up map for matchable segments in 'a'.
    segment_map = _build_segment_map(a_segments)
    # Matching clusters content in 'b' and flags the counterparts in 'a'...
    b_tokens = list(_match_segments(segment_map, b_segments))
    # ...so the expansion of 'a' must run afterwards.
    a_tokens = list(_expand_unmatched_segments(a_segments))
    return a_tokens, b_tokens
def _build_segment_map(segments):
    """Group matchable segments by equality so equal segments share a key."""
    segment_map = defaultdict(list)
    for segment in _get_matchable_segments(segments):
        segment_map[segment].append(segment)
    return segment_map
def _get_matchable_segments(segments):
    """
    Performs a depth-first search of the segment tree to get all matchable
    segments.
    """
    for subsegment in segments:
        if isinstance(subsegment, Token):
            break  # No tokens allowed next to segments
        if not isinstance(subsegment, Segment):
            continue
        if isinstance(subsegment, MatchableSegment):
            yield subsegment
        for matchable in _get_matchable_segments(subsegment):
            yield matchable
def _match_segments(a_segment_map, b_segments):
    """Yield 'b' content with matched segments clustered and tokens inline."""
    for subsegment in b_segments:
        if not isinstance(subsegment, Segment):
            yield subsegment  # Plain token: emit as-is
            continue
        matches = None
        if isinstance(subsegment, MatchableSegment):
            matches = a_segment_map.get(subsegment)
        if matches:
            for matched_segment in matches:
                matched_segment.match = subsegment  # flag 'a' side as matched
            subsegment.match = matches[0]  # Always associate with first match
            yield subsegment  # Dump the whole matched cluster
        else:
            for seg_or_tok in _match_segments(a_segment_map, subsegment):
                yield seg_or_tok  # Recurse
def _expand_unmatched_segments(a_segments):
    """Yield 'a' content, keeping matched segments whole and expanding the
    rest down to tokens."""
    for subsegment in a_segments:
        if not isinstance(subsegment, Segment):
            yield subsegment  # Plain token: emit as-is
            continue
        is_matched = isinstance(subsegment, MatchableSegment) and \
                     subsegment.match is not None
        if is_matched:
            yield subsegment  # Matched segment stays clustered
        else:
            for seg_or_tok in _expand_unmatched_segments(subsegment):
                yield seg_or_tok  # Recurse
class SegmentOperationsExpander:
    """Expands operations over mixed token/segment sequences back into
    operations over flat token offsets.

    ``a_pos``/``b_pos`` count how many tokens have been emitted so far on
    each side; matched segments in insert/delete ranges become Equal
    operations pointing at their stored match positions in 'a'.
    """
    def __init__(self, operations, a_token_segments, b_token_segments):
        # Running token offsets into the flattened 'a' and 'b' sequences.
        self.a_pos = 0
        self.b_pos = 0
        self.a_token_segments = a_token_segments
        self.b_token_segments = b_token_segments
        self.operations = operations
    def expand(self):
        """Yield the expanded token-offset operations in order."""
        for operation in self.operations:
            if isinstance(operation, Equal):
                expanded_operations = self._process_equal(operation)
            elif isinstance(operation, Insert):
                expanded_operations = self._process_insert(operation)
            elif isinstance(operation, Delete):
                expanded_operations = self._process_delete(operation)
            else:
                assert False, "Should never happen"
            for operation in expanded_operations: yield operation
    def _process_equal(self, op):
        # Equal spans cover the same tokens on both sides; advance both
        # counters by the flattened token length.
        a1 = self.a_pos
        b1 = self.b_pos
        token_len = sum(1 for t_s in self.a_token_segments[op.a1:op.a2]
                        for _ in t_s.tokens())
        self.a_pos += token_len
        self.b_pos += token_len
        yield Equal(a1, self.a_pos, b1, self.b_pos)
    def _process_insert(self, op):
        # Runs of plain tokens become Inserts; matched segments embedded in
        # the range become Equals against their position in 'a'.
        inserted_token_count = 0
        for t_s in self.b_token_segments[op.b1:op.b2]:
            if isinstance(t_s, Token):
                inserted_token_count += 1
            else: # Found a matched segment
                segment = t_s
                # First, emit an insert for the tokens we have seen so far
                if inserted_token_count > 0:
                    b1 = self.b_pos
                    self.b_pos += inserted_token_count
                    yield Insert(self.a_pos, self.a_pos, b1, self.b_pos)
                    inserted_token_count = 0
                # Now, emit an Equal for the matched segment
                b1 = self.b_pos
                self.b_pos += sum(1 for _ in segment.tokens())
                yield Equal(segment.match.start, segment.match.end,
                            b1, self.b_pos)
        # Cleanup!  Make sure we emit any remaining inserted tokens.
        if inserted_token_count > 0:
            b1 = self.b_pos
            self.b_pos += inserted_token_count
            yield Insert(self.a_pos, self.a_pos, b1, self.b_pos)
            inserted_token_count = 0
    def _process_delete(self, op):
        # Runs of plain tokens become Deletes; matched segments are not
        # deleted (they moved), so only 'a_pos' advances past them.
        removed_token_count = 0
        for t_s in self.a_token_segments[op.a1:op.a2]:
            if isinstance(t_s, Token):
                removed_token_count += 1
            else: # Found a matched token... not removed -- just moved
                segment = t_s
                if removed_token_count > 0:
                    a1 = self.a_pos
                    self.a_pos += removed_token_count
                    yield Delete(a1, self.a_pos, self.b_pos, self.b_pos)
                    removed_token_count = 0
                # update & reset!
                self.a_pos += sum(1 for _ in segment.tokens())
        # cleanup
        if removed_token_count > 0:
            a1 = self.a_pos
            self.a_pos += removed_token_count
            yield Delete(a1, self.a_pos, self.b_pos, self.b_pos)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a convenient wrapper for spawning a test lighttpd instance.
Usage:
lighttpd_server PATH_TO_DOC_ROOT
"""
import codecs
import contextlib
import httplib
import os
import random
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from pylib import constants
from pylib import pexpect
class LighttpdServer(object):
  """Wraps lighttpd server, providing robust startup.

  Args:
    document_root: Path to root of this server's hosted files.
    port: TCP port on the _host_ machine that the server will listen on. If
      omitted it will attempt to use 9000, or if unavailable it will find
      a free port from 8001 - 8999.
    lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
    base_config_path: If supplied this file will replace the built-in default
      lighttpd config file.
    extra_config_contents: If specified, this string will be appended to the
      base config (default built-in, or from base_config_path).
    config_path, error_log, access_log: Optional paths where the class should
      place temporary files for this session.
  """

  def __init__(self, document_root, port=None,
               lighttpd_path=None, lighttpd_module_path=None,
               base_config_path=None, extra_config_contents=None,
               config_path=None, error_log=None, access_log=None):
    # All generated files for one session live under a single temp dir so
    # ShutdownHttpServer can remove everything with one rmtree.
    self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
    self.document_root = os.path.abspath(document_root)
    self.fixed_port = port
    self.port = port or constants.LIGHTTPD_DEFAULT_PORT
    # Randomized tag lets _TestServerConnection confirm it reached *this*
    # instance rather than some other server already on the port.
    self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
    self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
    self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
    self.base_config_path = base_config_path
    self.extra_config_contents = extra_config_contents
    self.config_path = config_path or self._Mktmp('config')
    self.error_log = error_log or self._Mktmp('error_log')
    self.access_log = access_log or self._Mktmp('access_log')
    self.pid_file = self._Mktmp('pid_file')
    self.process = None

  def _Mktmp(self, name):
    """Returns a path for |name| inside this session's temp directory."""
    return os.path.join(self.temp_dir, name)

  def _GetRandomPort(self):
    """Returns a random port from the test-server range in constants.py."""
    return random.randint(constants.LIGHTTPD_RANDOM_PORT_FIRST,
                          constants.LIGHTTPD_RANDOM_PORT_LAST)

  def StartupHttpServer(self):
    """Starts up a http server with specified document root and port."""
    # If we want a specific port, make sure no one else is listening on it.
    if self.fixed_port:
      self._KillProcessListeningOnPort(self.fixed_port)
    # Retry loop: each pass writes a config, spawns lighttpd and probes it.
    # On a port collision (and no fixed port) a new random port is chosen.
    while True:
      if self.base_config_path:
        # Read the config
        with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
          config_contents = f.read()
      else:
        config_contents = self._GetDefaultBaseConfig()
      if self.extra_config_contents:
        config_contents += self.extra_config_contents
      # Write out the config, filling in placeholders from the members of |self|
      with codecs.open(self.config_path, 'w', 'utf-8') as f:
        f.write(config_contents % self.__dict__)
      if (not os.path.exists(self.lighttpd_path) or
          not os.access(self.lighttpd_path, os.X_OK)):
        raise EnvironmentError(
            'Could not find lighttpd at %s.\n'
            'It may need to be installed (e.g. sudo apt-get install lighttpd)'
            % self.lighttpd_path)
      # -D keeps lighttpd in the foreground so pexpect can watch its output.
      self.process = pexpect.spawn(self.lighttpd_path,
                                   ['-D', '-f', self.config_path,
                                    '-m', self.lighttpd_module_path],
                                   cwd=self.temp_dir)
      client_error, server_error = self._TestServerConnection()
      if not client_error:
        # Sanity check: the pid file written by lighttpd must match the
        # process we spawned (i.e. we are not talking to a stranger).
        assert int(open(self.pid_file, 'r').read()) == self.process.pid
        break
      self.process.close()
      if self.fixed_port or not 'in use' in server_error:
        print 'Client error:', client_error
        print 'Server error:', server_error
        return False
      self.port = self._GetRandomPort()
    return True

  def ShutdownHttpServer(self):
    """Shuts down our lighttpd processes."""
    if self.process:
      self.process.terminate()
    shutil.rmtree(self.temp_dir, ignore_errors=True)

  def _TestServerConnection(self):
    """Polls the freshly spawned server until it answers or fails.

    Returns:
      Tuple (client_error, server_msg); client_error is None on success.
    """
    # Wait for server to start
    server_msg = ''
    for timeout in xrange(1, 5):
      client_error = None
      try:
        with contextlib.closing(httplib.HTTPConnection(
            '127.0.0.1', self.port, timeout=timeout)) as http:
          http.set_debuglevel(timeout > 3)
          http.request('HEAD', '/')
          r = http.getresponse()
          r.read()
          if (r.status == 200 and r.reason == 'OK' and
              r.getheader('Server') == self.server_tag):
            return (None, server_msg)
          client_error = ('Bad response: %s %s version %s\n ' %
                          (r.status, r.reason, r.version) +
                          '\n '.join([': '.join(h) for h in r.getheaders()]))
      except (httplib.HTTPException, socket.error) as client_error:
        pass  # Probably too quick connecting: try again
      # Check for server startup error messages
      ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
                               timeout=timeout)
      if ix == 2:  # stdout spew from the server
        server_msg += self.process.match.group(0)
      elif ix == 1:  # EOF -- server has quit so giveup.
        client_error = client_error or 'Server exited'
        break
    return (client_error or 'Timeout', server_msg)

  def _KillProcessListeningOnPort(self, port):
    """Checks if there is a process listening on port number |port| and
    terminates it if found.

    Args:
      port: Port number to check.
    """
    if subprocess.call(['fuser', '-kv', '%d/tcp' % port]) == 0:
      # Give the process some time to terminate and check that it is gone.
      time.sleep(2)
      assert subprocess.call(['fuser', '-v', '%d/tcp' % port]) != 0, \
          'Unable to kill process listening on port %d.' % port

  def _GetDefaultBaseConfig(self):
    """Returns the built-in lighttpd config template.

    ``%(...)s`` placeholders are filled from ``self.__dict__`` by
    StartupHttpServer before the config is written to disk.
    """
    return """server.tag = "%(server_tag)s"
server.modules = ( "mod_access",
"mod_accesslog",
"mod_alias",
"mod_cgi",
"mod_rewrite" )

# default document root required
#server.document-root = "."

# files to check for if .../ is requested
index-file.names = ( "index.php", "index.pl", "index.cgi",
"index.html", "index.htm", "default.htm" )
# mimetype mapping
mimetype.assign = (
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "application/x-javascript",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".manifest" => "text/cache-manifest",
)

# Use the "Content-Type" extended attribute to obtain mime type if possible
mimetype.use-xattr = "enable"

##
# which extensions should not be handle via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )

server.bind = "127.0.0.1"
server.port = %(port)s

## virtual directory listings
dir-listing.activate = "enable"
#dir-listing.encoding = "iso-8859-2"
#dir-listing.external-css = "style/oldstyle.css"

## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"

#### SSL engine
#ssl.engine = "enable"
#ssl.pemfile = "server.pem"

# Autogenerated test-specific config follows.

cgi.assign = ( ".cgi" => "/usr/bin/env",
".pl" => "/usr/bin/env",
".asis" => "/bin/cat",
".php" => "/usr/bin/php-cgi" )

server.errorlog = "%(error_log)s"
accesslog.filename = "%(access_log)s"
server.upload-dirs = ( "/tmp" )
server.pid-file = "%(pid_file)s"
server.document-root = "%(document_root)s"

"""
def main(argv):
  """Runs a LighttpdServer from the command line until the user hits Enter.

  Args:
    argv: sys.argv; argv[1:] are forwarded positionally to the
      LighttpdServer constructor (document root first).
  """
  server = LighttpdServer(*argv[1:])
  try:
    if server.StartupHttpServer():
      raw_input('Server running at http://127.0.0.1:%s -'
                ' press Enter to exit it.' % server.port)
    else:
      print 'Server exit code:', server.process.exitstatus
  finally:
    # Always tear down the child process and temp files, even on Ctrl-C.
    server.ShutdownHttpServer()


if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
|
import os;
import time;
import zipfile
import shutil
import traceback
# Build layout -- all paths are relative to build.py unless noted otherwise.
Dev = r"..\dev"  # relative to build.py
Release = r"..\release"  # relative to build.py
PathToMonk = r"Jagent\Monk.jar"  # relative to build.py
Readme = "readme.txt"  # relative to dev
Versionfile = "version.txt"  # relative to build.py
# Files whose text is scanned for [[<[[...]]>]] replacement markers.
FileTypesToParse= (".txt",".cos",".pray.cos", ".catalogue")
ReleaseName = "Children of Capillata"
ReleaseTemp = os.path.join(Release, "temp")
# Current version as [major, minor, patch, build] strings; main() rewrites
# this from version.txt with an incremented build number.
VersionArray = ["0", "0", "0", "0"]
def main():
    """Runs one build: bump the version, expand markers, package, and zip."""
    # read in version from versionFile, update it, and save it to versionFile
    with open(Versionfile, 'r+') as f:
        versionFileLineOld = f.readline().rstrip()
        # Find last build number and add 1 to it
        versionArrayOld = versionFileLineOld.split(".")
        assert(len(versionArrayOld) == 4)
        assert(versionArrayOld[0].isdigit())
        assert(versionArrayOld[1].isdigit())
        assert(versionArrayOld[2].isdigit())
        # Build slot may also be "-1" (pre-first-build) or "x" (fresh clone).
        assert(versionArrayOld[3].isdigit()
               or versionArrayOld[3]=="-1"
               or versionArrayOld[3]=="x")
        oldBuildNumber = versionArrayOld[3]
        if(oldBuildNumber.isdigit() or oldBuildNumber=="-1"):
            newBuildNumber = str(int(oldBuildNumber) + 1)
        else:
            # Uninitialized repo clone. Start at 1
            newBuildNumber = "1"
        versionArrayOld[3] = newBuildNumber
        # Publish the bumped version for Formatter.getFormat().
        global VersionArray
        VersionArray = versionArrayOld
        versionFileLineNew = '.'.join(VersionArray)
        #Save new version
        f.seek(0)
        f.write(versionFileLineNew + "\n")
        f.truncate()
    print("Building " + versionFileLineNew)
    print(" " + Formatter.getFormat("version", "vW.X|vW.X.Y|vW.X.Y.Z"))
    print(" " + Formatter.getFormat("version", "vW.X|vW.X.Y.Z-T"))
    print(" " + Formatter.getFormat("version", "N vW.X|vW.X.Y.Z-T"))
    print("")
    #clear out or make release folder
    if os.path.exists(Release):
        for root, dirs, files in os.walk(Release):
            for f in files:
                os.unlink(os.path.join(root, f))
            for d in dirs:
                shutil.rmtree(os.path.join(root, d))
    else:
        os.makedirs(Release)
    os.makedirs(ReleaseTemp)
    for dirpath, dirnames, filenames in os.walk(Dev):
        if(len(filenames)!=0):
            print("Processing " + os.path.join(dirpath))
        # Replace all ReSTing's (Replacement System Thing piece of text) with their parsed outputs.
        # For example:
        #   [[<[[Format: version, vA.B|vA.B.C.D-T]]>]]
        # may become
        #   v1.3
        # or
        #   V1.3.0.1-alpha
        for filename in [f for f in filenames if f.endswith(FileTypesToParse)]:
            fullPathFile = os.path.join(dirpath, filename)
            print("Backing up " + fullPathFile)
            with open(fullPathFile, 'r+') as f:
                text = f.read()
                # Make backup of original file
                with open(fullPathFile + ".bak", 'w') as backupFile:
                    backupFile.write(text)
                f.seek(0)
                f.write(Parser.parseAndReplace(text))
                f.truncate()
        for filename in [f for f in filenames if f.endswith(".pray.cos")]:
            # Package using Monk
            fullPathCos = os.path.join(dirpath, filename)
            print("Packaging " + fullPathCos)
            syatemArg = PathToMonk + " " + "\"" + fullPathCos + "\""
            os.system(syatemArg)
        # NOTE(review): this inner walk rebinds the outer loop variables
        # (dirpath/dirnames/filenames), clobbering the outer walk's state
        # for the current iteration -- confirm this is intentional.
        for dirpath, dirnames, filenames in os.walk(dirpath):
            for filename in [f for f in filenames if f.endswith(".agents")]:
                # Moving the .agents to the releaseTemp folder
                fullPathAgent = os.path.join(dirpath, filename)
                newName = os.path.join(ReleaseTemp, filename)
                print("Moving " + newName)
                if os.path.exists(newName):
                    os.remove(newName)
                os.rename(fullPathAgent, newName)
        if(len(filenames)!=0):
            print("End " + os.path.join(dirpath), end="\n\n")
    # Copy the readme
    readmeDestination = os.path.join(ReleaseTemp, Readme)
    print("\nCopying " + readmeDestination)
    if os.path.exists(readmeDestination):
        os.remove(readmeDestination)
    shutil.copyfile(os.path.join(Dev, Readme), readmeDestination)
    # Zip up files
    zipFile = os.path.join(Release, Formatter.getFormat("version", "N vW.X|vW.X.Y.Z-T"))
    print("Zipping " + zipFile + ".zip")
    shutil.make_archive(zipFile, 'zip', ReleaseTemp)
    shutil.rmtree(ReleaseTemp)
    # Replace original files with backups
    print("")
    for dirpath, dirnames, filenames in os.walk(Dev):
        for filename in [f for f in filenames if f.endswith(".bak")]:
            fullPathFileWithBak = os.path.join(dirpath, filename)
            fullPathFileNoBak = fullPathFileWithBak.replace(".bak", "")
            print("Deleting " + fullPathFileWithBak)
            os.remove(fullPathFileNoBak)
            os.rename(fullPathFileWithBak, fullPathFileNoBak)
class Parser():
    """Finds ReSTing markers of the form ``[[<[[command: args]]>]]`` in text
    and replaces each one with its computed expansion."""

    @staticmethod
    def parseAndReplace(fileIn):
        """Return *fileIn* with every ``[[<[[...]]>]]`` marker replaced.

        Implemented as a character state machine: states 0-4 consume the
        opening ``[[<[[`` and states 5-9 consume the closing ``]]>]]``.

        Raises:
            ValueError: if an opening marker is never closed.
        """
        index = 0
        state = 0
        restingStart = -1
        lastRestingEnd = -1
        fileOutChunked = []
        # We're looking for strings like "[[<[[blah blah blah]]>]]"
        # The state variable stores how far we've gotten:
        # 0 Looking for `[`[<[[    ]]>]]
        # 1 Looking for [`[`<[[    ]]>]]
        # 2 Looking for [[`<`[[    ]]>]]
        # 3 Looking for [[<`[`[    ]]>]]
        # 4 Looking for [[<[`[`    ]]>]]
        # 5 Looking for [[<[[    `]`]>]]
        # 6 Looking for [[<[[    ]`]`>]]
        # 7 Looking for [[<[[    ]]`>`]]
        # 8 Looking for [[<[[    ]]>`]`]
        # 9 Looking for [[<[[    ]]>]`]`
        while index < len(fileIn):
            if(state==0):
                if(fileIn[index]=='['):
                    state = 1
            elif(state==1):
                if(fileIn[index]=='['):
                    state = 2
                else:
                    state = 0
            elif(state==2):
                if(fileIn[index]=='<'):
                    state = 3
                else:
                    state = 0
            elif(state==3):
                if(fileIn[index]=='['):
                    state = 4
                else:
                    state = 0
            elif(state==4):
                if(fileIn[index]=='['):
                    state = 5
                    restingStart = index - 4
                    # We found an opening [[<[[
                    # Save everything up till here for file output
                    fileOutChunked += fileIn[lastRestingEnd+1:restingStart]
                else:
                    state = 0
            elif(state==5):
                if(fileIn[index]==']'):
                    state = 6
            elif(state==6):
                if(fileIn[index]==']'):
                    state = 7
                else:
                    state = 5
            elif(state==7):
                if(fileIn[index]=='>'):
                    state = 8
                else:
                    state = 5
            elif(state==8):
                if(fileIn[index]==']'):
                    state = 9
                else:
                    state = 5
            else:
                if(fileIn[index]==']'):
                    # We found a closing ]]>]]
                    state = 0
                    lastRestingEnd = index
                    fileOutChunked += Parser.parseResting(fileIn, restingStart, index)
                else:
                    state = 5
            index = index+1
        if(state > 4):
            # BUG FIX: the original used ``assert(False, "msg")`` which
            # asserts a non-empty tuple -- always true, so it never fired.
            raise ValueError("Error: Found opening [[<[[ but no closing ]]>]].")
        # Save everything else for file output
        fileOutChunked += fileIn[lastRestingEnd+1:index]
        return(''.join(fileOutChunked))

    @staticmethod
    def parseResting(fileIn, startIndex, endIndex):
        """Expand one marker located at fileIn[startIndex:endIndex+1].

        The marker body is ``command: arg0, arg1, ...``; unknown commands
        expand to the empty string.
        """
        # Strip the surrounding "[[<[[" and "]]>]]" delimiters.
        resting = fileIn[startIndex+5:endIndex-4]
        commandEnd = str.find(resting, ':')
        command = resting[:commandEnd]
        arguments = resting[commandEnd+1:].split(",")
        arguments = filter(None, arguments)
        argumentsList = list(map(str.strip, arguments))
        if(command.upper() == "FORMAT"):
            toReturn = Formatter.getFormat(argumentsList[0], argumentsList[1])
        else:
            toReturn = ""
        return toReturn
class Formatter():
    """Produces formatted version/name strings from the global VersionArray."""

    # Recognized format categories.
    types = ["version", "date"]

    class ReleaseType():
        FULL = 0   # 1.0|1.1 Regular release
        BETA = 1   # 1.1.1 Beta test release
        ALPHA = 2  # 1.1.1.1 Alpha test (maybe release)

    @staticmethod
    def validType(value):
        """Return True if *value* names a known format category."""
        assert(isinstance(value, str))
        return value in Formatter.types

    @classmethod
    def getFormat(cls, formatType, formatArgment):
        """Return the string described by *formatArgment* for the current
        global VersionArray.

        Args:
            formatType: one of Formatter.types (only "version" is used).
            formatArgment: template selector, e.g. "vW.X|vW.X.Y|vW.X.Y.Z".

        Raises:
            ValueError: if *formatArgment* is not a known template.
        """
        assert(Formatter.validType(formatType))
        assert(isinstance(formatArgment, str))
        # Classify the release from the version components. Avoids the
        # original's `type` local, which shadowed the builtin.
        if(VersionArray[3]!="0"):
            # x.x.x.1
            release_type = cls.ReleaseType.ALPHA
        elif(VersionArray[2]!="0"):
            # x.x.1.0
            release_type = cls.ReleaseType.BETA
        else:
            # x.x.0.0
            release_type = cls.ReleaseType.FULL
        if(formatArgment == "vW.X|vW.X.Y.Z-T"):
            if(release_type == cls.ReleaseType.FULL):
                toReturn = "v" + ".".join(VersionArray[:2])
            else:
                toReturn = "v" + ".".join(VersionArray)
                toReturn += ("-beta" if release_type == cls.ReleaseType.BETA
                             else "-alpha")
        elif(formatArgment == "vW.X|vW.X.Y|vW.X.Y.Z"):
            if(release_type == cls.ReleaseType.FULL):
                toReturn = "v" + ".".join(VersionArray[:2])
            elif(release_type == cls.ReleaseType.BETA):
                toReturn = "v" + ".".join(VersionArray[:3])
            else:
                toReturn = "v" + ".".join(VersionArray)
        elif(formatArgment == "N vW.X|vW.X.Y.Z-T"):
            if(release_type == cls.ReleaseType.FULL):
                toReturn = ReleaseName + " v" + ".".join(VersionArray[:2])
            else:
                # NOTE(review): unlike the FULL branch, the original omitted
                # ReleaseName here; behavior preserved -- confirm intent.
                toReturn = "v" + ".".join(VersionArray)
                toReturn += ("-beta" if release_type == cls.ReleaseType.BETA
                             else "-alpha")
        else:
            # Originally a bare assert(False); raise so the check survives -O.
            raise ValueError("Unknown format argument: " + formatArgment)
        return toReturn
# Script entry point: run the build, report success or the full traceback,
# then pause so a double-clicked console window stays readable.
try:
    main()
    print("\n\ndone.")
    print("(5 seconds...)")
    time.sleep(5)
except Exception as e:
    print("\n\nError, build failed:\n\n" + str(e) + "\n\n")
    print(traceback.format_exc())
    print("\n\ndone.")
    input("Press any key to exit.")
|
|
"""
Copyright (c) 2009, Horst Gutmann
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the project nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------------
This module holds all the structures produced by the parser. The main
structures are the classes ``Bibliography`` and ``Entry``. Both are
slightly enhanced subclasses of ``dict`` and offer some additional
field validation.
Entry also has a handful of subclasses; one for each common entry-type
in BibTeX.
"""
from ..bibtex import exceptions
class TypeRegistry(object):
    """
    Global registry mapping lowercase entry-type names to Entry subclasses.
    """

    _registry = {}

    @classmethod
    def register(cls, name, type_):
        """
        Associate *name* (case-insensitive) with *type_*, which must be a
        subclass of structures.Entry.
        """
        if issubclass(type_, Entry):
            cls._registry[name.lower()] = type_
        else:
            raise exceptions.InvalidEntryType("%s is not a subclass of Entry" % str(type_))

    @classmethod
    def get_type(cls, name):
        """
        Look up the type registered under *name* as it appears in a bibtex
        file; returns None when nothing is registered.
        """
        return cls._registry.get(name.lower())
class Bibliography(dict):
    """
    A container for all entries of a BibTeX file, including the
    cross-reference validator.
    """

    def __init__(self):
        super(Bibliography, self).__init__()
        self.crossrefs = []

    def validate(self, **kwargs):
        """
        Validate every entry (forwarding the given keyword arguments) after
        first resolving all cross-references between the entries.
        """
        self.check_crossrefs()
        for entry in self.values():
            entry.validate(**kwargs)

    def check_crossrefs(self):
        """
        Verify that every ``crossref`` field points at an entry of *this*
        Bibliography instance; raise BrokenCrossReferences with the
        offending entries otherwise.
        """
        unresolved = [entry for entry in self.values()
                      if entry.get('crossref') is not None
                      and entry.get('crossref') not in self]
        if unresolved:
            raise exceptions.BrokenCrossReferences('One or more cross reference could not'
                                                   ' be resolved', unresolved)

    def add(self, entry):
        """
        Store *entry* under its ``name`` attribute.
        """
        self[entry.name] = entry
class Entry(dict):
    """
    A dict-based representation of a single entry within a bibliography.
    Provides a generic validator for required (and optionally unsupported)
    fields and serves as the base class for the concrete BibTeX entry types.
    """

    required_fields = ('title',)
    optional_fields = ('key', )

    def __init__(self, name=None, **kwargs):
        super(Entry, self).__init__(**kwargs)
        self.name = name

    def validate(self, raise_unsupported=False):
        """
        Ensure every required field is present; with ``raise_unsupported``
        also reject fields that are neither required nor optional.

        Raises InvalidStructure when a problem is found.
        """
        present = set(self.keys())
        known_required = set()
        missing = []
        for requirement in self.required_fields:
            if isinstance(requirement, (list, tuple)):
                # A tuple/list lists alternatives: any one of them suffices.
                matches = [alt for alt in requirement if alt in present]
                known_required.update(matches)
                satisfied = bool(matches)
            else:
                known_required.add(requirement)
                satisfied = requirement in present
            if not satisfied:
                missing.append(requirement)
        extra = present - known_required - set(self.optional_fields)
        if missing or (raise_unsupported and extra):
            raise exceptions.InvalidStructure("Missing or unsupported fields found",
                                              required_fields=missing,
                                              unsupported_fields=extra)
# The following required_fields/optional_fields attributes are based on
# http://en.wikipedia.org/wiki/Bibtex


class Article(Entry):
    """Article in a journal, magazine etc."""
    required_fields = ('author', 'title', 'journal', 'year')
    optional_fields = ('volume', 'number', 'pages', 'month', 'note', 'key')

TypeRegistry.register('article', Article)


class Book(Entry):
    """A book that has already been published or at least has a publisher."""
    # ('author', 'editor') lists alternatives: either one satisfies the
    # requirement (see Entry.validate).
    required_fields = (('author', 'editor'), 'title', 'publisher', 'year')
    optional_fields = ('address', 'pages', 'volume', 'series', 'edition',
                       'month', 'note', 'key')

TypeRegistry.register('book', Book)


class Booklet(Entry):
    """
    Similar to a book in the sense that it is bound but without a "real"
    publisher.
    """
    required_fields = Entry.required_fields
    optional_fields = ('author', 'howpublished', 'address', 'month', 'year',
                       'note', 'key')

TypeRegistry.register('booklet', Booklet)
class Incollection(Entry):
    """Part of a book but with its own title."""
    # BUG FIX: the original had a trailing comma after the tuple, wrapping it
    # in an outer 1-tuple; Entry.validate then treated the four names as
    # *alternatives* (any one sufficed) instead of requiring each of them.
    required_fields = ('author', 'title', 'year', 'booktitle')
    optional_fields = ('editor', 'pages', 'organization', 'publisher',
                       'address', 'month', 'note', 'key')

TypeRegistry.register('incollection', Incollection)
class Inproceedings(Incollection):
    """Article that is part of a conference proceedings."""
    # Inherits required/optional fields from Incollection unchanged.
    pass

TypeRegistry.register('inproceedings', Inproceedings)


class Conference(Inproceedings):
    """Similar to ``Inproceedings``."""
    required_fields = ('author', 'title', 'booktitle', 'year')
    optional_fields = ('editor', 'pages', 'organization', 'publisher',
                       'address', 'month', 'note', 'key')

TypeRegistry.register('conference', Conference)


class Inbook(Entry):
    """Part of a book."""
    # Both ('author', 'editor') and ('chapter', 'pages') are alternative
    # groups: one member of each group must be present.
    required_fields = (('author', 'editor'), 'title', 'publisher', 'year',
                       ('chapter', 'pages'))
    optional_fields = ('volume', 'series', 'address', 'edition', 'month',
                       'note', 'key')

TypeRegistry.register('inbook', Inbook)


class Manual(Entry):
    """A technical manual."""
    required_fields = ('title',)
    optional_fields = ('author', 'organization', 'address', 'edition', 'year',
                       'month', 'note', 'key')

TypeRegistry.register('manual', Manual)


class Mastersthesis(Entry):
    """A Master's thesis"""
    required_fields = ('author', 'title', 'school', 'year')
    optional_fields = ('address', 'month', 'note', 'key')

TypeRegistry.register('mastersthesis', Mastersthesis)


class Misc(Entry):
    """Type of document that doesn't fit into any of the other categories."""
    required_fields = []
    optional_fields = ('author', 'title', 'howpublished', 'month', 'year',
                       'note', 'key')

TypeRegistry.register('misc', Misc)


class Phdthesis(Mastersthesis):
    """A Ph.D. thesis."""
    pass

TypeRegistry.register('phdthesis', Phdthesis)


class Proceedings(Entry):
    """Conference proceedings."""
    required_fields = ('title', 'year')
    optional_fields = ('editor', 'publisher', 'organization', 'address',
                       'month', 'note', 'key')

TypeRegistry.register('proceedings', Proceedings)


class Techreport(Entry):
    """A technical report published by an institution."""
    required_fields = ('author', 'title', 'institution', 'year')
    optional_fields = ('type', 'number', 'address', 'month', 'note', 'key')

TypeRegistry.register('techreport', Techreport)


class Unpublished(Entry):
    """A not yet published document that already has an author and a title."""
    required_fields = ('author', 'title', 'note',)
    optional_fields = ('month', 'year', 'key',)

TypeRegistry.register('unpublished', Unpublished)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
# Canonical optimizer names accepted by optimize_loss() as strings, mapped
# to their tf.train implementations.
OPTIMIZER_CLS_NAMES = {
    "Adagrad": train.AdagradOptimizer,
    "Adam": train.AdamOptimizer,
    "Ftrl": train.FtrlOptimizer,
    "Momentum": train.MomentumOptimizer,
    "RMSProp": train.RMSPropOptimizer,
    "SGD": train.GradientDescentOptimizer,
}

# Quantities optimize_loss() knows how to export as TensorBoard summaries.
OPTIMIZER_SUMMARIES = [
    "learning_rate",
    "loss",
    "gradients",
    "gradient_norm",
]
def optimize_loss(loss,
                  global_step,
                  learning_rate,
                  optimizer,
                  gradient_noise_scale=None,
                  gradient_multipliers=None,
                  clip_gradients=None,
                  learning_rate_decay_fn=None,
                  update_ops=None,
                  variables=None,
                  name=None,
                  summaries=None,
                  colocate_gradients_with_ops=False,
                  increment_global_step=True):
  """Given loss and parameters for optimizer, returns a training op.

  Various ways of passing optimizers, include:

  - string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
      for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
  - function, takes learning rate `Tensor` as argument and must return
      `Optimizer` instance. E.g. `optimize_loss(...,
      optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
    Alternatively, if `learning_rate` is `None`, the function takes no
    arguments. E.g. `optimize_loss(..., learning_rate=None,
      optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
  - class, subclass of `Optimizer` that takes only one required argument -
      learning rate, such as AdamOptimizer, AdagradOptimizer.
      E.g. `optimize_loss(..., optimizer=tf.train.AdagradOptimizer)`.
  - object, instance of subclass of `Optimizer`.
      E.g., `optimizer_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.

  Args:
    loss: Scalar `Tensor`.
    global_step: Scalar int `Tensor`, step counter to update on each step
                 unless `increment_global_step` is `False`. If not supplied,
                 it will be fetched from the default graph (see
                 `tf.train.get_global_step` for details). If it's
                 not been created, no step will be incremented with each weight
                 update. `learning_rate_decay_fn` requires `global_step`.
    learning_rate: float or `Tensor`, magnitude of update per each training
                   step. Can be `None`.
    optimizer: string, class or optimizer instance, used as trainer.
               string should be name of optimizer, like 'SGD',
                 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
               class should be sub-class of `tf.Optimizer` that implements
                 `compute_gradients` and `apply_gradients` functions.
               optimizer instance should be instantiation of `tf.Optimizer`
                 sub-class and have `compute_gradients` and `apply_gradients`
                 functions.
    gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
                          value.
    gradient_multipliers: dict of variables or variable names to floats.
                          If present, gradients for specified
                          variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If float, is provided, a global
      clipping is applied to prevent the norm of the gradient to exceed this
      value. Alternatively, a callable can be provided e.g.: adaptive_clipping.
      This callable takes a `list` of `(gradients, variables)` `tuple`s and
      returns the same thing with the gradients modified.
    learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
                            `Tensor`s, returns `Tensor`.
                            Can be used to implement any learning rate decay
                            functions.
                            For example: `tf.train.exponential_decay`.
                            Ignored if `learning_rate` is not supplied.
    update_ops: list of update `Operation`s to execute at each step. If `None`,
                uses elements of UPDATE_OPS collection. The order of execution
                between `update_ops` and `loss` is non-deterministic.
    variables: list of variables to optimize or
               `None` to use all trainable variables.
    name: The name for this operation is used to scope operations and summaries.
    summaries: List of internal quantities to visualize on tensorboard. If not
               set only the loss and the learning rate will be reported. The
               complete list is in OPTIMIZER_SUMMARIES.
    colocate_gradients_with_ops: If True, try colocating gradients with the
                                 corresponding op.
    increment_global_step: Whether to increment `global_step`. If your model
      calls `optimize_loss` multiple times per training step (e.g. to optimize
      different parts of the model), use this arg to avoid incrementing
      `global_step` more times than necessary.

  Returns:
    Training op.

  Raises:
    ValueError: if:
        * `loss` is an invalid type or shape.
        * `global_step` is an invalid type or shape.
        * `learning_rate` is an invalid type or value.
        * `optimizer` is wrong type.
        * `clip_gradients` is not float or callable.
        * `learning_rate` and `learning_rate_decay_fn` are supplied, but no
          `global_step` is available.
        * `gradients` is empty
  """
  loss = ops.convert_to_tensor(loss)
  contrib_framework.assert_scalar(loss)
  if global_step is None:
    global_step = contrib_framework.get_global_step()
  else:
    contrib_framework.assert_global_step(global_step)
  with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
    # Update ops take UPDATE_OPS collection if not provided.
    if update_ops is None:
      update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are ran before computing loss.
    if update_ops:
      loss = control_flow_ops.with_dependencies(list(update_ops), loss)

    # Learning rate variable, with possible decay.
    lr = None
    if learning_rate is not None:
      if (isinstance(learning_rate, ops.Tensor) and
          learning_rate.get_shape().ndims == 0):
        lr = learning_rate
      elif isinstance(learning_rate, float):
        if learning_rate < 0.0:
          # BUG FIX: the original passed `learning_rate` as a second
          # positional argument to ValueError (logging-style), yielding a
          # tuple as the message; interpolate it into the string instead.
          raise ValueError("Invalid learning_rate %s." % learning_rate)
        lr = vs.get_variable(
            "learning_rate", [],
            trainable=False,
            initializer=init_ops.constant_initializer(learning_rate))
      else:
        raise ValueError("Learning rate should be 0d Tensor or float. "
                         "Got %s of type %s" % (str(learning_rate),
                                                str(type(learning_rate))))
    if summaries is None:
      summaries = ["loss", "learning_rate"]
    else:
      for summ in summaries:
        if summ not in OPTIMIZER_SUMMARIES:
          raise ValueError("Summaries should be one of [%s], you provided %s." %
                           (", ".join(OPTIMIZER_SUMMARIES), summ))
    if learning_rate is not None and learning_rate_decay_fn is not None:
      if global_step is None:
        raise ValueError("global_step is required for learning_rate_decay_fn.")
      lr = learning_rate_decay_fn(lr, global_step)
      if "learning_rate" in summaries:
        summary.scalar("learning_rate", lr)

    # Create optimizer, given specified parameters.
    if isinstance(optimizer, six.string_types):
      if lr is None:
        raise ValueError("Learning rate is None, but should be specified if "
                         "optimizer is string (%s)." % optimizer)
      if optimizer not in OPTIMIZER_CLS_NAMES:
        raise ValueError(
            "Optimizer name should be one of [%s], you provided %s." %
            (", ".join(OPTIMIZER_CLS_NAMES), optimizer))
      opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
    elif (isinstance(optimizer, type) and
          issubclass(optimizer, optimizer_.Optimizer)):
      if lr is None:
        raise ValueError("Learning rate is None, but should be specified if "
                         "optimizer is class (%s)." % optimizer)
      opt = optimizer(learning_rate=lr)
    elif isinstance(optimizer, optimizer_.Optimizer):
      opt = optimizer
    elif callable(optimizer):
      if learning_rate is not None:
        opt = optimizer(lr)
      else:
        opt = optimizer()
      if not isinstance(opt, optimizer_.Optimizer):
        raise ValueError("Unrecognized optimizer: function should return "
                         "subclass of Optimizer. Got %s." % str(opt))
    else:
      raise ValueError("Unrecognized optimizer: should be string, "
                       "subclass of Optimizer, instance of "
                       "subclass of Optimizer or function with one argument. "
                       "Got %s." % str(optimizer))

    # All trainable variables, if specific variables are not specified.
    if variables is None:
      variables = vars_.trainable_variables()

    # Compute gradients.
    gradients = opt.compute_gradients(
        loss,
        variables,
        colocate_gradients_with_ops=colocate_gradients_with_ops)

    # Optionally add gradient noise.
    if gradient_noise_scale is not None:
      gradients = _add_scaled_noise_to_gradients(gradients,
                                                 gradient_noise_scale)

    # Multiply some gradients.
    if gradient_multipliers is not None:
      gradients = _multiply_gradients(gradients, gradient_multipliers)
      if not gradients:
        raise ValueError(
            "Empty list of (gradient, var) pairs encountered. This is most "
            "likely to be caused by an improper value of gradient_multipliers.")

    if "gradient_norm" in summaries:
      summary.scalar("global_norm/gradient_norm",
                     clip_ops.global_norm(list(zip(*gradients))[0]))

    # Optionally clip gradients by global norm.
    if isinstance(clip_gradients, float):
      gradients = _clip_gradients_by_norm(gradients, clip_gradients)
    elif callable(clip_gradients):
      gradients = clip_gradients(gradients)
    elif clip_gradients is not None:
      raise ValueError(
          "Unknown type %s for clip_gradients" % type(clip_gradients))

    # Add scalar summary for loss.
    if "loss" in summaries:
      summary.scalar("loss", loss)

    # Add histograms for variables, gradients and gradient norms.
    for gradient, variable in gradients:
      if isinstance(gradient, ops.IndexedSlices):
        grad_values = gradient.values
      else:
        grad_values = gradient

      if grad_values is not None:
        var_name = variable.name.replace(":", "_")
        if "gradients" in summaries:
          summary.histogram("gradients/%s" % var_name, grad_values)
        if "gradient_norm" in summaries:
          summary.scalar("gradient_norm/%s" % var_name,
                         clip_ops.global_norm([grad_values]))

    if clip_gradients is not None and "gradient_norm" in summaries:
      summary.scalar("global_norm/clipped_gradient_norm",
                     clip_ops.global_norm(list(zip(*gradients))[0]))

    # Create gradient updates.
    grad_updates = opt.apply_gradients(
        gradients,
        global_step=global_step if increment_global_step else None,
        name="train")

    # Ensure the train_tensor computes grad_updates.
    train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)

    return train_tensor
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm.

  Args:
    grads_and_vars: List of `(gradient, variable)` pairs.
    clip_gradients: Global norm threshold to clip to.

  Returns:
    List of `(clipped_gradient, variable)` pairs.
  """
  grads, variables = zip(*grads_and_vars)
  # clip_by_global_norm also returns the pre-clip global norm, unused here.
  clipped, _ = clip_ops.clip_by_global_norm(grads, clip_gradients)
  return list(zip(clipped, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average.

  Keeps exponential moving averages of `log(norm + epsilon)` and its square,
  and returns `exp(mean + std_factor * std)` as the adaptive maximum norm.
  """
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      # Non-trainable accumulator variable for the exponential moving average.
      avg_var = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          avg_var, value, decay, zero_debias=False)

    # Quicker adaptation at the beginning: cap decay at n/(n+1).
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # Update the moving averages of the log-norm and its square.
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    # epsilon floors the variance so sqrt never sees a negative value.
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
                         decay=0.95,
                         static_max_norm=None,
                         global_step=None,
                         report_summary=False,
                         epsilon=1e-8,
                         name=None):
  """Adapt the clipping value using statistics on the norms.

  Implement adaptive gradient as presented in section 3.2.1 of
  https://arxiv.org/abs/1412.1602.

  Keeps a moving average of the mean and std of the log(norm) of the gradient.
  If the norm exceeds `exp(mean + std_factor*std)`, all gradients are rescaled
  such that the global norm becomes `exp(mean)`.

  Args:
    std_factor: Python scaler (or tensor).
      `max_norm = exp(mean + std_factor*std)`
    decay: The smoothing factor of the moving averages.
    static_max_norm: If provided, will threshold the norm to this value as an
      extra safety.
    global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
      This provides a quicker adaptation of the mean for the first steps.
    report_summary: If `True`, will add histogram summaries of the `max_norm`.
    epsilon: Small value chosen to avoid zero variance.
    name: The name for this operation is used to scope operations and summaries.

  Returns:
    A function for applying gradient clipping.
  """

  def gradient_clipping(grads_and_vars):
    """Internal function for adaptive clipping."""
    grads, variables = zip(*grads_and_vars)

    norm = clip_ops.global_norm(grads)
    max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
                                            global_step, epsilon, name)

    # Reports the max gradient norm for debugging.
    if report_summary:
      summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)

    # The factor is 1.0 while the norm stays below max_norm; otherwise
    # rescale so that the global norm becomes exp(log_mean).
    factor = array_ops.where(norm < max_norm,
                             array_ops.ones_like(norm),
                             math_ops.exp(log_mean) / norm)

    if static_max_norm is not None:
      factor = math_ops.minimum(static_max_norm / norm, factor)

    # Apply the factor to every gradient, preserving IndexedSlices structure.
    clipped = []
    for grad in grads:
      if grad is None:
        clipped.append(None)
      elif isinstance(grad, ops.IndexedSlices):
        clipped.append(
            ops.IndexedSlices(grad.values * factor, grad.indices,
                              grad.dense_shape))
      else:
        clipped.append(grad * factor)

    return list(zip(clipped, variables))

  return gradient_clipping
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
  """Adds scaled noise from a 0-mean normal distribution to gradients."""
  grads, variables = zip(*grads_and_vars)
  noisy = []
  for grad in grads:
    # None gradients are passed through untouched.
    if grad is None:
      noisy.append(None)
      continue
    # IndexedSlices carry their dense shape separately.
    if isinstance(grad, ops.IndexedSlices):
      shape = grad.dense_shape
    else:
      shape = grad.get_shape()
    noise = random_ops.truncated_normal(shape) * gradient_noise_scale
    noisy.append(grad + noise)
  return list(zip(noisy, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  `gradient_multipliers` may be keyed either by the variable object or by its
  name; unmatched pairs are returned unchanged.
  """
  result = []
  for grad, var in grads_and_vars:
    matched = (grad is not None and
               (var in gradient_multipliers or var.name in gradient_multipliers))
    if matched:
      key = var if var in gradient_multipliers else var.name
      multiplier = constant_op.constant(
          gradient_multipliers[key], dtype=dtypes.float32)
      if isinstance(grad, ops.IndexedSlices):
        # Rebuild the IndexedSlices with scaled values.
        grad = ops.IndexedSlices(grad.values * multiplier, grad.indices,
                                 grad.dense_shape)
      else:
        grad *= multiplier
    result.append((grad, var))
  return result
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import inspect
import abc
import textwrap
import base64
import functools
import docutils.core
import contextlib
import warnings
import itertools
import weakref
import copy
from operator import itemgetter
from collections.abc import Iterable
import numpy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
# Avoid ambiguity between function name and usual variable name
from cycler import cycler as make_cycler
import mplcursors
from ipywidgets import widgets
from IPython.display import display
from lisa.utils import Loggable, get_subclasses, get_doc_url, get_short_doc, split_paragraphs, update_wrapper_doc, guess_format, is_running_ipython, nullcontext, measure_time
from lisa.trace import MissingTraceEventError, PandasDataDesc
from lisa.notebook import axis_link_dataframes, axis_cursor_delta, WrappingHBox, make_figure
from lisa.generic import TypedList
class AnalysisHelpers(Loggable, abc.ABC):
"""
Helper methods class for Analysis modules.
:Design notes:
Plotting methods *must* return the :class:`matplotlib.axes.Axes` instance
used by the plotting method. This lets users further modify them.
"""
    @abc.abstractmethod
    def name():
        """
        Name of the analysis class.

        .. note:: Concrete subclasses provide ``name`` as a plain class
            attribute (``get_analysis_classes`` checks for ``'name' in
            subcls.__dict__``), so this abstract placeholder mainly forces
            subclasses to define it.
        """
        pass
    @classmethod
    def setup_plot(cls, width=16, height=4, ncols=1, nrows=1, interactive=None, link_dataframes=None, cursor_delta=None, **kwargs):
        """
        Common helper for setting up a matplotlib plot

        :param width: Width of the plot (inches)
        :type width: int or float

        :param height: Height of each subplot (inches)
        :type height: int or float

        :param ncols: Number of plots on a single row
        :type ncols: int

        :param nrows: Number of plots in a single column
        :type nrows: int

        :param link_dataframes: Link the provided dataframes to the axes using
            :func:`lisa.notebook.axis_link_dataframes`
        :type link_dataframes: list(pandas.DataFrame) or None

        :param cursor_delta: Add two vertical lines set with left and right
            clicks, and show the time delta between them in a widget.
        :type cursor_delta: bool or None

        :param interactive: If ``True``, use the pyplot API of matplotlib,
            which integrates well with notebooks. However, it can lead to
            memory leaks in scripts generating lots of plots, in which case it
            is better to use the non-interactive API. Defaults to ``True`` when
            running under IPython or Jupyter notebook, `False`` otherwise.
        :type interactive: bool

        :Keywords arguments: Extra arguments to pass to
            :obj:`matplotlib.figure.Figure.subplots`

        :returns: tuple(matplotlib.figure.Figure, matplotlib.axes.Axes (or an
            array of, if ``nrows`` > 1))
        """
        # NOTE(review): ``interactive`` may still be None at this point; the
        # call presumably relies on make_figure() resolving that default
        # itself — confirm against lisa.notebook.make_figure.
        figure, axes = make_figure(
            interactive=interactive,
            width=width,
            height=height,
            ncols=ncols,
            nrows=nrows,
            **kwargs,
        )

        # Resolve the documented default: interactive when running under
        # IPython/Jupyter.
        if interactive is None:
            interactive = is_running_ipython()

        use_widgets = interactive

        if link_dataframes:
            if not use_widgets:
                cls.get_logger().error('Dataframes can only be linked to axes in interactive widget plots')
            else:
                for axis in figure.axes:
                    axis_link_dataframes(axis, link_dataframes)

        # Enable the cursor-delta widget either on explicit request, or by
        # default (cursor_delta is None) when widgets are available.
        if cursor_delta or cursor_delta is None and use_widgets:
            if not use_widgets and cursor_delta is not None:
                cls.get_logger().error('Cursor delta can only be used in interactive widget plots')
            else:
                for axis in figure.axes:
                    axis_cursor_delta(axis)

        for axis in figure.axes:
            axis.relim(visible_only=True)
            axis.autoscale_view(True)

        # Needed for multirow plots to not overlap with each other
        figure.set_tight_layout(dict(h_pad=3.5))
        return figure, axes
    @classmethod
    @contextlib.contextmanager
    def set_axis_cycler(cls, axis, *cyclers):
        """
        Context manager to set cyclers on an axis (and the default cycler as
        well), and then restore the default cycler.

        .. note:: The given cyclers are merged with the original cycler. The
            given cyclers will override any key of the original cycler, and the
            number of values will be adjusted to the maximum size between all
            of them. This way of merging allows decoupling the length of all
            keys.
        """
        orig_cycler = plt.rcParams['axes.prop_cycle']

        # Get the maximum value length among all cyclers involved
        values_len = max(
            len(values)
            for values in itertools.chain(
                orig_cycler.by_key().values(),
                itertools.chain.from_iterable(
                    cycler.by_key().values()
                    for cycler in cyclers
                ),
            )
        )

        # We can only add together cyclers with the same number of values for
        # each key, so cycle through the provided values, up to the right
        # length
        def pad_values(values):
            # Repeat the values endlessly, then truncate at the common length.
            values = itertools.cycle(values)
            values = itertools.islice(values, 0, values_len)
            return list(values)

        def pad_cycler(cycler):
            # Map of key -> padded value list for a single cycler.
            keys = cycler.by_key()
            return {
                key: pad_values(values)
                for key, values in keys.items()
            }

        # Later cyclers win for overlapping keys.
        cycler = {}
        for user_cycler in cyclers:
            cycler.update(pad_cycler(user_cycler))

        # Merge the cyclers and original cycler together, so we still get the
        # original values of the keys not overridden by the given cycler
        parameters = {
            **pad_cycler(orig_cycler),
            **cycler,
        }
        cycler = make_cycler(**parameters)

        def set_cycler(cycler):
            # Also update the rcParams default; presumably some code paths
            # read the global default rather than the axis cycle — confirm.
            plt.rcParams['axes.prop_cycle'] = cycler
            axis.set_prop_cycle(cycler)

        set_cycler(cycler)
        try:
            yield
        finally:
            # Since there is no way to get the cycler from an Axis,
            # we cannot restore the original one, so use the
            # default one instead
            set_cycler(orig_cycler)
@classmethod
@contextlib.contextmanager
def set_axis_rc_params(cls, axis, rc_params):
"""
Context manager to set ``matplotlib.rcParams`` while plotting, and then
restore the default parameters.
"""
orig = matplotlib.rcParams.copy()
matplotlib.rcParams.update(rc_params)
try:
yield
finally:
# matplotlib complains about some deprecated settings being set, so
# silence it since we are just restoring the original state
with warnings.catch_warnings():
matplotlib.rcParams.update(orig)
@classmethod
def cycle_colors(cls, axis, nr_cycles=1):
"""
Cycle the axis color cycle ``nr_cycles`` forward
:param axis: The axis to manipulate
:type axis: matplotlib.axes.Axes
:param nr_cycles: The number of colors to cycle through.
:type nr_cycles: int
.. note::
This is an absolute cycle, as in, it will always start from the first
color defined in the color cycle.
"""
if nr_cycles < 1:
return
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
if nr_cycles > len(colors):
nr_cycles -= len(colors)
axis.set_prop_cycle(make_cycler(color=colors[nr_cycles:] + colors[:nr_cycles]))
    @classmethod
    def get_next_color(cls, axis):
        """
        Get the next color that will be used to draw lines on the axis

        :param axis: The axis
        :type axis: matplotlib.axes.Axes

        :returns: A matplotlib color specification.

        .. warning::

          This will consume the color from the cycler, which means it will
          change which color is to be used next.
        """
        # XXX: We're accessing some private data here, so that could break eventually
        # Need to find another way to get the current color from the cycler, or to
        # plot all data from a dataframe in the same color.
        return next(axis._get_lines.prop_cycler)['color']
def get_default_plot_path(self, img_format, plot_name, default_dir='.'):
"""
Return the default path to use to save plots for the analysis.
:param img_format: Format of the image to save.
:type img_format: str
:param plot_name: Middle-name of the plot
:type plot_name: str
:param default_dir: Default folder to store plots into.
:type default_dir: str
"""
analysis = self.name
filepath = os.path.join(
default_dir,
f"{analysis}.{plot_name}.{img_format}")
return filepath
    def save_plot(self, figure, filepath=None, img_format=None):
        """
        Save a :class:`matplotlib.figure.Figure` as an image file.

        :param figure: The figure to save.
        :type figure: matplotlib.figure.Figure

        :param filepath: Path of the file to save into. Defaults to a path
            derived from the calling function's name.
        :type filepath: str or None

        :param img_format: Image format. Defaults to guessing from
            ``filepath``, falling back to PNG.
        :type img_format: str or None
        """
        img_format = img_format or guess_format(filepath) or 'png'
        # Use the immediate caller's function name as the default plot name.
        caller = inspect.stack()[1][3]
        filepath = filepath or self.get_default_plot_path(
            img_format=img_format,
            plot_name=caller,
        )

        # The suptitle is not taken into account by tight layout by default:
        # https://stackoverflow.com/questions/48917631/matplotlib-how-to-return-figure-suptitle
        suptitle = figure._suptitle
        figure.savefig(
            filepath,
            bbox_extra_artists=[suptitle] if suptitle else None,
            format=img_format,
            bbox_inches='tight'
        )
def do_plot(self, plotter, axis=None, **kwargs):
"""
Simple helper for consistent behavior across methods.
"""
local_fig = axis is None
if local_fig:
fig, axis = self.setup_plot(**kwargs)
plotter(axis, local_fig)
return axis
@staticmethod
def _get_base64_image(axis, fmt='png'):
if isinstance(axis, numpy.ndarray):
axis = axis[0]
figure = axis.get_figure()
buff = io.BytesIO()
figure.savefig(buff, format=fmt, bbox_inches='tight')
buff.seek(0)
b64_image = base64.b64encode(buff.read())
return b64_image.decode('utf-8')
    @classmethod
    def get_plot_methods(cls, instance=None):
        """
        Return the list of ``plot_*`` callables defined on this class, or
        bound to ``instance`` when one is given.
        """
        obj = instance if instance is not None else cls

        def predicate(f):
            # Keep only callables named plot_* and exclude the plot_method()
            # decorator factory itself.
            if not callable(f):
                return False

            # "unwrap" bound methods and other similar things
            with contextlib.suppress(AttributeError):
                f = f.__func__

            return (
                f.__name__.startswith('plot_')
                and f is not cls.plot_method.__func__
            )

        return [
            f
            for name, f in inspect.getmembers(obj, predicate=predicate)
        ]
_FIG_DATA = weakref.WeakKeyDictionary()
"""
Data that are related to a matplotlib figure and that must not be duplicated by each call.gqq
"""
@classmethod
def _get_fig_data(cls, fig, key):
return cls._FIG_DATA.setdefault(fig, {})[key]
def _set_fig_data(cls, fig, key, val):
cls._FIG_DATA.setdefault(fig, {})[key] = val
    def _make_fig_toolbar(self, fig):
        """
        Build the per-figure toolbar widget.

        Currently holds a single button that opens the trace in an external
        viewer via ``self.trace.show()``.

        .. note:: Assumes the instance provides a ``trace`` attribute
            (:class:`TraceAnalysisBase` does) — confirm for other subclasses.
        """
        toolbar = WrappingHBox()
        widget_list = []

        label = 'Open in trace viewer'
        open_button = widgets.Button(
            description=label,
            tooltip=label,
            disabled=False,
        )
        # Delegate the actual viewer invocation to the trace object.
        open_button.on_click(lambda event: self.trace.show())
        widget_list.append(open_button)

        toolbar.children += tuple(widget_list)
        return toolbar
    @classmethod
    def plot_method(cls, return_axis=False):
        """
        Plot function decorator.

        :param return_axis: If ``True``, the decorated method is expected to
            return a :class:`matplotlib.axes.Axes` instance, by using
            :meth:`do_plot` for example. Otherwise, it is expected to
            take an ``axis`` and ``local_fig`` parameters like the ``plotter``
            given to :meth:`do_plot` and just update the ``axis``.
        :type return_axis: bool

        It allows for automatic plot setup and HTML and reStructuredText output.
        """
        def decorator(f):
            @update_wrapper_doc(
                f,
                added_by=f':meth:`{AnalysisHelpers.__module__}.{AnalysisHelpers.__qualname__}.plot_method`',
                description=textwrap.dedent("""
                :returns: An :class:`matplotlib.axes.Axes` containing the plot,
                    or rich formats depending on ``output`` value.

                :param axis: instance of :class:`matplotlib.axes.Axes` to plot into.
                    If `None`, a new figure and axis are created and returned.
                :type axis: matplotlib.axes.Axes
                    or numpy.ndarray(matplotlib.axes.Axes)
                    or None

                :param colors: List of color names to use for the plots.
                :type colors: list(str) or None

                :param linestyles: List of linestyle to use for the plots.
                :type linestyles: list(str) or None

                :param markers: List of marker to use for the plots.
                :type markers: list(str) or None

                :param rc_params: Matplotlib rc params dictionary overlaid on
                    existing settings.
                :type rc_params: dict(str, object) or None

                :param filepath: Path of the file to save the figure in. If
                    `None`, no file is saved.
                :type filepath: str or None

                :param always_save: When ``True``, the plot is always saved
                    even if no ``filepath`` has explicitly been set. In that
                    case, a default path will be used.
                :type always_save: bool

                :param img_format: The image format to generate. Defaults to
                    using filepath to guess the type, or "png" if no filepath is
                    given. `html` and `rst` are supported in addition to
                    matplotlib image formats.
                :type img_format: str

                :param output: Can be ``None`` to return a
                    :class:`matplotlib.axes.Axes`, ``html`` to return an HTML
                    document, or ``rst`` for a reStructuredText output.
                :type output: str or None

                :Variable keyword arguments: Forwarded to
                    :meth:`~lisa.analysis.base.AnalysisHelpers.setup_plot`
                """),
                remove_params=['local_fig'],
                include_kwargs=True,
            )
            def wrapper(self, *args, filepath=None, axis=None, output=None, img_format=None, always_save=False, colors: TypedList[str]=None, linestyles: TypedList[str]=None, markers: TypedList[str]=None, rc_params=None, **kwargs):

                def is_f_param(param):
                    """
                    Return True if the parameter is for `f`, False if it is
                    for setup_plot()
                    """
                    try:
                        desc = inspect.signature(f).parameters[param]
                    except KeyError:
                        return False
                    else:
                        # Passing kwargs=42 to a function taking **kwargs
                        # should not return True here, as we only consider
                        # explicitly listed arguments
                        return desc.kind not in (
                            inspect.Parameter.VAR_KEYWORD,
                            inspect.Parameter.VAR_POSITIONAL,
                        )

                # Factor the *args inside the **kwargs by binding them to the
                # user-facing signature, which is the one of the wrapper.
                kwargs.update(
                    inspect.signature(wrapper).bind_partial(self, *args).arguments
                )

                # Split the keyword arguments between the plot method itself
                # and setup_plot().
                f_kwargs = {
                    param: val
                    for param, val in kwargs.items()
                    if is_f_param(param)
                }

                img_format = img_format or guess_format(filepath) or 'png'
                local_fig = axis is None

                # When we create the figure ourselves, always save the plot to
                # the default location
                if local_fig and filepath is None and always_save:
                    filepath = self.get_default_plot_path(
                        img_format=img_format,
                        plot_name=f.__name__,
                    )

                cyclers = dict(
                    color=colors,
                    linestyle=linestyles,
                    marker=markers,
                )
                cyclers = {
                    name: value
                    for name, value in cyclers.items()
                    if value
                }
                if cyclers:
                    cyclers = [
                        make_cycler(**{name: value})
                        for name, value in cyclers.items()
                    ]
                    set_cycler = lambda axis: cls.set_axis_cycler(axis, *cyclers)
                else:
                    set_cycler = lambda axis: nullcontext()

                if rc_params:
                    set_rc_params = lambda axis: cls.set_axis_rc_params(axis, rc_params)
                else:
                    set_rc_params = lambda axis: nullcontext()

                # Allow returning an axis directly, or just update a given axis
                if return_axis:
                    # In that case, the function takes all the kwargs
                    with set_cycler(axis), set_rc_params(axis):
                        axis = f(**kwargs, axis=axis)
                else:
                    if local_fig:
                        setup_plot_kwargs = {
                            param: val
                            for param, val in kwargs.items()
                            if param not in f_kwargs
                        }
                        fig, axis = self.setup_plot(**setup_plot_kwargs)

                    f_kwargs.update(
                        axis=axis,
                        local_fig=f_kwargs.get('local_fig', local_fig),
                    )
                    with set_cycler(axis), set_rc_params(axis):
                        f(**f_kwargs)

                if isinstance(axis, numpy.ndarray):
                    fig = axis[0].get_figure()
                else:
                    fig = axis.get_figure()

                def resolve_formatter(fmt):
                    """Select the rich-output renderer for ``fmt``."""
                    format_map = {
                        'rst': cls._get_rst_content,
                        'html': cls._get_html,
                    }
                    try:
                        return format_map[fmt]
                    except KeyError:
                        raise ValueError(f'Unsupported format: {fmt}')

                if output is None:
                    out = axis

                    # Show the LISA figure toolbar
                    if is_running_ipython():
                        # Make sure we only add one button per figure
                        try:
                            toolbar = self._get_fig_data(fig, 'toolbar')
                        except KeyError:
                            toolbar = self._make_fig_toolbar(fig)
                            self._set_fig_data(fig, 'toolbar', toolbar)
                        display(toolbar)

                        mplcursors.cursor(fig)
                else:
                    out = resolve_formatter(output)(f, [], f_kwargs, axis)

                if filepath:
                    if img_format in ('html', 'rst'):
                        content = resolve_formatter(img_format)(f, [], f_kwargs, axis)
                        with open(filepath, 'wt', encoding='utf-8') as fd:
                            fd.write(content)
                    else:
                        fig.savefig(filepath, format=img_format, bbox_inches='tight')

                return out
            return wrapper
        return decorator
    @staticmethod
    def _get_rst_header(f):
        """
        Build the reStructuredText title block for the plot method ``f``:
        a human-readable title derived from the function name, plus a short
        description with an optional link to the full documentation.
        """
        name = f.__name__
        prefix = 'plot_'
        if name.startswith(prefix):
            name = name[len(prefix):]
        name = name.replace('_', ' ').capitalize()

        try:
            url = get_doc_url(f)
            doc_link = f'`[doc] <{url}>`_'
        # Best-effort: if the doc URL cannot be computed, just omit the link.
        except Exception:
            doc_link = ''

        return textwrap.dedent(f"""
            {name}
            {'=' * len(name)}

            {get_short_doc(f)} {doc_link}
            """
        )
@classmethod
def _get_rst_content(cls, f, args, kwargs, axis):
kwargs = inspect.signature(f).bind_partial(*args, **kwargs)
kwargs.apply_defaults()
kwargs = kwargs.arguments
fmt = 'png'
b64_image = cls._get_base64_image(axis, fmt=fmt)
hidden_params = {
'self',
'axis',
'local_fig',
'filepath',
'axis',
'output',
'img_format',
'always_save',
'kwargs',
'colors',
'linestyles',
'markers',
'rc_params',
}
args_list = ', '.join(
f'{k}={v}'
for k, v in sorted(kwargs.items(), key=itemgetter(0))
if v is not None and k not in hidden_params
)
return textwrap.dedent(f"""
.. figure:: data:image/{fmt};base64,{b64_image}
:alt: {f.__qualname__}
:align: center
:width: 100%
{args_list}
""")
@classmethod
def _get_rst(cls, f, args, kwargs, axis):
return cls._get_rst_header(f) + '\n' + cls._get_rst_content(f, args, kwargs, axis)
    @staticmethod
    def _docutils_render(writer, rst, doctitle_xform=False):
        """
        Render ``rst`` with docutils using the given ``writer`` name, and
        return the docutils "parts" dictionary.
        """
        overrides = {
            'input_encoding': 'utf-8',
            # enable/disable promotion of lone top-level section title
            # to document title
            'doctitle_xform': doctitle_xform,
            'initial_header_level': 1,
            # This level will silent unknown roles and directives
            # error. It is necessary since we are rendering docstring
            # written for Sphinx using docutils, which only understands
            # plain reStructuredText
            'report_level': 4,
            # Set the line length to always accept our document, since it has a
            # large base64-encoded image in it and docutils will otherwise just
            # replace the document body with an error
            'line_length_limit': len(rst) + 1,
        }
        parts = docutils.core.publish_parts(
            source=rst, source_path=None,
            destination_path=None,
            writer_name=writer,
            settings_overrides=overrides,
        )
        return parts
@classmethod
def _get_html(cls, *args, **kwargs):
rst = cls._get_rst(*args, **kwargs)
parts = cls._docutils_render(writer='html', rst=rst, doctitle_xform=True)
return parts['whole']
class TraceAnalysisBase(AnalysisHelpers):
"""
Base class for Analysis modules.
:param trace: input Trace object
:type trace: :class:`trace.Trace`
:Design notes:
Method depending on certain trace events *must* be decorated with
:meth:`lisa.trace.requires_events`
"""
    def __init__(self, trace):
        # Trace this analysis instance operates on (lisa.trace.Trace).
        self.trace = trace
    @classmethod
    def cache(cls, f):
        """
        Decorator to enable caching of the output of dataframe getter function
        in the trace cache.

        This will write the dataframe to the swap as well, so processing can be
        skipped completely when possible.
        """
        sig = inspect.signature(f)
        ignored_kwargs = {
            # self
            list(sig.parameters.keys())[0],
        }

        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            # Express the arguments as kwargs-only
            params = sig.bind(self, *args, **kwargs)
            params.apply_defaults()
            kwargs = dict(params.arguments)

            trace = self.trace
            # Cache key: the function identity plus everything that can
            # influence its output.
            spec = dict(
                module=f.__module__,
                func=f.__qualname__,
                # Include the trace window in the spec since that influences
                # what the analysis was seeing
                trace_state=trace.trace_state,
                # Make a deepcopy as it is critical that the PandasDataDesc is
                # not modified under the hood once inserted in the cache
                kwargs=copy.deepcopy({
                    k: v
                    for k, v in kwargs.items()
                    if k not in ignored_kwargs
                }),
            )
            pd_desc = PandasDataDesc(spec=spec)

            cache = trace._cache
            write_swap = trace._write_swap
            try:
                df = cache.fetch(pd_desc)
            except KeyError:
                # Cache miss: compute the dataframe, timing the computation so
                # the cache can weigh the cost of recomputing vs swapping.
                with measure_time() as measure:
                    df = f(**kwargs)
                compute_cost = measure.exclusive_delta
                cache.insert(pd_desc, df, compute_cost=compute_cost, write_swap=write_swap)
            return df
        return wrapper
@classmethod
def get_all_events(cls):
"""
Returns the set of all events used by any of the methods.
"""
def predicate(f):
return callable(f) and hasattr(f, 'used_events')
return set(itertools.chain.from_iterable(
attr.used_events.get_all_events()
for name, attr in inspect.getmembers(cls, predicate=predicate)
))
    def get_default_plot_path(self, **kwargs):
        """
        Same as the base implementation, but storing plots in the trace's
        plots folder by default.
        """
        return super().get_default_plot_path(
            default_dir=self.trace.plots_dir,
            **kwargs,
        )
@classmethod
def get_analysis_classes(cls):
return {
subcls.name: subcls
for subcls in get_subclasses(cls)
# Classes without a "name" attribute directly defined in their
# scope will not get registered. That allows having unnamed
# intermediate base classes that are not meant to be exposed.
if 'name' in subcls.__dict__
}
    @classmethod
    def call_on_trace(cls, meth, trace, meth_kwargs):
        """
        Call a method of a subclass on a given trace.

        :param meth: Function (method) defined on a subclass.
        :type meth: collections.abc.Callable

        :param trace: Trace object to use
        :type trace: lisa.trace.Trace

        :param meth_kwargs: Dictionary of keyword arguments to pass to ``meth``
        :type meth_kwargs: dict

        It will create an instance of the right analysis, bind the function to
        it and call the resulting bound method with ``meth_kwargs`` extra
        keyword arguments.

        :raises ValueError: If ``meth`` does not belong to any registered
            subclass.
        """
        # Locate the subclass that defines ``meth``. The nested for/else
        # breaks out of both loops on the first match; the outer loop's else
        # clause only runs when no subclass matched.
        for subcls in cls.get_analysis_classes().values():
            for name, f in inspect.getmembers(subcls):
                if f is meth:
                    break
            else:
                continue
            break
        else:
            raise ValueError(f'{meth.__qualname__} is not a method of any subclasses of {cls.__qualname__}')

        # Create an analysis instance and bind the method to it
        analysis = subcls(trace=trace)
        meth = meth.__get__(analysis, type(analysis))

        return meth(**meth_kwargs)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
|
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import BatchRequest
context = ge.get_context()
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
assets:
yellow_tripdata:
pattern: yellow_tripdata_(.*)\.csv
group_names:
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/single_directory_one_data_asset/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"assets": {
"yellow_tripdata": {
"pattern": r"yellow_tripdata_(.*)\.csv",
"group_names": ["month"],
}
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/single_directory_one_data_asset/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
assert test_yaml == test_python
context.add_datasource(**datasource_config)
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_configured_data_connector_name",
data_asset_name="yellow_tripdata",
)
context.create_expectation_suite(
expectation_suite_name="<MY EXPECTATION SUITE NAME>", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request,
expectation_suite_name="<MY EXPECTATION SUITE NAME>",
batch_identifiers={"month": "2019-02"},
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_inferred_data_connector_name:
class_name: ConfiguredAssetS3DataConnector
bucket: <MY S3 BUCKET>/
prefix: <MY S3 BUCKET PREFIX>/
assets:
yellow_tripdata:
pattern: yellow_tripdata_(.*)\.csv
group_names:
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY S3 BUCKET>/", "superconductive-docs-test"
)
datasource_yaml = datasource_yaml.replace(
"<MY S3 BUCKET PREFIX>/", "data/taxi_yellow_tripdata_samples/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_inferred_data_connector_name": {
"class_name": "ConfiguredAssetS3DataConnector",
"bucket": "<MY S3 BUCKET>/",
"prefix": "<MY S3 BUCKET PREFIX>/",
"assets": {
"yellow_tripdata": {
"group_names": ["month"],
"pattern": r"yellow_tripdata_(.*)\.csv",
},
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_inferred_data_connector_name"][
"bucket"
] = "superconductive-docs-test"
datasource_config["data_connectors"]["default_inferred_data_connector_name"][
"prefix"
] = "data/taxi_yellow_tripdata_samples/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
assert test_yaml == test_python
context.add_datasource(**datasource_config)
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_inferred_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
assets:
yellow_tripdata:
pattern: (.*)\.csv
group_names:
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/single_directory_one_data_asset/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"assets": {
"yellow_tripdata": {
"pattern": r"yellow_tripdata_(.*)\.csv",
"group_names": ["month"],
}
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/single_directory_one_data_asset/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
assert test_yaml == test_python
context.add_datasource(**datasource_config)
batch_request = BatchRequest(
datasource_name="taxi_datasource",
data_connector_name="default_configured_data_connector_name",
data_asset_name="yellow_tripdata",
)
validator = context.get_validator(
batch_request=batch_request,
expectation_suite_name="<MY EXPECTATION SUITE NAME>",
batch_identifiers={"month": "2019-02"},
)
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
assets:
yellow_tripdata:
pattern: green_tripdata_(.*)\.csv
group_names:
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/single_directory_one_data_asset/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"assets": {
"yellow_tripdata": {
"pattern": r"green_tripdata_(.*)\.csv",
"group_names": ["month"],
}
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/single_directory_one_data_asset/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
# NOTE: The following code is only for testing and can be ignored by users.
assert test_yaml == test_python
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
assets:
yellow_tripdata:
pattern: yellow_tripdata_(\d{4})-(\d{2})\.csv
group_names:
- year
- month
green_tripdata:
pattern: green_tripdata_(\d{4})-(\d{2})\.csv
group_names:
- year
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/single_directory_two_data_assets/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"assets": {
"yellow_tripdata": {
"pattern": r"yellow_tripdata_(\d{4})-(\d{2})\.csv",
"group_names": ["year", "month"],
},
"green_tripdata": {
"pattern": r"green_tripdata_(\d{4})-(\d{2})\.csv",
"group_names": ["year", "month"],
},
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/single_directory_two_data_assets/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
# NOTE: The following code is only for testing and can be ignored by users.
# TODO: Uncomment the line below once ISSUE #3589 (https://github.com/great-expectations/great_expectations/issues/3589) is resolved
# assert test_yaml == test_python
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
assert "green_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
assets:
yellow_tripdata:
base_directory: yellow_tripdata/
pattern: yellow_tripdata_(\d{4})-(\d{2})\.csv
group_names:
- year
- month
green_tripdata:
base_directory: green_tripdata/
pattern: (\d{4})-(\d{2})\.csv
group_names:
- year
- month
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/nested_directories_data_asset/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"assets": {
"yellow_tripdata": {
"base_directory": "yellow_tripdata/",
"pattern": r"yellow_tripdata_(\d{4})-(\d{2})\.csv",
"group_names": ["year", "month"],
},
"green_tripdata": {
"base_directory": "green_tripdata/",
"pattern": r"(\d{4})-(\d{2})\.csv",
"group_names": ["year", "month"],
},
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/nested_directories_data_asset/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
# NOTE: The following code is only for testing and can be ignored by users.
assert test_yaml == test_python
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
assert "green_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
# YAML
datasource_yaml = r"""
name: taxi_datasource
class_name: Datasource
module_name: great_expectations.datasource
execution_engine:
module_name: great_expectations.execution_engine
class_name: PandasExecutionEngine
data_connectors:
default_configured_data_connector_name:
class_name: ConfiguredAssetFilesystemDataConnector
base_directory: <MY DIRECTORY>/
default_regex:
pattern: (.*)_(\d{4})-(\d{2})\.(csv|txt)$
group_names:
- data_asset_name
- year
- month
assets:
yellow_tripdata:
base_directory: yellow/tripdata/
glob_directive: "*.txt"
green_tripdata:
base_directory: green_tripdata/
glob_directive: "*.csv"
"""
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace(
"<MY DIRECTORY>/", "../data/nested_directories_complex/"
)
test_yaml = context.test_yaml_config(datasource_yaml, return_mode="report_object")
# Python
datasource_config = {
"name": "taxi_datasource",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"default_configured_data_connector_name": {
"class_name": "ConfiguredAssetFilesystemDataConnector",
"base_directory": "<MY DIRECTORY>/",
"default_regex": {
"pattern": r"(.*)_(\d{4})-(\d{2})\.(csv|txt)$",
"group_names": ["data_asset_name", "year", "month"],
},
"assets": {
"yellow_tripdata": {
"base_directory": "yellow/tripdata/",
"glob_directive": "*.txt",
},
"green_tripdata": {
"base_directory": "green_tripdata/",
"glob_directive": "*.csv",
},
},
},
},
}
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the code above.
datasource_config["data_connectors"]["default_configured_data_connector_name"][
"base_directory"
] = "../data/nested_directories_complex/"
test_python = context.test_yaml_config(
yaml.dump(datasource_config), return_mode="report_object"
)
# NOTE: The following code is only for testing and can be ignored by users.
assert test_yaml == test_python
assert [ds["name"] for ds in context.list_datasources()] == ["taxi_datasource"]
assert "yellow_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
assert "green_tripdata" in set(
context.get_available_data_asset_names()["taxi_datasource"][
"default_configured_data_connector_name"
]
)
|
|
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.2+.
"""
#-----------------------------------------------------------------------------
# Python 3.3 stdlib inspect.py is public domain
#
# Backports Copyright (C) 2013 Aaron Iles
# Used under Apache License Version 2.0
#
# Further Changes are Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
# patch for single-file
# we don't support 2.6, so we can just import OrderedDict
from collections import OrderedDict
__version__ = '0.3'
# end patch
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
# Types of C-level callables that carry no introspectable signature; they
# are filtered out by _get_user_defined_method() below.
_WrapperDescriptor = type(type.__call__)    # slot wrapper descriptor
_MethodWrapper = type(all.__call__)         # bound method-wrapper of a builtin
_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
    """Render *annotation* for display in a signature string.

    Classes are shown by name, prefixed with their module unless that
    module is the builtins module (Python 2 or 3 spelling) or matches
    *base_module*.  Anything that is not a class falls back to repr().
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', '__builtin__', base_module):
        return annotation.__name__
    return '{0}.{1}'.format(module, annotation.__name__)
def _get_user_defined_method(cls, method_name, *nested):
    """Return the user-defined method *method_name* on *cls*, or None.

    Each name in *nested* is followed through the resolved attribute when
    present (e.g. ``'im_func'`` to unwrap Python 2 bound methods).  C-level
    callables (slot wrappers, method-wrappers, builtins) are treated as
    "not user-defined" and yield None, as does ``type`` itself.
    """
    if cls is type:
        return None
    try:
        meth = getattr(cls, method_name)
        for attr in nested:
            meth = getattr(meth, attr, meth)
    except AttributeError:
        return None
    if isinstance(meth, _NonUserDefinedCallables):
        # Once '__signature__' will be added to 'C'-level
        # callables, this check won't be necessary
        return None
    return meth
def signature(obj):
    '''Get a signature object for the passed callable.'''
    # Resolution order mirrors Python 3.3's inspect.signature():
    # methods -> explicit __signature__ -> __wrapped__ (decorators) ->
    # plain functions -> functools.partial -> classes -> callable instances.
    if not callable(obj):
        raise TypeError('{0!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = signature(obj.__func__)
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    try:
        # An explicitly attached signature always wins.
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)
    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)
    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature, then fold the
        # partial's pre-bound positional and keyword arguments into it.
        sig = signature(obj.func)
        new_params = OrderedDict(sig.parameters.items())
        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {0!r} has incorrect arguments'.format(obj)
            raise ValueError(msg)
        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                # >>> def foo(a): print(a)
                # >>> print(partial(partial(foo, a=10), a=20)())
                # 20
                # >>> print(partial(partial(foo, a=10), a=20)(a=30))
                # 30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                    not param._partial_kwarg):
                # A positional argument already consumed by the partial:
                # drop it from the resulting signature.
                new_params.pop(arg_name)
        return sig.replace(parameters=new_params.values())
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__', 'im_func')
        if call is not None:
            sig = signature(call)
    if sig is not None:
        return sig
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {0!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {0!r} is not supported by signature'.format(obj))
class _void(object):
    '''A private marker - used in Parameter & Signature'''
class _empty(object):
    # Marker for "no default value" / "no annotation"; exposed publicly as
    # Parameter.empty and Signature.empty.
    pass
class _ParameterKind(int):
    # An int subclass so kinds are orderable (Signature.__init__ requires
    # parameter kinds in non-decreasing order); carries a display name.
    def __new__(self, *args, **kwargs):
        obj = int.__new__(self, *args)
        obj._name = kwargs['name']
        return obj
    def __str__(self):
        return self._name
    def __repr__(self):
        return '<_ParameterKind: {0!r}>'.format(self._name)
# The five parameter kinds, mirroring Python 3's inspect module.
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
    '''Represents a parameter in a function signature.
    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    # _partial_kwarg marks parameters whose default was injected by a
    # functools.partial keyword (see signature() and Signature._bind).
    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        # Validate kind first; all other checks depend on it.
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs can never carry a default value.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{0} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is None:
            # Nameless parameters are only allowed for positional-only
            # arguments of C-implemented callables.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            # Other kinds must look like a Python identifier.
            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
                msg = '{0!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name
        self._partial_kwarg = _partial_kwarg
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void (not None/_empty) distinguishes "not supplied" from any
        # legitimate value the caller might pass.
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg
        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)
    def __str__(self):
        kind = self.kind
        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters render as '<name>' (or '<>').
            if formatted is None:
                formatted = ''
            formatted = '<{0}>'.format(formatted)
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{0}:{1}'.format(formatted,
                                         formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{0}={1}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
                                              id(self), self.name)
    def __hash__(self):
        # Deliberately unhashable: defines __eq__ without stable identity.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        # Note: _partial_kwarg is intentionally excluded from equality.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments(object):
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.
    Has the following public attributes:
    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Walk parameters in declaration order, collecting values until the
        # first parameter that must be passed as a keyword; everything from
        # that point on belongs to 'kwargs' instead.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here.  Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Mirror image of 'args': skip the leading positional section, then
        # collect the remaining bound values as keyword arguments.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                        param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # A gap in the positional values also ends the
                        # positional section.
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __hash__(self):
        # Deliberately unhashable: 'arguments' is a mutable mapping.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature(object):
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Hook points so subclasses can substitute their own classes.
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                # Kinds must be non-decreasing: positional-only first,
                # **kwargs last (exploits _ParameterKind being an int).
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    if kind < top_kind:
                        msg = 'wrong parameter order: {0} before {1}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Nameless positional-only parameters are keyed
                        # by their index.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {0!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                # Trusted input (e.g. from_function): skip validation.
                params = OrderedDict(((param.name, param)
                                      for param in parameters))
        self._parameters = params
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{0!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        # co_kwonlyargcount does not exist on Python 2 code objects.
        keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = getattr(func, '__annotations__', {})
        defaults = func.__defaults__
        kwdefaults = getattr(func, '__kwdefaults__', None)
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args  (0x04 is CO_VARARGS)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs  (0x08 is CO_VARKEYWORDS)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        try:
            # Read-only view on Python 3.3+; fall back to a copy where
            # MappingProxyType is unavailable.
            return types.MappingProxyType(self._parameters)
        except AttributeError:
            return OrderedDict(self._parameters.items())
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __hash__(self):
        # Deliberately unhashable (defines __eq__ over mutable-ish state).
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        # Keyword-only parameters compare by name; all others must also
        # occupy the same position in the signature.
        if (not issubclass(type(other), Signature) or
                self.return_annotation != other.return_annotation or
                len(self.parameters) != len(other.parameters)):
            return False
        other_positions = dict((param, idx)
                               for idx, param in enumerate(other.parameters.keys()))
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                            param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, partial=False):
        '''Private method.  Don't use directly.'''
        # Phase 1 consumes positional 'args'; phase 2 consumes 'kwargs'.
        # 'parameters_ex' carries over the parameter at which phase 1
        # stopped so phase 2 can resume from it.
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args.  Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                            param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter.  So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments')
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter.  It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name))
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs)
    def bind_partial(self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs, partial=True)
    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({0})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {0}'.format(anno)
        return rendered
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urljoin, urlparse
import logging
import os
import posixpath
import time
from functools import partial
from collections import namedtuple
from itertools import groupby
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.template import render_from_entry, RenderError
log = logging.getLogger('sftp')
ConnectionConfig = namedtuple('ConnectionConfig', ['host', 'port', 'username', 'password',
'private_key', 'private_key_pass'])
# retry configuration constants
CONNECT_TRIES = 3
RETRY_INTERVAL = 15
RETRY_STEP = 5
SOCKET_TIMEOUT = 15
# make separate path instances for local vs remote path styles
localpath = os.path
remotepath = posixpath # pysftp uses POSIX style paths
try:
import pysftp
logging.getLogger("paramiko").setLevel(logging.ERROR)
except ImportError:
pysftp = None
def sftp_connect(conf):
    """
    Helper function to connect to an sftp server.

    Retries up to CONNECT_TRIES times after the first failure, waiting
    progressively longer (RETRY_INTERVAL, then +RETRY_STEP each time)
    between attempts; the last exception is re-raised when the retry
    budget is exhausted.
    """
    attempts_left = CONNECT_TRIES
    wait = RETRY_INTERVAL
    connection = None
    while connection is None:
        try:
            connection = pysftp.Connection(host=conf.host, username=conf.username,
                                           private_key=conf.private_key, password=conf.password,
                                           port=conf.port, private_key_pass=conf.private_key_pass)
            connection.timeout = SOCKET_TIMEOUT
            log.verbose('Connected to %s' % conf.host)
        except Exception as e:
            if not attempts_left:
                raise e
            log.debug('Caught exception: %s' % e)
            log.warning('Failed to connect to %s; waiting %d seconds before retrying.' %
                        (conf.host, wait))
            time.sleep(wait)
            attempts_left -= 1
            wait += RETRY_STEP
    return connection
def sftp_from_config(config):
    """
    Creates an SFTP connection from a Flexget config object.

    Wraps any connection failure in a plugin.PluginError.
    """
    conn_conf = ConnectionConfig(config['host'],
                                 config['port'],
                                 config['username'],
                                 config['password'],
                                 config['private_key'],
                                 config['private_key_pass'])
    try:
        return sftp_connect(conn_conf)
    except Exception as e:
        raise plugin.PluginError('Failed to connect to %s (%s)' % (config['host'], e))
def sftp_prefix(config):
    """
    Generate the 'sftp://user:pass@host:port/' URL prefix for a config.

    The port is only rendered when it differs from the SSH default (22).
    """
    username = config['username']
    password = config['password']
    if username and password:
        credentials = '%s:%s@' % (username, password)
    elif username:
        credentials = '%s@' % username
    else:
        credentials = ''
    port = config['port']
    port_part = ':%d' % port if port and port != 22 else ''
    return 'sftp://%s%s%s/' % (credentials, config['host'], port_part)
def dependency_check():
    """
    Raise a DependencyError unless the pysftp module was imported.
    """
    if pysftp is None:
        raise plugin.DependencyError(issued_by='sftp',
                                     missing='pysftp',
                                     message='sftp plugin requires the pysftp Python module.')
class SftpList(object):
    """
    Generate entries from SFTP. This plugin requires the pysftp Python module and its dependencies.
    Configuration:
        host:              Host to connect to
        port:              Port the remote SSH server is listening on. Defaults to port 22.
        username:          Username to log in as
        password:          The password to use. Optional if a private key is provided.
        private_key:       Path to the private key (if any) to log into the SSH server
        private_key_pass:  Password for the private key (if needed)
        recursive:         Indicates whether the listing should be recursive
        get_size:          Indicates whether to calculate the size of the remote file/directory.
                           WARNING: This can be very slow when computing the size of directories!
        files_only:        Indicates whether to omit directories from the results.
        dirs:              List of directories to download
    Example:
      sftp_list:
          host: example.com
          username: Username
          private_key: /Users/username/.ssh/id_rsa
          recursive: False
          get_size: True
          files_only: False
          dirs:
              - '/path/to/list/'
              - '/another/path/'
    """
    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'port': {'type': 'integer', 'default': 22},
            'files_only': {'type': 'boolean', 'default': True},
            'recursive': {'type': 'boolean', 'default': False},
            'get_size': {'type': 'boolean', 'default': True},
            'private_key': {'type': 'string'},
            'private_key_pass': {'type': 'string'},
            'dirs': one_or_more({'type': 'string'})
        },
        # BUG FIX: was misspelled 'additionProperties'. JSON schema silently
        # ignores unknown keywords, so unknown config keys were never rejected.
        'additionalProperties': False,
        'required': ['host', 'username']
    }
    def prepare_config(self, config):
        """
        Sets defaults for the provided configuration.
        """
        config.setdefault('port', 22)
        config.setdefault('password', None)
        config.setdefault('private_key', None)
        config.setdefault('private_key_pass', None)
        config.setdefault('dirs', ['.'])
        return config
    def on_task_input(self, task, config):
        """
        Input task handler: connect to the server and produce an Entry for
        each remote node found under the configured directories.
        """
        dependency_check()
        config = self.prepare_config(config)
        files_only = config['files_only']
        recursive = config['recursive']
        get_size = config['get_size']
        private_key = config['private_key']
        private_key_pass = config['private_key_pass']
        dirs = config['dirs']
        if not isinstance(dirs, list):
            dirs = [dirs]
        log.debug('Connecting to %s' % config['host'])
        sftp = sftp_from_config(config)
        url_prefix = sftp_prefix(config)
        entries = []
        def file_size(path):
            """
            Helper function to get the size of a node.
            """
            return sftp.lstat(path).st_size
        def dir_size(path):
            """
            Walk a directory to get its size.
            """
            sizes = []
            def node_size(f):
                sizes.append(file_size(f))
            # walktree invokes node_size for files, dirs and unknown nodes alike
            sftp.walktree(path, node_size, node_size, node_size, True)
            size = sum(sizes)
            return size
        def handle_node(path, size_handler, is_dir):
            """
            Generic helper function for handling a remote file system node.
            """
            if is_dir and files_only:
                return
            url = urljoin(url_prefix, sftp.normalize(path))
            title = remotepath.basename(path)
            entry = Entry(title, url)
            if get_size:
                try:
                    size = size_handler(path)
                except Exception as e:
                    log.error('Failed to get size for %s (%s)' % (path, e))
                    size = -1
                entry['content_size'] = size
            # propagate key credentials so downstream plugins can reconnect
            if private_key:
                entry['private_key'] = private_key
            if private_key_pass:
                entry['private_key_pass'] = private_key_pass
            entries.append(entry)
        # create helper functions to handle files and directories
        handle_file = partial(handle_node, size_handler=file_size, is_dir=False)
        handle_dir = partial(handle_node, size_handler=dir_size, is_dir=True)
        def handle_unknown(path):
            """
            Skip unknown files.
            """
            log.warning('Skipping unknown file: %s' % path)
        # the business end
        for dir in dirs:
            try:
                sftp.walktree(dir, handle_file, handle_dir, handle_unknown, recursive)
            except IOError as e:
                log.error('Failed to open %s (%s)' % (dir, e))
                continue
        sftp.close()
        return entries
class SftpDownload(object):
    """
    Download files from a SFTP server. This plugin requires the pysftp Python module and its
    dependencies.
    Configuration:
        to:             Destination path; supports Jinja2 templating on the input entry. Fields such
                        as series_name must be populated prior to input into this plugin using
                        metainfo_series or similar.
        recursive:      Indicates whether to download directory contents recursively.
        delete_origin:  Indicates whether to delete the remote file(s) once they've been downloaded.
    Example:
      sftp_download:
          to: '/Volumes/External/Drobo/downloads'
          delete_origin: False
    """
    schema = {
        'type': 'object',
        'properties': {
            'to': {'type': 'string', 'format': 'path'},
            'recursive': {'type': 'boolean', 'default': True},
            'delete_origin': {'type': 'boolean', 'default': False}
        },
        'required': ['to'],
        'additionalProperties': False
    }
    def get_sftp_config(self, entry):
        """
        Parses an entry's url and returns a hashable ConnectionConfig, or
        None when the url scheme is not sftp.
        """
        # parse url
        parsed = urlparse(entry['url'])
        host = parsed.hostname
        username = parsed.username or None
        password = parsed.password or None
        port = parsed.port or 22
        # get private key info if it exists
        private_key = entry.get('private_key')
        private_key_pass = entry.get('private_key_pass')
        if parsed.scheme == 'sftp':
            config = ConnectionConfig(host, port, username, password, private_key, private_key_pass)
        else:
            log.warning('Scheme does not match SFTP: %s' % entry['url'])
            config = None
        return config
    def download_file(self, path, dest, sftp, delete_origin):
        """
        Download a single remote file at `path` into the local `dest`
        directory, mirroring the remote directory layout. Optionally
        deletes the remote file (and its directory, if emptied).
        """
        dir_name = remotepath.dirname(path)
        # convert remote (POSIX) path style to the local OS style
        dest_relpath = localpath.join(*remotepath.split(path))
        destination = localpath.join(dest, dest_relpath)
        dest_dir = localpath.dirname(destination)
        if localpath.exists(destination):
            log.verbose('Destination file already exists. Skipping %s' % path)
            return
        if not localpath.exists(dest_dir):
            os.makedirs(dest_dir)
        log.verbose('Downloading file %s to %s' % (path, destination))
        try:
            sftp.get(path, destination)
        except Exception as e:
            log.error('Failed to download %s (%s)' % (path, e))
            if localpath.exists(destination):
                # don't leave a truncated file behind
                log.debug('Removing partially downloaded file %s' % destination)
                os.remove(destination)
            raise e
        if delete_origin:
            log.debug('Deleting remote file %s' % path)
            try:
                sftp.remove(path)
            except Exception as e:
                log.error('Failed to delete file %s (%s)' % (path, e))
                return
            self.remove_dir(sftp, dir_name)
    def handle_dir(self, path):
        """
        Dummy directory handler. Does nothing.
        """
        pass
    def handle_unknown(self, path):
        """
        Dummy unknown file handler. Warns about unknown files.
        """
        log.warning('Skipping unknown file %s' % path)
    def remove_dir(self, sftp, path):
        """
        Remove a remote directory if it's empty.
        """
        if sftp.exists(path) and not sftp.listdir(path):
            log.debug('Attempting to delete directory %s' % path)
            try:
                sftp.rmdir(path)
            except Exception as e:
                log.error('Failed to delete directory %s (%s)' % (path, e))
    def download_entry(self, entry, config, sftp):
        """
        Downloads the file(s) described in entry.
        """
        path = urlparse(entry['url']).path or '.'
        delete_origin = config['delete_origin']
        recursive = config['recursive']
        to = config['to']
        if to:
            try:
                to = render_from_entry(to, entry)
            except RenderError as e:
                log.error('Could not render path: %s' % to)
                entry.fail(e)
                return
        if not sftp.lexists(path):
            log.error('Remote path does not exist: %s' % path)
            return
        if sftp.isfile(path):
            source_file = remotepath.basename(path)
            source_dir = remotepath.dirname(path)
            try:
                sftp.cwd(source_dir)
                self.download_file(source_file, to, sftp, delete_origin)
            except Exception as e:
                error = 'Failed to download file %s (%s)' % (path, e)
                log.error(error)
                entry.fail(error)
        elif sftp.isdir(path):
            base_path = remotepath.normpath(remotepath.join(path, '..'))
            dir_name = remotepath.basename(path)
            handle_file = partial(self.download_file, dest=to, sftp=sftp, delete_origin=delete_origin)
            try:
                sftp.cwd(base_path)
                sftp.walktree(dir_name, handle_file, self.handle_dir, self.handle_unknown, recursive)
            except Exception as e:
                error = 'Failed to download directory %s (%s)' % (path, e)
                log.error(error)
                entry.fail(error)
                return
            if delete_origin:
                self.remove_dir(sftp, path)
        else:
            log.warning('Skipping unknown file %s' % path)
    def on_task_download(self, task, config):
        """
        Task handler for sftp_download plugin.
        """
        dependency_check()
        # Download entries by host so we can reuse the connection.
        # BUG FIX: itertools.groupby only groups *consecutive* items, so
        # entries for the same host that were not adjacent in task.accepted
        # each opened a fresh connection. Collect into a dict keyed by the
        # (hashable) ConnectionConfig instead.
        grouped = {}
        for entry in task.accepted:
            grouped.setdefault(self.get_sftp_config(entry), []).append(entry)
        for sftp_config, entries in grouped.items():
            if not sftp_config:
                continue
            error_message = None
            sftp = None
            try:
                sftp = sftp_connect(sftp_config)
            except Exception as e:
                error_message = 'Failed to connect to %s (%s)' % (sftp_config.host, e)
                log.error(error_message)
            for entry in entries:
                if sftp:
                    self.download_entry(entry, config, sftp)
                else:
                    entry.fail(error_message)
            if sftp:
                sftp.close()
class SftpUpload(object):
    """
    Upload files to a SFTP server. This plugin requires the pysftp Python module and its
    dependencies.
        host:              Host to connect to
        port:              Port the remote SSH server is listening on. Defaults to port 22.
        username:          Username to log in as
        password:          The password to use. Optional if a private key is provided.
        private_key:       Path to the private key (if any) to log into the SSH server
        private_key_pass:  Password for the private key (if needed)
        to:                Path to upload the file to; supports Jinja2 templating on the input entry.
                           Fields such as series_name must be populated prior to input into this
                           plugin using metainfo_series or similar.
        delete_origin:     Indicates whether to delete the original file after a successful
                           upload.
    Example:
      sftp_upload:
          host: example.com
          username: Username
          private_key: /Users/username/.ssh/id_rsa
          to: /TV/{{series_name}}/Series {{series_season}}
          delete_origin: False
    """
    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'port': {'type': 'integer', 'default': 22},
            'private_key': {'type': 'string'},
            'private_key_pass': {'type': 'string'},
            'to': {'type': 'string'},
            'delete_origin': {'type': 'boolean', 'default': False}
        },
        # BUG FIX: was misspelled 'additionProperties', which JSON schema
        # silently ignores, so unknown config keys were never rejected.
        'additionalProperties': False,
        'required': ['host', 'username']
    }
    def prepare_config(self, config):
        """
        Sets defaults for the provided configuration.
        """
        config.setdefault('password', None)
        config.setdefault('private_key', None)
        config.setdefault('private_key_pass', None)
        config.setdefault('to', None)
        return config
    def handle_entry(self, entry, sftp, config, url_prefix):
        """
        Upload a single entry's local file ('location') to the rendered
        remote destination, creating remote directories as needed.
        """
        location = entry['location']
        filename = localpath.basename(location)
        to = config['to']
        if to:
            try:
                to = render_from_entry(to, entry)
            except RenderError as e:
                log.error('Could not render path: %s', to)
                entry.fail(e)
                return
        destination = remotepath.join(to, filename)
        destination_url = urljoin(url_prefix, destination)
        if not os.path.exists(location):
            log.warning('File no longer exists: %s', location)
            return
        if not sftp.lexists(to):
            try:
                sftp.makedirs(to)
            except Exception as e:
                log.error('Failed to create remote directory %s (%s)' % (to, e))
                entry.fail(e)
                return
        if not sftp.isdir(to):
            log.error('Not a directory: %s' % to)
            entry.fail('Not a directory: %s' % to)
            return
        try:
            sftp.put(localpath=location, remotepath=destination)
            log.verbose('Successfully uploaded %s to %s' % (location, destination_url))
        except IOError as e:
            # BUG FIX: the format string has two placeholders but was only
            # given 'to', which raised a TypeError instead of logging.
            log.error('Remote directory does not exist: %s (%s)' % (to, e))
            entry.fail('Remote directory does not exist: %s (%s)' % (to, e))
            return
        except Exception as e:
            log.error('Failed to upload %s (%s)' % (location, e))
            entry.fail('Failed to upload %s (%s)' % (location, e))
            return
        if config['delete_origin']:
            try:
                os.remove(location)
            except Exception as e:
                # BUG FIX: the message had placeholders but no arguments.
                log.error('Failed to delete file %s (%s)' % (location, e))
    def on_task_output(self, task, config):
        """Uploads accepted entries to the specified SFTP server."""
        config = self.prepare_config(config)
        sftp = sftp_from_config(config)
        url_prefix = sftp_prefix(config)
        for entry in task.accepted:
            if sftp:
                log.debug('Uploading file: %s' % entry)
                self.handle_entry(entry, sftp, config, url_prefix)
            else:
                log.debug('SFTP connection failed; failing entry: %s' % entry)
                entry.fail('SFTP connection failed; failing entry: %s' % entry)
@event('plugin.register')
def register_plugin():
    """Register the three sftp plugins with Flexget."""
    for plugin_class, plugin_name in ((SftpList, 'sftp_list'),
                                      (SftpDownload, 'sftp_download'),
                                      (SftpUpload, 'sftp_upload')):
        plugin.register(plugin_class, plugin_name, api_ver=2)
|
|
""" Class to set-up the configuration used throughout Drupdates """
import os, yaml, sys, json, copy
from os.path import expanduser
try:
import argparse
ARG_LOADED = True
except ImportError:
# Python 2.6
from optparse import OptionParser
ARG_LOADED = False
class DrupdatesError(Exception):
    """ Parent Drupdates error.

    Severity conventions for `level`:
        <= 10  warning
        <= 20  critical
        >  20  fatal
    """
    def __init__(self, level, msg):
        super(DrupdatesError, self).__init__()
        # numeric severity (see class docstring)
        self.level = level
        # human-readable description of the failure
        self.msg = msg
class _Settings(object):
""" Build the settings used throughout the drupdates project.
Settings are built from either YAML files or options passed when run on
CLI. Settings are loaded in this order:
- Core settings file, ie drupdates/settings/default.yaml
- Plugin settings files, ie <plugin dir>/settings/default.yaml
- Local settings file in $HOME/.drupdates, ie $HOME/.drupdates/settings.yaml
- Working Directory settings file, ie <working_directory>/.drupdates/settings.yaml
- Site Repo settings file, ie <webroot>/.drupdates/settings.yaml
- Options passed at runtime, ie $python -m drupdates --workingDir=/opt/
- Prompts to end user, only if required and no value found above
The later the setting is loaded the higher its weight, ie if it's set at
runtime it will overwrite anything set in the Core or local settings file.
"""
def __init__(self):
self.settings = {}
current_dir = os.path.dirname(os.path.realpath(__file__))
settings_file = current_dir + '/settings/default.yaml'
self.add(settings_file)
self._custom_settings()
self.core_settings = copy.copy(self.settings)
self._options = self.options()
def _custom_settings(self):
""" Load custom settings file in $HOME/.drupdates/settings.yaml. """
path = __name__
local_file = expanduser('~') + '/.' + '/'.join(path.split('.')) + '.yaml'
# If there is an override file in the home dir
# (ex. ~/.drupdates/settings.yaml)
try:
self.add(local_file, True)
except DrupdatesError:
pass
def options(self):
""" Read the options set at runtime. """
if ARG_LOADED:
parser = argparse.ArgumentParser()
model = self._model()
for key, setting in self.settings.items():
setting_complete = self.merge(model, setting)
parser.add_argument("--" + key, dest=key,
help=setting_complete['prompt'])
options = parser.parse_args()
else:
parser = OptionParser()
model = self._model()
for key, setting in self.settings.items():
setting_complete = self.merge(model, setting)
parser.add_option("--" + key, action="store", dest=key, type="string",
help=setting_complete['prompt'])
parsed_options = parser.parse_args()
options = parsed_options[0]
return options
@staticmethod
def _model():
""" Model for an individual setting. """
value = {}
value['value'] = ''
value['prompt'] = ''
value['format'] = ''
value['required'] = ''
# Does this setting require another setting be set
value['requires'] = ''
return value
def query_user(self, setting, complete):
""" Query the user for a partiular setting. """
prompt = "Please provide the setting, {0}, {1}:".format(setting, complete['prompt'])
value = raw_input(prompt)
self.set(setting, value, complete['format'])
def get(self, setting):
""" Getter for Settings class """
if setting in self.settings:
model = self._model()
setting_complete = self.merge(model, self.settings[setting])
cli_option = getattr(self._options, setting)
if cli_option:
setting_complete['value'] = cli_option
if not setting_complete['value'] and setting_complete['required']:
self.query_user(setting, setting_complete)
if setting_complete['value'] and setting_complete['requires']:
required = self.get(setting_complete['requires'])
if not required:
self.query_user(setting_complete['requires'],
self.settings[setting_complete['requires']])
return setting_complete['value']
else:
return ""
def add(self, settings_file, force=False):
""" Load/Add settings from a YAML file.
Keyword arguments:
settings_file -- file path to a settings YAML file (required)
force -- Incoming settings overwrite current settings (default = False)
"""
try:
default = open(settings_file, 'r')
except IOError as error:
msg = "Can't open or read settings file, {0}".format(settings_file)
raise DrupdatesError(20, msg)
new = yaml.load(default)
default.close()
for setting, item in new.items():
if not isinstance(item, dict):
error = "Exiting Drupdates \n"
error += "Fatal Error: Settngs file, {0}, ".format(settings_file)
error += "failed to load or parse properly. \n"
error += "{0} setting is fomatted improperly".format(setting)
sys.exit(error)
if not force:
self.settings = self.merge(new, self.settings)
else:
self.settings = self.merge(self.settings, new)
self._options = self.options()
def set(self, setting, value, setting_format='str'):
""" Setter function for Settings class. """
if setting_format:
if setting_format == 'list':
value = value.split()
elif setting_format == 'dict':
value = json.loads(value)
self.settings[setting]['value'] = value
def merge(self, target, source, path=None):
""" Utiliity used to merge two dictionaries, merges source into target.
If target and source contain same key, return will contain source value.
"""
if path is None:
path = []
for key in source:
if key in target:
if isinstance(target[key], dict) and isinstance(source[key], dict) and key == 'value':
self.merge(target[key], source[key], path + [str(key)])
else:
target[key] = source[key]
else:
target[key] = source[key]
return target
def reset(self):
""" Reset the settings attribute. """
self.settings = self.core_settings
def list(self):
""" Return settings list. """
return self.settings
class Settings(object):
    """ Singleton facade over _Settings.

    The first instantiation builds the shared _Settings object; every
    later call returns the same instance, and the static helpers proxy
    to it.
    """
    instance = None
    def __new__(cls):
        if Settings.instance is None:
            Settings.instance = _Settings()
        return Settings.instance
    @staticmethod
    def get(setting):
        """ Get the setting. """
        return Settings.instance.get(setting)
    @staticmethod
    def set(setting, value, setting_format=''):
        """ Set the setting. """
        return Settings.instance.set(setting, value, setting_format)
    @staticmethod
    def add(settings_file, force=False):
        """ Load settings from a YAML file. """
        return Settings.instance.add(settings_file, force)
    @staticmethod
    def reset():
        """ Reset the settings. """
        Settings.instance.reset()
    @staticmethod
    def list():
        """ Provide dictionary of all the settings. """
        return Settings.instance.list()
|
|
"""
Manage pushing and pulling files from an object store like
Amazon Web Services S3.
"""
# pylint: disable=redefined-builtin
import abc
import collections
import os
import re
import subprocess
import sys
import time
import zlib
try:
import azure
from azure import storage as azure_storage
except ImportError:
azure, azure_storage = None, None
import boto
import six
from bcbio.distributed.transaction import file_transaction
from bcbio import utils
SUPPORTED_REMOTES = ("s3://",)
BIODATA_INFO = {"s3": "s3://biodata/prepped/{build}/{build}-{target}.tar.gz"}
REGIONS_NEWPERMS = {"s3": ["eu-central-1"]}
@six.add_metaclass(abc.ABCMeta)
class FileHandle(object):
    """Contract class for the file handle.

    Subclasses provide _chunk_iter/read/next/close; this base class turns
    the chunk stream back into newline-terminated lines and supplies the
    context-manager and iterator protocols.
    """
    def __init__(self):
        self._iter = self._line_iter()
    def __enter__(self):
        """Define what the context manager should do at the beginning
        of the block created by the with statement.
        """
        return self
    def __exit__(self, *args):
        """Define what the context manager should do after its block
        has been executed (or terminates).
        """
        self.close()
    def __iter__(self):
        """Return the iterator for the current file."""
        return self
    def __next__(self):
        """Python 3 iterator protocol.

        BUG FIX: only the Python 2 style next() was defined, so iterating
        a handle raised TypeError on Python 3 even though the module uses
        six for 2/3 compatibility; delegate to the subclass's next().
        """
        return self.next()
    def _line_iter(self):
        """Storage manager file iterator splits by buffer size instead
        of by newline. This wrapper puts them back into lines.
        From mrjob: https://github.com/Yelp/mrjob/blob/master/mrjob/util.py
        """
        buf = ""
        search_offset = 0
        for chunk in self._chunk_iter():
            buf += chunk
            start = 0
            while True:
                end = buf.find("\n", start + search_offset) + 1
                if end:  # if find() returned -1, end would be 0
                    yield buf[start:end]
                    start = end
                    # reset the search offset
                    search_offset = 0
                else:
                    # this will happen eventually
                    buf = buf[start:]
                    # set search offset so we do not need to scan this part
                    # of the buffer again
                    search_offset = len(buf)
                    break
        if buf:
            # flush the trailing, newline-less remainder as a final line
            yield buf + '\n'
    @abc.abstractmethod
    def _chunk_iter(self):
        """Chunk iterator over the received file."""
        pass
    @abc.abstractmethod
    def read(self, size):
        """Read at most size bytes from the file (less if the read hits EOF
        before obtaining size bytes).
        """
        pass
    @abc.abstractmethod
    def next(self):
        """Return the next item from the container."""
        pass
    @abc.abstractmethod
    def close(self):
        """Close the file handle."""
        pass
class S3Handle(FileHandle):
    """File object for the Amazon S3 files."""
    def __init__(self, key):
        super(S3Handle, self).__init__()
        self._key = key
        if self._key.name.endswith(".gz"):
            # Transparently gunzip compressed objects; 16 | MAX_WBITS
            # tells zlib to expect a gzip header.
            self._decompress = zlib.decompressobj(16 | zlib.MAX_WBITS).decompress
        else:
            self._decompress = lambda data: data
    def _chunk_iter(self):
        """Yield (possibly decompressed) chunks streamed from the key."""
        for piece in self._key:
            yield self._decompress(piece)
    def read(self, size):
        """Read at most size bytes from the file (less if the read hits EOF
        before obtaining size bytes).
        """
        return self._key.read(size)
    def next(self):
        """Return the next reassembled line."""
        return self._iter.next()
    def close(self):
        """Close the key without reading the remaining data."""
        self._key.close(fast=True)
class BlobHandle(FileHandle):
    """File object for the Azure Blob files."""
    def __init__(self, blob_service, container, blob, chunk_size):
        super(BlobHandle, self).__init__()
        self._blob_service = blob_service
        self._container_name = container
        self._blob_name = blob
        self._chunk_size = chunk_size
        self._blob_properties = {}
        if blob.endswith(".gz"):
            # Transparently gunzip compressed blobs (16 | MAX_WBITS
            # tells zlib to expect a gzip header).
            decompress = zlib.decompressobj(16 | zlib.MAX_WBITS)
            self._decompress = decompress.decompress
        else:
            self._decompress = lambda value: value
    @property
    def blob_properties(self):
        """Returns all user-defined metadata, standard HTTP properties,
        and system properties for the blob. Fetched lazily and cached.
        """
        if not self._blob_properties:
            self._blob_properties = self._blob_service.get_blob_properties(
                container_name=self._container_name,
                blob_name=self._blob_name)
        return self._blob_properties
    def _chunk_offsets(self):
        """Iterator over chunk offsets."""
        index = 0
        # BUG FIX: the content-length header value is a string; under
        # Python 2 `int < str` is always True, so the loop never ended.
        # Cast to int ('or 0' keeps a missing header from raising).
        blob_size = int(self.blob_properties.get('content-length') or 0)
        while index < blob_size:
            yield index
            index = index + self._chunk_size
    def _chunk_iter(self):
        """Iterator over the blob file."""
        for chunk_offset in self._chunk_offsets():
            yield self._download_chunk(chunk_offset=chunk_offset,
                                       chunk_size=self._chunk_size)
    def _download_chunk_with_retries(self, chunk_offset, chunk_size,
                                     retries=3, retry_wait=1):
        """Download one chunk, retrying on Azure errors up to `retries`
        times with `retry_wait` seconds between attempts.
        """
        while True:
            try:
                # BUG FIX: the result must be returned -- the original
                # discarded it and looped forever even on success.
                return self._download_chunk(chunk_offset, chunk_size)
            except azure.WindowsAzureError:
                if retries > 0:
                    retries = retries - 1
                    time.sleep(retry_wait)
                else:
                    raise
    def _download_chunk(self, chunk_offset, chunk_size):
        """Reads or downloads the received blob range from the service."""
        range_id = 'bytes={0}-{1}'.format(
            chunk_offset, chunk_offset + chunk_size - 1)
        return self._blob_service.get_blob(
            container_name=self._container_name,
            blob_name=self._blob_name,
            x_ms_range=range_id)
    def read(self, size):
        """Read at most size bytes from the file (less if the read hits EOF
        before obtaining size bytes).
        """
        return self._download_chunk_with_retries(chunk_offset=0,
                                                 chunk_size=size)
    def next(self):
        """Return the next item from the container."""
        return self._iter.next()
    def close(self):
        """Close the file handle (no underlying resource to release)."""
        pass
@six.add_metaclass(abc.ABCMeta)
class StorageManager(object):
    """The contract class for all the storage managers.

    Concrete managers (e.g. AmazonS3, AzureBlob below) implement each of
    these abstract methods for one remote object store.
    """
    @abc.abstractmethod
    def check_resource(self, resource):
        """Check if the received resource can be processed by
        the current storage manager.

        Returns a truthy value when this manager handles `resource`.
        """
        pass
    @abc.abstractmethod
    def parse_remote(self, filename):
        """Parse a remote filename in order to obtain information
        related to received resource (typically a namedtuple of its
        components).
        """
        pass
    @abc.abstractmethod
    def connect(self, resource):
        """Return a connection object pointing to the endpoint
        associated to the received resource.
        """
        pass
    @abc.abstractmethod
    def download(self, filename, input_dir, dl_dir=None):
        """Download the resource from the storage."""
        pass
    @abc.abstractmethod
    def list(self, path):
        """Return a list containing the names of the entries in the directory
        given by path. The list is in arbitrary order.
        """
        pass
    @abc.abstractmethod
    def open(self, filename):
        """Provide a handle-like object for streaming."""
        pass
class AmazonS3(StorageManager):
    """Amazon Simple Storage Service (Amazon S3) Manager."""
    _DEFAULT_REGION = "us-east-1"
    _REMOTE_FILE = collections.namedtuple(
        "RemoteFile", ["store", "bucket", "key", "region"])
    _S3_FILE = "s3://%(bucket)s%(region)s/%(key)s"
    def __init__(self):
        super(AmazonS3, self).__init__()
    @classmethod
    def parse_remote(cls, filename):
        """Split an s3:// URI into (store, bucket, key, region).

        The bucket may carry an optional region suffix:
        BUCKETNAME@REGIONNAME/KEY
        """
        remainder = filename.split("//")[-1]
        pieces = remainder.split("/", 1)
        if len(pieces) == 2:
            bucket, key = pieces
        else:
            bucket, key = pieces[0], None
        region = None
        if bucket.find("@") > 0:
            bucket, region = bucket.split("@")
        return cls._REMOTE_FILE("s3", bucket, key, region)
    @classmethod
    def _cl_aws_cli(cls, file_info, region):
        """Build the download command line for the standard AWS CLI."""
        s3file = cls._S3_FILE % {"bucket": file_info.bucket,
                                 "key": file_info.key,
                                 "region": ""}
        aws_binary = os.path.join(os.path.dirname(sys.executable), "aws")
        return ([aws_binary, "s3", "cp", "--region", region, s3file],
                "awscli")
    @staticmethod
    def _cl_gof3r(file_info, region):
        """Build the download command line for gof3r."""
        endpoint = "--endpoint=s3-%s.amazonaws.com" % region
        return (["gof3r", "get", "--no-md5",
                 "-k", file_info.key,
                 "-b", file_info.bucket,
                 endpoint], "gof3r")
    @classmethod
    def _download_cl(cls, filename):
        """Pick the download command line for a potentially streaming
        S3 download.

        Selects the correct endpoint for non us-east support:
        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
        In eu-central-1 gof3r does not support new AWS signatures, so the
        standard AWS command line interface is used instead:
        https://github.com/rlmcpherson/s3gof3r/issues/45
        """
        file_info = cls.parse_remote(filename)
        region = cls.get_region(filename)
        if region != "us-east-1" and region in REGIONS_NEWPERMS["s3"]:
            return cls._cl_aws_cli(file_info, region)
        return cls._cl_gof3r(file_info, region)
    @classmethod
    def get_region(cls, resource=None):
        """Region taken from the resource name when present, else the
        AWS_DEFAULT_REGION environment variable, else the default.

        More information at the following link: http://goo.gl/Vb9Jky
        """
        if resource:
            resource_info = cls.parse_remote(resource)
            if resource_info.region:
                return resource_info.region
        return os.environ.get("AWS_DEFAULT_REGION", cls._DEFAULT_REGION)
    @classmethod
    def check_resource(cls, resource):
        """True when the resource is an s3:// URI this manager handles."""
        return bool(resource and resource.startswith("s3://"))
    @classmethod
    def connect(cls, resource):
        """Connect to this Region's endpoint and return the boto
        connection object for the received resource.
        """
        return boto.s3.connect_to_region(cls.get_region(resource))
    @classmethod
    def download(cls, filename, input_dir, dl_dir=None):
        """Provide potentially streaming download from S3 using gof3r
        or the AWS CLI; returns the local output path.
        """
        file_info = cls.parse_remote(filename)
        if not dl_dir:
            dl_dir = os.path.join(input_dir, file_info.bucket,
                                  os.path.dirname(file_info.key))
        utils.safe_makedir(dl_dir)
        out_file = os.path.join(dl_dir, os.path.basename(file_info.key))
        if utils.file_exists(out_file):
            return out_file
        with file_transaction({}, out_file) as tx_out_file:
            command, prog = cls._download_cl(filename)
            if prog == "gof3r":
                command.extend(["-p", tx_out_file])
            elif prog == "awscli":
                command.extend([tx_out_file])
            else:
                raise NotImplementedError(
                    "Unexpected download program %s" % prog)
            subprocess.check_call(command)
        return out_file
    @classmethod
    def cl_input(cls, filename, unpack=True, anonpipe=True):
        """Return command line input for a file, handling streaming
        remote cases.
        """
        command, prog = cls._download_cl(filename)
        if prog == "awscli":
            # awscli writes to stdout when the target is '-'
            command.append("-")
        command = " ".join(command)
        if filename.endswith(".gz") and unpack:
            command = "%(command)s | gunzip -c" % {"command": command}
        if anonpipe:
            command = "<(%(command)s)" % {"command": command}
        return command
    @classmethod
    def list(cls, path):
        """Return a list containing the names of the entries in the directory
        given by path. The list is in arbitrary order.
        """
        file_info = cls.parse_remote(path)
        bucket = cls.connect(path).get_bucket(file_info.bucket)
        region = "@%s" % file_info.region if file_info.region else ""
        return [cls._S3_FILE % {"bucket": file_info.bucket,
                                "key": key.name,
                                "region": region}
                for key in bucket.get_all_keys(prefix=file_info.key)]
    @classmethod
    def open(cls, filename):
        """Return a handle like object for streaming from S3."""
        file_info = cls.parse_remote(filename)
        connection = cls.connect(filename)
        try:
            s3_bucket = connection.get_bucket(file_info.bucket)
        except boto.exception.S3ResponseError as error:
            if error.status != 403:
                raise
            # We may have folder permissions without bucket permissions;
            # retry without validating the bucket.
            s3_bucket = connection.get_bucket(file_info.bucket,
                                              validate=False)
        return S3Handle(s3_bucket.get_key(file_info.key))
class AzureBlob(StorageManager):
    """Azure Blob storage service manager.

    Handles resources of the form
    ``https://<storage>.blob.core.windows.net/<container>/<blob>``.
    """
    # %-style template for reconstructing a blob URL.
    _BLOB_FILE = ("https://%(storage)s.blob.core.windows.net/"
                  "%(container)s/%(blob)s")
    _REMOTE_FILE = collections.namedtuple(
        "RemoteFile", ["store", "storage", "container", "blob"])
    _URL_FORMAT = re.compile(r'http.*\/\/(?P<storage>[^.]+)[^/]+\/'
                             r'(?P<container>[^/]+)\/*(?P<blob>[^/]*)')
    _BLOB_CHUNK_DATA_SIZE = 4 * 1024 * 1024

    def __init__(self):
        super(AzureBlob, self).__init__()

    @classmethod
    def check_resource(cls, resource):
        """Check if the received resource can be processed by
        the current storage manager.
        """
        return cls._URL_FORMAT.match(resource or "")

    @classmethod
    def parse_remote(cls, filename):
        """Parses a remote filename into blob information."""
        blob_file = cls._URL_FORMAT.search(filename)
        return cls._REMOTE_FILE("blob",
                                storage=blob_file.group("storage"),
                                container=blob_file.group("container"),
                                blob=blob_file.group("blob"))

    @classmethod
    def connect(cls, resource):
        """Returns a connection object pointing to the endpoint
        associated to the received resource.
        """
        file_info = cls.parse_remote(resource)
        return azure_storage.BlobService(file_info.storage)

    @classmethod
    def download(cls, filename, input_dir, dl_dir=None):
        """Download the resource from the storage.

        Returns the local path of the downloaded file.
        """
        file_info = cls.parse_remote(filename)
        if not dl_dir:
            # Mirror the remote layout locally: container plus blob directory.
            # BUG FIX: previously used file_info.storage (the account name),
            # which made every blob collapse onto one local filename.
            dl_dir = os.path.join(input_dir, file_info.container,
                                  os.path.dirname(file_info.blob))
        utils.safe_makedir(dl_dir)
        out_file = os.path.join(dl_dir, os.path.basename(file_info.blob))
        if not utils.file_exists(out_file):
            with file_transaction({}, out_file) as tx_out_file:
                blob_service = cls.connect(filename)
                blob_service.get_blob_to_path(
                    container_name=file_info.container,
                    blob_name=file_info.blob,
                    file_path=tx_out_file)
        return out_file

    @classmethod
    def list(cls, path):
        """Return a list containing the names of the entries in the directory
        given by path. The list is in arbitrary order.
        """
        output = []
        path_info = cls.parse_remote(path)
        blob_service = azure_storage.BlobService(path_info.storage)
        try:
            blob_enum = blob_service.list_blobs(path_info.container)
        except azure.WindowsAzureMissingResourceError:
            # Missing container: report an empty listing rather than fail.
            return output
        for item in blob_enum:
            # BUG FIX: _BLOB_FILE is a %-style template; calling .format()
            # on it was a silent no-op that returned the literal template.
            output.append(cls._BLOB_FILE % {"storage": path_info.storage,
                                            "container": path_info.container,
                                            "blob": item.name})
        return output

    @classmethod
    def open(cls, filename):
        """Provide a handle-like object for streaming."""
        file_info = cls.parse_remote(filename)
        blob_service = cls.connect(filename)
        return BlobHandle(blob_service=blob_service,
                          container=file_info.container,
                          blob=file_info.blob,
                          chunk_size=cls._BLOB_CHUNK_DATA_SIZE)
def _get_storage_manager(resource):
    """Return a storage manager which can process this resource."""
    for candidate in (AmazonS3, AzureBlob):
        if candidate.check_resource(resource):
            return candidate()
    # No registered manager recognised the resource URL scheme.
    raise ValueError("Unexpected object store %(resource)s" %
                     {"resource": resource})
def is_remote(fname):
    """Check if the received file is recognised by one of
    the available storage managers.
    """
    try:
        _get_storage_manager(fname)
        return True
    except ValueError:
        return False
def file_exists_or_remote(fname):
    """Check if a file exists or is accessible remotely."""
    # Short-circuits: local existence is only checked for non-remote paths.
    return is_remote(fname) or utils.file_exists(fname)
def default_region(fname):
    """Return the default region for the received resource.

    Note:
        This feature is available only for AmazonS3 storage manager.
    """
    manager = _get_storage_manager(fname)
    get_region = getattr(manager, "get_region", None)
    if get_region is None:
        raise NotImplementedError("Unexpected object store %s" % fname)
    return get_region()
def connect(filename):
    """Returns a connection object pointing to the endpoint associated
    to the received resource.
    """
    return _get_storage_manager(filename).connect(filename)
def download(fname, input_dir, dl_dir=None):
    """Download the resource from the storage.

    Non-remote paths are returned unchanged.
    """
    if not is_remote(fname):
        return fname
    return _get_storage_manager(fname).download(fname, input_dir, dl_dir)
def cl_input(fname, unpack=True, anonpipe=True):
    """Return command line input for a file, handling streaming
    remote cases. Non-remote paths are returned unchanged.
    """
    if not is_remote(fname):
        return fname
    return _get_storage_manager(fname).cl_input(fname, unpack, anonpipe)
def list(remote_dirname):
    """Return a list containing the names of the entries in the directory
    given by path. The list is in arbitrary order.
    """
    return _get_storage_manager(remote_dirname).list(remote_dirname)
def open(fname):
    """Provide a handle-like object for streaming."""
    return _get_storage_manager(fname).open(fname)
def parse_remote(fname):
    """Parses a remote filename in order to obtain information
    related to received resource.
    """
    return _get_storage_manager(fname).parse_remote(fname)
|
|
from os.path import join
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
import pytest
from os import environ
from pliers.tests.utils import get_test_data_path
from pliers import config
from pliers.extractors import (TensorFlowKerasApplicationExtractor,
TFHubExtractor,
TFHubImageExtractor,
TFHubTextExtractor,
BertExtractor,
BertSequenceEncodingExtractor,
BertLMExtractor,
BertSentimentExtractor,
AudiosetLabelExtractor)
from pliers.filters import AudioResamplingFilter
from pliers.stimuli import (ImageStim,
TextStim, ComplexTextStim,
AudioStim)
from pliers.extractors.base import merge_results
from transformers import BertTokenizer
from pliers.utils import verify_dependencies
# Remember the user's transformer-cache setting (restored elsewhere) and
# disable caching so each test constructs models fresh.
cache_default = config.get_option('cache_transformers')
config.set_option('cache_transformers', False)
# Directories holding the pliers test fixtures.
IMAGE_DIR = join(get_test_data_path(), 'image')
TEXT_DIR = join(get_test_data_path(), 'text')
AUDIO_DIR = join(get_test_data_path(), 'audio')
# TensorFlow Hub model handles exercised by the tests below.
EFFNET_URL = 'https://tfhub.dev/tensorflow/efficientnet/b7/classification/1'
MNET_URL = 'https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4'
SENTENC_URL = 'https://tfhub.dev/google/universal-sentence-encoder/4'
GNEWS_URL = 'https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2'
TOKENIZER_URL = 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/2'
ELECTRA_URL = 'https://tfhub.dev/google/electra_small/2'
SPEECH_URL = 'https://tfhub.dev/google/speech_embedding/1'
# Skip the whole module when the environment opts out of high-memory tests.
pytestmark = pytest.mark.skipif(
    environ.get('skip_high_memory', False) == 'true', reason='high memory')
def test_tensorflow_keras_application_extractor():
    """Default Keras-application predictions for two images, merged wide."""
    stims = [ImageStim(join(IMAGE_DIR, name), onset=4.2, duration=1)
             for name in ['apple.jpg', 'obama.jpg']]
    ext = TensorFlowKerasApplicationExtractor()
    df = merge_results(ext.transform(stims), format='wide',
                       extractor_names='multi')
    assert df.shape == (2, 19)
    pred = df['TensorFlowKerasApplicationExtractor'].loc[0, 'Granny_Smith']
    assert np.isclose(0.9737075, pred, 1e-05)
    pred = df['TensorFlowKerasApplicationExtractor'].loc[1, 'Windsor_tie']
    assert np.isclose(0.64234024, pred, 1e-05)
    # Onset/duration survive the merge.
    assert 4.2 in df[('onset', np.nan)].values
    assert 1 in df[('duration', np.nan)].values
    with pytest.raises(ValueError):
        TensorFlowKerasApplicationExtractor(architecture='foo')
def test_tfhub_image():
    """EfficientNet-B7 classification of an apple image."""
    stim = ImageStim(join(IMAGE_DIR, 'apple.jpg'))
    df = TFHubImageExtractor(EFFNET_URL).transform(stim).to_df()
    assert all('feature_' + str(i) in df.columns for i in range(1000))
    logits = np.array([df['feature_' + str(i)][0] for i in range(1000)])
    # ImageNet class 948 is Granny Smith.
    assert np.argmax(logits) == 948
def test_tfhub_image_reshape():
    """MobileNet feature vectors with explicit input reshaping."""
    stims = [ImageStim(join(IMAGE_DIR, 'apple.jpg')),
             ImageStim(join(IMAGE_DIR, 'obama.jpg'))]
    ext = TFHubImageExtractor(MNET_URL,
                              reshape_input=(224, 224, 3),
                              features='feature_vector')
    df = merge_results(ext.transform(stims), extractor_names=False)
    assert df.shape[0] == 2
    # MobileNetV2 feature vectors are 1280-dimensional.
    assert all(len(vec) == 1280 for vec in df['feature_vector'])
def test_tfhub_text():
    """Universal Sentence Encoder output matches a direct hub call."""
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    ext = TFHubTextExtractor(SENTENC_URL, output_key=None)
    df = ext.transform(stim).to_df()
    assert all(f'feature_{i}' in df.columns for i in range(512))
    expected = hub.KerasLayer(SENTENC_URL)([stim.text])[0, 10].numpy()
    assert np.isclose(df['feature_10'][0], expected)
def test_tfhub_text_one_feature():
    """Single named feature plus error path for a bad output_key."""
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    cstim = ComplexTextStim(join(TEXT_DIR, 'wonderful.txt'))
    ext = TFHubTextExtractor(GNEWS_URL, output_key=None,
                             features='embedding')
    df = merge_results(ext.transform(cstim), extractor_names=False)
    assert df.shape[0] == len(cstim.elements)
    expected = hub.KerasLayer(GNEWS_URL)([cstim.elements[3].text])[0, 2].numpy()
    assert np.isclose(df['embedding'][3][2], expected)
    # Requesting a key from a non-dict model output must fail loudly.
    with pytest.raises(ValueError) as err:
        TFHubTextExtractor(GNEWS_URL, output_key='key').transform(stim)
    assert 'not a dictionary' in str(err.value)
def test_tfhub_text_transformer_sentence():
    """ELECTRA pooled sentence encodings match a direct preprocess+model call."""
    stim = TextStim(join(TEXT_DIR, 'scandal.txt'))
    cstim = ComplexTextStim(join(TEXT_DIR, 'wonderful.txt'))
    ext = TFHubTextExtractor(ELECTRA_URL,
                             features='sent_encoding',
                             preprocessor_url_or_path=TOKENIZER_URL)
    df = merge_results(ext.transform(cstim.elements[:6]),
                       extractor_names=False)
    preprocessor = hub.KerasLayer(TOKENIZER_URL)
    model = hub.KerasLayer(ELECTRA_URL)
    expected = model(preprocessor([cstim.elements[5].text]))\
        ['pooled_output'][0, 20].numpy()
    assert np.isclose(df['sent_encoding'][5][20], expected)
    # A wrong output_key on a dict output raises with key suggestions.
    with pytest.raises(ValueError) as err:
        TFHubTextExtractor(ELECTRA_URL,
                           preprocessor_url_or_path=TOKENIZER_URL,
                           output_key='key').transform(stim)
    assert 'Check which keys' in str(err.value)
def test_tfhub_text_transformer_tokens():
    """Per-token ELECTRA encodings have the expected (seq_len, dim) shape."""
    cstim = ComplexTextStim(join(TEXT_DIR, 'wonderful.txt'))
    tkn_ext = TFHubTextExtractor(ELECTRA_URL,
                                 features='token_encodings',
                                 output_key='sequence_output',
                                 preprocessor_url_or_path=TOKENIZER_URL)
    tkn_df = merge_results(tkn_ext.transform(cstim.elements[:3]),
                           extractor_names=False)
    # 128 tokens x 256-dim hidden states for each element.
    assert all(tkn_df['token_encodings'][i].shape == (128, 256)
               for i in range(tkn_df.shape[0]))
def test_tfhub_generic():
    """Generic extractor applied to the speech-embedding hub model."""
    astim = AudioStim(join(AUDIO_DIR, 'obama_speech.wav'))
    astim = AudioResamplingFilter(target_sr=16000).transform(astim)

    def add_batch_dim(x):
        # The model expects a leading batch dimension.
        return tf.expand_dims(x, axis=0)

    aext = TFHubExtractor(SPEECH_URL,
                          transform_inp=add_batch_dim,
                          features='speech_embedding')
    df = aext.transform(astim).to_df()
    # Expected dimensionality documented at the model URL.
    emb_dim = 96
    n_chunks = 1 + (astim.data.shape[0] - 12400) // 1280
    assert df['speech_embedding'][0].shape == (n_chunks, emb_dim)
def test_bert_extractor():
    """Base BERT extractor: encoding shape, tokenization metadata,
    file-based stims, and torch/tf framework agreement."""
    stim = ComplexTextStim(text='This is not a tokenized sentence.')
    stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
    ext_base = BertExtractor(pretrained_model='bert-base-uncased')
    ext_base_token = BertExtractor(pretrained_model='bert-base-uncased',
                                   return_input=True)
    ext_tf = BertExtractor(pretrained_model='bert-base-uncased', framework='tf')
    base_result = ext_base.transform(stim)
    res = base_result.to_df()
    res_token = ext_base_token.transform(stim).to_df()
    res_file = ext_base.transform(stim_file).to_df()
    res_tf = ext_tf.transform(stim).to_df()
    # Test encoding shape (bert-base hidden size is 768)
    assert len(res['encoding'][0]) == 768
    assert len(res_file['encoding'][0]) == 768
    # test base extractor: 8 wordpiece tokens for this sentence
    assert res.shape[0] == 8
    assert res_token.shape[0] == 8
    # 'tokenized' splits into 'token' + '##ized'; both map to the same word
    assert res_token['token'][5] == '##ized'
    assert res_token['word'][5] == 'tokenized'
    assert res_token['object_id'][5] == 5
    # test base extractor on file
    assert res_file.shape[0] == 8
    assert res_file['onset'][3] == 1.3
    assert res_file['duration'][5] == 0.5
    assert res_file['object_id'][5] == 5
    # test tf vs torch: per-token encodings should be perfectly correlated
    cors = [np.corrcoef(res['encoding'][i], res_tf['encoding'][i])[0,1]
            for i in range(res.shape[0])]
    assert all(np.isclose(cors, 1))
    # catch error if framework is invalid
    with pytest.raises(ValueError) as err:
        BertExtractor(framework='keras')
    assert 'Invalid framework' in str(err.value)
    # Delete the models to free memory before the next test
    del res, res_token, res_file, ext_base, ext_base_token
@pytest.mark.parametrize('model',
                         ['bert-large-uncased', 'distilbert-base-uncased',
                          'roberta-base', 'camembert-base'])
def test_bert_other_models(model):
    """Encoding size and tokenization for non-base BERT variants."""
    if model == 'camembert-base':
        # CamemBERT is French; use a French sentence.
        stim = ComplexTextStim(text='ceci n\'est pas un pipe')
    else:
        stim = ComplexTextStim(text='This is not a tokenized sentence.')
    res = BertExtractor(
        pretrained_model=model, return_input=True).transform(stim).to_df()
    expected_dim = 1024 if model == 'bert-large-uncased' else 768
    assert len(res['encoding'][0]) == expected_dim
    if model == 'camembert-base':
        assert res['token'][4] == 'est'
    # remove variables to free memory
    del res, stim
def test_bert_sequence_extractor():
    """Sequence-level BERT encodings: pooling modes, special tokens,
    file-based stims, and argument validation."""
    stim = ComplexTextStim(text='This is not a tokenized sentence.')
    stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
    ext_pooler = BertSequenceEncodingExtractor(return_special='pooler_output')
    # Test correct behavior when setting return_special
    # (requesting a special token disables pooling)
    assert ext_pooler.pooling is None
    assert ext_pooler.return_special == 'pooler_output'
    res_sequence = BertSequenceEncodingExtractor(
        return_input=True).transform(stim).to_df()
    res_file = BertSequenceEncodingExtractor(
        return_input=True).transform(stim_file).to_df()
    res_cls = BertSequenceEncodingExtractor(
        return_special='[CLS]').transform(stim).to_df()
    res_pooler = ext_pooler.transform(stim).to_df()
    res_max = BertSequenceEncodingExtractor(
        pooling='max').transform(stim).to_df()
    # Check shape: one 768-dim encoding per sequence for every mode
    assert len(res_sequence['encoding'][0]) == 768
    assert len(res_cls['encoding'][0]) == 768
    assert len(res_pooler['encoding'][0]) == 768
    assert len(res_max['encoding'][0]) == 768
    assert res_sequence.shape[0] == 1
    assert res_cls.shape[0] == 1
    assert res_pooler.shape[0] == 1
    assert res_max.shape[0] == 1
    # Make sure pooler/cls/no arguments return different encodings
    assert res_sequence['encoding'][0] != res_cls['encoding'][0]
    assert res_sequence['encoding'][0] != res_pooler['encoding'][0]
    assert res_sequence['encoding'][0] != res_max['encoding'][0]
    # max pooling dominates the (mean) sequence encoding elementwise
    assert all([res_max['encoding'][0][i] >= res_sequence['encoding'][0][i]
               for i in range(768)])
    # test return sequence
    assert res_sequence['sequence'][0] == 'This is not a tokenized sentence .'
    # test file stim
    assert res_file['duration'][0] == 2.9
    assert res_file['onset'][0] == 0.2
    # catch error with wrong numpy function and wrong special token arg
    with pytest.raises(ValueError) as err:
        BertSequenceEncodingExtractor(pooling='avg')
    assert 'valid numpy function' in str(err.value)
    with pytest.raises(ValueError) as err:
        BertSequenceEncodingExtractor(return_special='[MASK]')
    assert 'must be one of' in str(err.value)
    # remove variables to free memory
    del ext_pooler, res_cls, res_max, res_pooler, res_sequence, res_file, stim
def test_bert_LM_extractor():
    """Masked-LM extractor: argument validation, mask selection, target
    words, top_n / threshold filtering, and mask updating."""
    stim = ComplexTextStim(text='This is not a tokenized sentence.')
    stim_masked = ComplexTextStim(text='This is MASK tokenized sentence.')
    stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
    # Test mutual exclusivity and mask values
    with pytest.raises(ValueError) as err:
        BertLMExtractor(top_n=100, target='test')
    assert 'mutually exclusive' in str(err.value)
    with pytest.raises(ValueError) as err:
        BertLMExtractor(top_n=100, threshold=.5)
    assert 'mutually exclusive' in str(err.value)
    with pytest.raises(ValueError) as err:
        BertLMExtractor(target='test', threshold=.5)
    assert 'mutually exclusive' in str(err.value)
    with pytest.raises(ValueError) as err:
        BertLMExtractor(mask=['test', 'mask'])
    assert 'must be a string' in str(err.value)
    with pytest.raises(ValueError) as err:
        BertLMExtractor(target='nonwd')
    assert 'No valid target token' in str(err.value)
    target_wds = ['target', 'word']
    ext_target = BertLMExtractor(mask=1, target=target_wds)
    res = BertLMExtractor(mask=2).transform(stim).to_df()
    res_file = BertLMExtractor(mask=2).transform(stim_file).to_df()
    res_target = ext_target.transform(stim).to_df()
    res_topn = BertLMExtractor(mask=3, top_n=100).transform(stim).to_df()
    res_threshold = BertLMExtractor(
        mask=4, threshold=.1, return_softmax=True).transform(stim).to_df()
    res_default = BertLMExtractor().transform(stim_masked).to_df()
    res_return_mask = BertLMExtractor(
        mask=1, top_n=10, return_masked_word=True, return_input=True).transform(stim).to_df()
    assert res.shape[0] == 1
    # test onset/duration
    assert res_file['onset'][0] == 1.0
    assert res_file['duration'][0] == 0.2
    # Check target words (feature columns are capitalized token names)
    assert all([w.capitalize() in res_target.columns for w in target_wds])
    assert res_target.shape[1] == 6
    # Check top_n: 100 token columns plus 4 metadata columns; column 3
    # (the best prediction) should dominate the rest
    assert res_topn.shape[1] == 104
    assert all([res_topn.iloc[:, 3][0] > res_topn.iloc[:, i][0] for i in range(4, 103)])
    # Check threshold and range: softmax scores must lie in [.1, 1]
    tknz = BertTokenizer.from_pretrained('bert-base-uncased')
    vocab = tknz.vocab.keys()
    for v in vocab:
        if v.capitalize() in res_threshold.columns:
            assert res_threshold[v.capitalize()][0] >= .1
            assert res_threshold[v.capitalize()][0] <= 1
    # Test update mask method
    assert ext_target.mask == 1
    ext_target.update_mask(new_mask='sentence')
    assert ext_target.mask == 'sentence'
    res_target_new = ext_target.transform(stim).to_df()
    # Masking a different position must change the target-word scores
    assert all([res_target[c][0] != res_target_new[c][0]
               for c in ['Target', 'Word']])
    with pytest.raises(ValueError) as err:
        ext_target.update_mask(new_mask=['some', 'mask'])
    assert 'must be a string' in str(err.value)
    # Test default mask
    assert res_default.shape[0] == 1
    # Test return mask and input
    assert res_return_mask['true_word'][0] == 'is'
    assert 'true_word_score' in res_return_mask.columns
    assert res_return_mask['sequence'][0] == 'This is not a tokenized sentence .'
    # Make sure no non-ascii tokens are dropped
    assert res.shape[1] == len(vocab) + 4
    # remove variables to free memory
    del ext_target, res, res_file, res_target, res_topn, \
        res_threshold, res_default, res_return_mask
def test_bert_sentiment_extractor():
    """Sentiment scores, metadata, and softmax bounds for BERT sentiment."""
    stim = ComplexTextStim(text='This is the best day of my life.')
    stim_file = ComplexTextStim(join(TEXT_DIR, 'sentence_with_header.txt'))
    res = BertSentimentExtractor().transform(stim).to_df()
    res_file = BertSentimentExtractor().transform(stim_file).to_df()
    res_seq = BertSentimentExtractor(return_input=True).transform(stim).to_df()
    res_softmax = BertSentimentExtractor(
        return_softmax=True).transform(stim).to_df()
    assert res.shape[0] == 1
    assert res_file['onset'][0] == 0.2
    assert res_file['duration'][0] == 2.9
    sentiment_cols = ['sent_pos', 'sent_neg']
    assert all(col in res.columns for col in sentiment_cols)
    assert res_seq['sequence'][0] == 'This is the best day of my life .'
    # Softmax outputs are probabilities, hence bounded to [0, 1].
    assert all(res_softmax[col][0] >= 0 for col in sentiment_cols)
    assert all(res_softmax[col][0] <= 1 for col in sentiment_cols)
    # remove variables to free memory
    del res, res_file, res_seq, res_softmax
@pytest.mark.parametrize('hop_size', [0.1, 1])
@pytest.mark.parametrize('top_n', [5, 10])
@pytest.mark.parametrize('target_sr', [22000, 14000])
def test_audioset_extractor(hop_size, top_n, target_sr):
    """YAMNet/AudioSet label extractor: output shape vs hop size, resampling
    limits, top_n restriction, label subsetting, and argument validation."""
    verify_dependencies(['tensorflow'])

    def compute_expected_length(stim, ext):
        # Number of analysis windows that fit in the stimulus given the
        # model's STFT/patch parameters and the requested hop size.
        stft_par = ext.params.STFT_WINDOW_SECONDS - ext.params.STFT_HOP_SECONDS
        tot_window = ext.params.PATCH_WINDOW_SECONDS + stft_par
        ons = np.arange(
            start=0, stop=stim.duration - tot_window, step=hop_size)
        return len(ons)

    audio_stim = AudioStim(join(AUDIO_DIR, 'crowd.mp3'))
    audio_filter = AudioResamplingFilter(target_sr=target_sr)
    audio_resampled = audio_filter.transform(audio_stim)
    # test with defaults and 44100 stimulus
    ext = AudiosetLabelExtractor(hop_size=hop_size)
    r_orig = ext.transform(audio_stim).to_df()
    assert r_orig.shape[0] == compute_expected_length(audio_stim, ext)
    # 521 AudioSet labels plus 4 metadata columns
    assert r_orig.shape[1] == 525
    assert np.argmax(r_orig.to_numpy()[:, 4:].mean(axis=0)) == 0
    assert r_orig['duration'][0] == .975
    # consecutive onsets should be spaced by the hop size
    assert all([np.isclose(r_orig['onset'][i] - r_orig['onset'][i-1], hop_size)
               for i in range(1, r_orig.shape[0])])
    # test resampled audio length and errors
    if target_sr >= 14500:
        r_resampled = ext.transform(audio_resampled).to_df()
        assert r_orig.shape[0] == r_resampled.shape[0]
    else:
        # too-low sample rates must be rejected with an upsampling hint
        with pytest.raises(ValueError) as sr_error:
            ext.transform(audio_resampled)
        assert all([substr in str(sr_error.value)
                    for substr in ['Upsample', str(target_sr)]])
    # test top_n option
    ext_top_n = AudiosetLabelExtractor(top_n=top_n)
    r_top_n = ext_top_n.transform(audio_stim).to_df()
    assert r_top_n.shape[1] == ext_top_n.top_n + 4
    assert np.argmax(r_top_n.to_numpy()[:, 4:].mean(axis=0)) == 0
    # test label subset
    labels = ['Speech', 'Silence', 'Harmonic', 'Bark', 'Music', 'Bell',
              'Steam', 'Rain']
    ext_labels_only = AudiosetLabelExtractor(labels=labels)
    r_labels_only = ext_labels_only.transform(audio_stim).to_df()
    assert r_labels_only.shape[1] == len(labels) + 4
    # test top_n/labels error
    with pytest.raises(ValueError) as err:
        AudiosetLabelExtractor(top_n=10, labels=labels)
    assert 'Top_n and labels are mutually exclusive' in str(err.value)
|
|
#!/usr/bin/env python
from groupflow_shared import *
from mininet.net import *
from mininet.node import OVSSwitch, UserSwitch
from mininet.link import TCLink
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.node import Node, RemoteController
from scipy.stats import truncnorm
from numpy.random import randint, uniform
from subprocess import *
import sys
import signal
from time import sleep, time
from datetime import datetime
from multiprocessing import Process, Pipe
import numpy as np
# When True, every generated multicast group has exactly FIXED_GROUP_SIZE
# receivers; otherwise group membership is drawn from per-host probabilities.
ENABLE_FIXED_GROUP_SIZE = True
FIXED_GROUP_SIZE = 4
def mcastTest(topo, interactive = False, hosts = [], log_file_name = 'test_log.log', util_link_weight = 10, link_weight_type = 'linear', replacement_mode='none', pipe = None):
membership_mean = 0.1
membership_std_dev = 0.25
membership_avg_bound = float(len(hosts)) / 8.0
test_groups = []
test_group_launch_times = []
test_success = True
# Launch the external controller
pox_arguments = []
if 'periodic' in replacement_mode:
pox_arguments = ['/usr/local/home/cse222a05/pox/pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=15',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
else:
pox_arguments = ['/usr/local/home/cse222a05/pox/pox.py', 'log', '--file=pox.log,w', 'openflow.discovery', '--link_timeout=30', 'openflow.keepalive',
'openflow.flow_tracker', '--query_interval=1', '--link_max_bw=19', '--link_cong_threshold=13', '--avg_smooth_factor=0.5', '--log_peak_usage=True',
'misc.benchmark_terminator', 'openflow.igmp_manager', 'misc.groupflow_event_tracer',
'openflow.groupflow', '--util_link_weight=' + str(util_link_weight), '--link_weight_type=' + link_weight_type, '--flow_replacement_mode=' + replacement_mode,
'--flow_replacement_interval=15',
'log.level', '--WARNING', '--openflow.flow_tracker=INFO']
print 'Launching external controller: ' + str(pox_arguments[0])
print 'Launch arguments:'
print ' '.join(pox_arguments)
with open(os.devnull, "w") as fnull:
pox_process = Popen(pox_arguments, stdout=fnull, stderr=fnull, shell=False, close_fds=True)
# Allow time for the log file to be generated
sleep(1)
# Determine the flow tracker log file
pox_log_file = open('./pox.log', 'r')
flow_log_path = None
event_log_path = None
got_flow_log_path = False
got_event_log_path = False
while (not got_flow_log_path) or (not got_event_log_path):
pox_log = pox_log_file.readline()
if 'Writing flow tracker info to file:' in pox_log:
pox_log_split = pox_log.split()
flow_log_path = pox_log_split[-1]
got_flow_log_path = True
if 'Writing event trace info to file:' in pox_log:
pox_log_split = pox_log.split()
event_log_path = pox_log_split[-1]
got_event_log_path = True
print 'Got flow tracker log file: ' + str(flow_log_path)
print 'Got event trace log file: ' + str(event_log_path)
print 'Controller initialized'
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
# External controller
net = Mininet(topo, controller=RemoteController, switch=OVSSwitch, link=TCLink, build=False, autoSetMacs=True)
#pox = RemoteController('pox', '127.0.0.1', 6633)
net.addController('pox', RemoteController, ip = '127.0.0.1', port = 6633)
net.start()
for switch_name in topo.get_switch_list():
#print switch_name + ' route add -host 127.0.0.1 dev lo'
net.get(switch_name).controlIntf = net.get(switch_name).intf('lo')
net.get(switch_name).cmd('route add -host 127.0.0.1 dev lo')
#print 'pox' + ' route add -host ' + net.get(switch_name).IP() + ' dev lo'
net.get('pox').cmd('route add -host ' + net.get(switch_name).IP() + ' dev lo')
#print net.get(switch_name).cmd('ifconfig')
topo.mcastConfig(net)
#print 'Controller network configuration:'
#print net.get('pox').cmd('ifconfig')
#print net.get('pox').cmd('route')
sleep_time = 8 + (float(len(hosts))/8)
print 'Waiting ' + str(sleep_time) + ' seconds to allow for controller topology discovery'
sleep(sleep_time)
try:
if interactive:
CLI(net)
else:
mcast_group_last_octet = 1
mcast_port = 5010
rand_seed = int(time())
print 'Using random seed: ' + str(rand_seed)
np.random.seed(rand_seed)
host_join_probabilities = generate_group_membership_probabilities(hosts, membership_mean, membership_std_dev, membership_avg_bound)
print 'Host join probabilities: ' + ', '.join(str(p) for p in host_join_probabilities)
host_join_sum = sum(p[1] for p in host_join_probabilities)
print 'Measured mean join probability: ' + str(host_join_sum / len(host_join_probabilities))
print 'Predicted average group size: ' + str(host_join_sum)
i = 0
congested_switch_num_links = 0
n = 1
#run(hosts, host_join_probabilities, mcast_group_last_octet, test_groups, i, mcast_port, test_group_launch_times, net)
while i < n:
print 'Generating multicast group #' + str(i)
# Choose a sending host using a uniform random distribution
sender_index = randint(0,len(hosts))
sender_host = hosts[sender_index]
receivers = []
if ENABLE_FIXED_GROUP_SIZE:
while len(receivers) < FIXED_GROUP_SIZE:
receiver_index = randint(0,len(hosts))
if receiver_index == sender_index:
continue
receivers.append(hosts[receiver_index])
receivers = list(set(receivers))
else:
# Choose a random number of receivers by comparing a uniform random variable
# against the previously generated group membership probabilities
for host_prob in host_join_probabilities:
p = uniform(0, 1)
if p <= host_prob[1]:
receivers.append(host_prob[0])
# Initialize the group
# Note - This method of group IP generation will need to be modified slightly to support more than
# 255 groups
mcast_ip = '224.1.1.{last_octet}'.format(last_octet = str(mcast_group_last_octet))
test_groups.append(StaticMulticastGroupDefinition(sender_host, receivers, mcast_ip, mcast_port, mcast_port + 1))
launch_time = time()
test_group_launch_times.append(launch_time)
print 'Launching multicast group #' + str(i) + ' at time: ' + str(launch_time)
print 'Sender: ' + str(sender_host)
print 'Receivers: ' + str(receivers)
test_groups[-1].launch_normal_mcast_applications(net)
mcast_group_last_octet = mcast_group_last_octet + 1
mcast_port = mcast_port + 2
i += 1
wait_time = 8 + uniform(0, 1)
# Read from the log file to determine if a link has become overloaded, and cease generating new groups if so
print 'Check for congested link...'
congested_link = False
pox_log_file = open('./pox.log', 'r')
pox_log_file.seek(pox_log_offset)
done_reading = False
while not done_reading:
line = pox_log_file.readline()
if 'Network peak link throughput (Mbps):' in line:
line_split = line.split(' ')
print 'Peak Usage (Mbps): ' + line_split[-1],
if 'Network avg link throughput (Mbps):' in line:
line_split = line.split(' ')
print 'Mean Usage (Mbps): ' + line_split[-1],
if 'FlowStats: Fully utilized link detected!' in line:
line_split = line.split(' ')
congested_link = True
done_reading = True
if 'Multicast topology changed, recalculating all paths' in line or 'Path could not be determined for receiver' in line:
print 'ERROR: Network topology changed unexpectedly.'
print line
test_success = False
done_reading = True
if time() - launch_time > wait_time:
done_reading = True
pox_log_offset = pox_log_file.tell()
pox_log_file.close()
if congested_link:
print 'Detected fully utilized link, terminating simulation.'
break
if not test_success:
print 'Detected network connectivity error, terminating simulation.'
break
else:
print 'No congestion detected.'
recv_packets = 0
lost_packets = 0
print 'Terminating network applications'
for group in test_groups:
group.terminate_mcast_applications()
print 'Terminating controller'
pox_process.send_signal(signal.SIGINT)
sleep(1)
print 'Waiting for network application termination...'
for group in test_groups:
group.wait_for_application_termination()
print 'Network applications terminated'
print 'Waiting for controller termination...'
pox_process.send_signal(signal.SIGKILL)
pox_process.wait()
print 'Controller terminated'
pox_process = None
net.stop()
if not interactive and test_success:
write_final_stats_log(log_file_name, flow_log_path, event_log_path, membership_mean, membership_std_dev, membership_avg_bound, test_groups, test_group_launch_times, topo)
if not test_success:
call('rm -rfv ' + str(flow_log_path), shell=True)
call('rm -rfv ' + str(event_log_path), shell=True)
except BaseException as e:
print str(e)
test_success = False
if pipe is not None:
pipe.send(test_success)
pipe.close()
# Topology registry consumed by Mininet's --custom/--topo command line flags.
topos = { 'mcast_test': ( lambda: FattreeTopo() ) }
def print_usage_text():
print 'GroupFlow Multicast Testing with Mininet'
print 'Usage:'
print '1) No arguments:'
print '> mininet_multicast_pox'
print 'If no arguments are provided, the script will launch a hard-coded test topology with Mininet in interactive mode.'
print ''
print '2) Custom topology:'
print '> mininet_multicast_pox <topology_path>'
print 'topology_path: If a single argument is given, the argument will be interpreted as a path to a BRITE topology. Otherwise, this functions identically to the no argument mode.'
print ''
print '3) Automated benchmarking:'
print '> mininet_multicast_pox <topology_path> <iterations_to_run> <log_file_prefix> <index_of_first_log_file> <parameter_sets (number is variable and unlimited)>'
print 'Parameter sets have the form: flow_replacement_mode,link_weight_type,util_link_weight'
print 'The topology path "manhattan" is currently hardcoded to generate a 20 Mbps, 5x5 Manhattan grid topology'
if __name__ == '__main__':
    setLogLevel( 'info' )
    # Help flag handling.
    if len(sys.argv) >= 2:
        if '-h' in str(sys.argv[1]) or 'help' in str(sys.argv[1]):
            print_usage_text()
            sys.exit()
    if len(sys.argv) >= 6:
        # Automated simulations - Differing link usage weights in Groupflow Module
        # argv layout: <topology> <iterations> <log_prefix> <first_index> <param_set>...
        log_prefix = sys.argv[3]
        num_iterations = int(sys.argv[2])
        first_index = int(sys.argv[4])
        util_params = []
        for param_index in range(5, len(sys.argv)):
            # Each set: flow_replacement_mode,link_weight_type,util_link_weight
            param_split = sys.argv[param_index].split(',')
            util_params.append((param_split[0], param_split[1], float(param_split[2])))
        topo = None
        if 'manhattan' in sys.argv[1]:
            print 'Generating Manhattan Grid Topology'
            topo = ManhattanGridTopo(5, 5, 20, 1, True)
        else:
            print 'Generating BRITE Specified Topology'
            topo = BriteTopo(sys.argv[1])
        hosts = topo.get_host_list()
        start_time = time()
        num_success = 0
        num_failure = 0
        print 'Simulations started at: ' + str(datetime.now())
        for i in range(0,num_iterations):
            for util_param in util_params:
                # Re-run the same configuration until it succeeds; each run
                # happens in a child process so Mininet state is isolated.
                test_success = False
                while not test_success:
                    parent_pipe, child_pipe = Pipe()
                    p = Process(target=mcastTest, args=(topo, False, hosts, log_prefix + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + '_' + str(i + first_index) + '.log', util_param[2], util_param[1], util_param[0], child_pipe))
                    sim_start_time = time()
                    p.start()
                    p.join()
                    sim_end_time = time()
                    # Make extra sure the network terminated cleanly
                    call(['python', 'kill_running_test.py'])
                    test_success = parent_pipe.recv()
                    parent_pipe.close()
                    print 'Test Success: ' + str(test_success)
                    if test_success:
                        num_success += 1
                    else:
                        num_failure += 1
                print 'Simulation ' + str(i+1) + '_' + ','.join([util_param[0], util_param[1], str(util_param[2])]) + ' completed at: ' + str(datetime.now()) + ' (runtime: ' + str(sim_end_time - sim_start_time) + ' seconds)'
        end_time = time()
        print ' '
        print 'Simulations completed at: ' + str(datetime.now())
        print 'Total runtime: ' + str(end_time - start_time) + ' seconds'
        print 'Average runtime per sim: ' + str((end_time - start_time) / (num_iterations * len(util_params))) + ' seconds'
        print 'Number of failed sims: ' + str(num_failure)
        print 'Number of successful sims: ' + str(num_success)
    elif len(sys.argv) >= 2:
        # Interactive mode - configures POX and multicast routes, but no automatic traffic generation
        print 'Launching BRITE defined multicast test topology'
        topo = BriteTopo(sys.argv[1])
        hosts = topo.get_host_list()
        mcastTest(topo, True, hosts)
    else:
        # Interactive mode with barebones topology
        print 'Launching default multicast test topology'
        topo = FattreeTopo()
        hosts = topo.get_host_list()
        mcastTest(topo, False, hosts)
|
|
import collections
import os
import os.path
import random
import re
import shlex
import subprocess
import sys
import tempfile
import time
import traceback
import pipes
import pexpect
# When True, temporary log file names are echoed to the real stdout as soon
# as they are created (set by the --print-log-names command-line option).
print_log_names = False
# Saved reference to the process-wide stdout; run_tests() redirects
# sys.stdout into per-test log files and restores it from here.
real_stdout = sys.stdout
# Note that gdb comes with its own testsuite. I was unable to figure out how to
# run that testsuite against the spike simulator.
def find_file(path):
    """Locate *path* relative to the CWD or this script's own directory.

    For each base directory, prefer the relative spelling of the candidate
    path unless the absolute one is shorter.  Returns the first candidate
    that exists, or None when the file cannot be found anywhere.
    """
    search_dirs = (os.getcwd(), os.path.dirname(__file__))
    for base in search_dirs:
        candidate = os.path.join(base, path)
        shortened = os.path.relpath(candidate)
        if len(shortened) >= len(candidate):
            shortened = candidate
        if os.path.exists(shortened):
            return shortened
    return None
def compile(args, xlen=32): # pylint: disable=redefined-builtin
    """Compile sources/flags with the RISC-V cross gcc from $RISCV.

    args -- mix of file names (resolved via find_file) and raw gcc flags.
    xlen -- 32 or 64; selects the matching -march/-mabi pair.
    Raises Exception when gcc exits with a non-zero status.
    """
    cc = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gcc")
    cmd = [cc, "-g"]
    if xlen == 32:
        cmd.append("-march=rv32imac")
        cmd.append("-mabi=ilp32")
    else:
        cmd.append("-march=rv64imac")
        cmd.append("-mabi=lp64")
    for arg in args:
        # Arguments that name an existing file are replaced with the found
        # path; everything else is passed through to gcc untouched.
        found = find_file(arg)
        if found:
            cmd.append(found)
        else:
            cmd.append(arg)
    header("Compile")
    print "+", " ".join(cmd)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode:
        # Only show compiler output on failure; success stays quiet.
        print stdout,
        print stderr,
        header("")
        raise Exception("Compile failed!")
class Spike(object):
    """Launch and manage a spike (RISC-V ISA simulator) subprocess.

    The simulator runs an infinite-loop program compiled for the target,
    logging to a temporary file.  When with_jtag_gdb is set, waits for
    spike to announce its remote-bitbang port and exports it via the
    REMOTE_BITBANG_PORT environment variable.
    """
    def __init__(self, target, halted=False, timeout=None, with_jtag_gdb=True,
            isa=None, progbufsize=None):
        """Launch spike. Return tuple of its process and the port it's running
        on."""
        self.process = None
        self.isa = isa
        self.progbufsize = progbufsize

        # A multi-hart target lists its harts; otherwise the target itself
        # acts as the single hart.
        if target.harts:
            harts = target.harts
        else:
            harts = [target]

        cmd = self.command(target, harts, halted, timeout, with_jtag_gdb)
        self.infinite_loop = target.compile(harts[0],
                "programs/checksum.c", "programs/tiny-malloc.c",
                "programs/infinite_loop.S", "-DDEFINE_MALLOC", "-DDEFINE_FREE")
        cmd.append(self.infinite_loop)
        self.logfile = tempfile.NamedTemporaryFile(prefix="spike-",
                suffix=".log")
        self.logname = self.logfile.name
        if print_log_names:
            real_stdout.write("Temporary spike log: %s\n" % self.logname)
        self.logfile.write("+ %s\n" % " ".join(cmd))
        self.logfile.flush()
        self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                stdout=self.logfile, stderr=self.logfile)

        if with_jtag_gdb:
            self.port = None
            # Poll the log for ~3.3s waiting for spike to report which
            # ephemeral port its remote-bitbang server bound to.
            for _ in range(30):
                m = re.search(r"Listening for remote bitbang connection on "
                        r"port (\d+).", open(self.logname).read())
                if m:
                    self.port = int(m.group(1))
                    os.environ['REMOTE_BITBANG_PORT'] = m.group(1)
                    break
                time.sleep(0.11)
            if not self.port:
                print_log(self.logname)
                raise Exception("Didn't get spike message about bitbang "
                        "connection")

    def command(self, target, harts, halted, timeout, with_jtag_gdb):
        """Build the spike command line for the given harts/options."""
        # pylint: disable=no-self-use
        if target.sim_cmd:
            cmd = shlex.split(target.sim_cmd)
        else:
            spike = os.path.expandvars("$RISCV/bin/spike")
            cmd = [spike]

        cmd += ["-p%d" % len(harts)]

        assert len(set(t.xlen for t in harts)) == 1, \
                "All spike harts must have the same XLEN"

        if self.isa:
            isa = self.isa
        else:
            isa = "RV%dG" % harts[0].xlen

        cmd += ["--isa", isa]
        cmd += ["--debug-auth"]

        # Idiom fix: was `if not self.progbufsize is None:`.
        if self.progbufsize is not None:
            cmd += ["--progsize", str(self.progbufsize)]
            cmd += ["--debug-sba", "32"]

        assert len(set(t.ram for t in harts)) == 1, \
                "All spike harts must have the same RAM layout"
        assert len(set(t.ram_size for t in harts)) == 1, \
                "All spike harts must have the same RAM layout"
        cmd += ["-m0x%x:0x%x" % (harts[0].ram, harts[0].ram_size)]

        if timeout:
            cmd = ["timeout", str(timeout)] + cmd

        if halted:
            cmd.append('-H')
        if with_jtag_gdb:
            # Port 0 => let spike pick an unused port (read back from log).
            cmd += ['--rbb-port', '0']
            os.environ['REMOTE_BITBANG_HOST'] = 'localhost'

        return cmd

    def __del__(self):
        # Best-effort teardown; the process may already be gone.
        if self.process:
            try:
                self.process.kill()
                self.process.wait()
            except OSError:
                pass

    def wait(self, *args, **kwargs):
        """Delegate to the underlying subprocess's wait()."""
        return self.process.wait(*args, **kwargs)
class VcsSim(object):
    """Launch a VCS RTL simulation (`simv`) with the JTAG VPI enabled.

    Tails its own log file until the simulator prints which port it is
    listening on, then exports that port via JTAG_VPI_PORT.
    """
    # NOTE(review): class-level temp file — all instances share one log path.
    logfile = tempfile.NamedTemporaryFile(prefix='simv', suffix='.log')
    logname = logfile.name

    def __init__(self, sim_cmd=None, debug=False, timeout=300):
        if sim_cmd:
            cmd = shlex.split(sim_cmd)
        else:
            cmd = ["simv"]
        cmd += ["+jtag_vpi_enable"]
        if debug:
            cmd[0] = cmd[0] + "-debug"
            cmd += ["+vcdplusfile=output/gdbserver.vpd"]
        logfile = open(self.logname, "w")
        if print_log_names:
            real_stdout.write("Temporary VCS log: %s\n" % self.logname)
        logfile.write("+ %s\n" % " ".join(cmd))
        logfile.flush()

        # Separate read handle positioned at EOF so we only see output
        # produced after the simulator starts.
        listenfile = open(self.logname, "r")
        listenfile.seek(0, 2)
        self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                stdout=logfile, stderr=logfile)
        done = False
        start = time.time()
        while not done:
            # Fail if VCS exits early
            exit_code = self.process.poll()
            if exit_code is not None:
                raise RuntimeError('VCS simulator exited early with status %d'
                        % exit_code)

            line = listenfile.readline()
            if not line:
                # Nothing new yet; the empty line falls through to the regex
                # below, which simply won't match.
                time.sleep(1)
            match = re.match(r"^Listening on port (\d+)$", line)
            if match:
                done = True
                self.port = int(match.group(1))
                os.environ['JTAG_VPI_PORT'] = str(self.port)

            if (time.time() - start) > timeout:
                raise Exception("Timed out waiting for VCS to listen for JTAG "
                        "vpi")

    def __del__(self):
        # Best-effort teardown; the process may already be gone.
        try:
            self.process.kill()
            self.process.wait()
        except OSError:
            pass
class Openocd(object):
    """Launch an OpenOCD server and collect the gdb ports it opens.

    gdb_port 0 makes OpenOCD bind an ephemeral port per target; start()
    scrapes those ports out of the log.  The tcl/telnet servers are
    disabled, and the "telnet server disabled" log line doubles as the
    "startup finished" marker.
    """
    # NOTE(review): class-level temp file — all instances share one log path.
    logfile = tempfile.NamedTemporaryFile(prefix='openocd', suffix='.log')
    logname = logfile.name

    def __init__(self, server_cmd=None, config=None, debug=False, timeout=60):
        self.timeout = timeout

        if server_cmd:
            cmd = shlex.split(server_cmd)
        else:
            openocd = os.path.expandvars("$RISCV/bin/openocd")
            cmd = [openocd]
            if debug:
                cmd.append("-d")

        # This command needs to come before any config scripts on the command
        # line, since they are executed in order.
        cmd += [
            # Tell OpenOCD to bind gdb to an unused, ephemeral port.
            "--command",
            "gdb_port 0",
            # Disable tcl and telnet servers, since they are unused and because
            # the port numbers will conflict if multiple OpenOCD processes are
            # running on the same server.
            "--command",
            "tcl_port disabled",
            "--command",
            "telnet_port disabled",
        ]

        if config:
            f = find_file(config)
            if f is None:
                print "Unable to read file " + config
                exit(1)

            cmd += ["-f", f]
        # NOTE(review): with the default command and debug=True, "-d" is
        # appended twice (once above, once here) — looks accidental; confirm
        # whether double "-d" is intended before removing.
        if debug:
            cmd.append("-d")

        logfile = open(Openocd.logname, "w")
        if print_log_names:
            real_stdout.write("Temporary OpenOCD log: %s\n" % Openocd.logname)
        env_entries = ("REMOTE_BITBANG_HOST", "REMOTE_BITBANG_PORT")
        env_entries = [key for key in env_entries if key in os.environ]
        logfile.write("+ %s%s\n" % (
            "".join("%s=%s " % (key, os.environ[key]) for key in env_entries),
            " ".join(map(pipes.quote, cmd))))
        logfile.flush()

        self.gdb_ports = []
        self.process = self.start(cmd, logfile)

    def start(self, cmd, logfile):
        """Spawn OpenOCD and block until it is ready for gdb connections."""
        process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                stdout=logfile, stderr=logfile)

        try:
            # Wait for OpenOCD to have made it through riscv_examine(). When
            # using OpenOCD to communicate with a simulator this may take a
            # long time, and gdb will time out when trying to connect if we
            # attempt too early.
            start = time.time()
            messaged = False
            fd = open(Openocd.logname, "r")
            while True:
                line = fd.readline()
                if not line:
                    if not process.poll() is None:
                        raise Exception("OpenOCD exited early.")
                    time.sleep(0.1)
                    continue

                m = re.search(r"Listening on port (\d+) for gdb connections",
                        line)
                if m:
                    self.gdb_ports.append(int(m.group(1)))

                # Printed after all gdb ports; treated as "startup complete".
                if "telnet server disabled" in line:
                    return process

                if not messaged and time.time() - start > 1:
                    messaged = True
                    print "Waiting for OpenOCD to start..."
                if (time.time() - start) > self.timeout:
                    raise Exception("Timed out waiting for OpenOCD to "
                            "listen for gdb")
        except Exception:
            print_log(Openocd.logname)
            raise

    def __del__(self):
        # Best-effort teardown; self.process may never have been assigned
        # if start() raised, hence AttributeError is also swallowed.
        try:
            self.process.kill()
            self.process.wait()
        except (OSError, AttributeError):
            pass
class OpenocdCli(object):
    """Drive OpenOCD's telnet command-line interface via pexpect."""

    def __init__(self, port=4444):
        # Pipe through tee so the interactive session is also logged.
        self.child = pexpect.spawn(
                "sh -c 'telnet localhost %d | tee openocd-cli.log'" % port)
        self.child.expect("> ")

    def command(self, cmd):
        """Send *cmd* and return its output up to the next prompt."""
        self.child.sendline(cmd)
        # Consume the echo of the command and its trailing newline so
        # `before` contains only the command's output.
        self.child.expect(cmd)
        self.child.expect("\n")
        self.child.expect("> ")
        return self.child.before.strip("\t\r\n \0")

    def reg(self, reg=''):
        """Return the value of *reg*, or a dict of all registers if empty."""
        output = self.command("reg %s" % reg)
        matches = re.findall(r"(\w+) \(/\d+\): (0x[0-9A-F]+)", output)
        values = {r: int(v, 0) for r, v in matches}
        if reg:
            return values[reg]
        return values

    def load_image(self, image):
        """Load an image file into target memory via OpenOCD's load_image."""
        output = self.command("load_image %s" % image)
        if 'invalid ELF file, only 32bits files are supported' in output:
            raise TestNotApplicable(output)
class CannotAccess(Exception):
    """Raised when gdb reports a memory address that cannot be accessed."""

    def __init__(self, address):
        Exception.__init__(self)
        # Keep the offending address so callers can report or assert on it.
        self.address = address
class CouldNotFetch(Exception):
    """Raised when gdb cannot fetch a register's value."""

    def __init__(self, regname, explanation):
        Exception.__init__(self)
        # Register name and gdb's explanation, kept for error reporting.
        self.regname = regname
        self.explanation = explanation
# One row of gdb's "info threads" output, as parsed by Gdb.threads().
Thread = collections.namedtuple('Thread', ('id', 'description', 'target_id',
        'name', 'frame'))
class Gdb(object):
    """A single gdb class which can interact with one or more gdb instances."""

    # pylint: disable=too-many-public-methods
    # pylint: disable=too-many-instance-attributes

    def __init__(self, ports,
            cmd=os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gdb"),
            timeout=60, binary=None):
        """Spawn one gdb child process (and log file) per port."""
        assert ports
        self.ports = ports
        self.cmd = cmd
        self.timeout = timeout
        self.binary = binary
        # Stack of saved states for push_state()/pop_state().
        self.stack = []
        # hartid -> {'child': ..., 'thread': ..., 'solo': ...}, filled by
        # connect().
        self.harts = {}
        self.logfiles = []
        self.children = []
        for port in ports:
            logfile = tempfile.NamedTemporaryFile(prefix="gdb@%d-" % port,
                    suffix=".log")
            self.logfiles.append(logfile)
            if print_log_names:
                real_stdout.write("Temporary gdb log: %s\n" % logfile.name)
            child = pexpect.spawn(cmd)
            child.logfile = logfile
            child.logfile.write("+ %s\n" % cmd)
            self.children.append(child)
        self.active_child = self.children[0]

    def connect(self):
        """Connect each child to its remote port and discover the harts."""
        for port, child in zip(self.ports, self.children):
            self.select_child(child)
            self.wait()
            self.command("set confirm off")
            self.command("set width 0")
            self.command("set height 0")
            # Force consistency.
            self.command("set print entry-values no")
            self.command("set remotetimeout %d" % self.timeout)
            self.command("target extended-remote localhost:%d" % port)
            if self.binary:
                self.command("file %s" % self.binary)
            threads = self.threads()
            for t in threads:
                # Prefer the hart id embedded in the thread name
                # ("Hart <n>"); otherwise assign sequential ids.
                hartid = None
                if t.name:
                    m = re.search(r"Hart (\d+)", t.name)
                    if m:
                        hartid = int(m.group(1))
                if hartid is None:
                    if self.harts:
                        hartid = max(self.harts) + 1
                    else:
                        hartid = 0
                # solo: True iff this is the only thread on this child
                self.harts[hartid] = {'child': child,
                        'thread': t,
                        'solo': len(threads) == 1}

    def __del__(self):
        for child in self.children:
            del child

    def one_hart_per_gdb(self):
        """True when every hart has its own dedicated gdb child."""
        return all(h['solo'] for h in self.harts.itervalues())

    def lognames(self):
        """Return the paths of all per-child gdb log files."""
        return [logfile.name for logfile in self.logfiles]

    def select_child(self, child):
        """Make *child* the target of subsequent command()/wait() calls."""
        self.active_child = child

    def select_hart(self, hart):
        """Switch the active child (and, if needed, thread) to *hart*."""
        h = self.harts[hart.id]
        self.select_child(h['child'])
        if not h['solo']:
            output = self.command("thread %s" % h['thread'].id, ops=5)
            assert "Unknown" not in output

    def push_state(self):
        """Save the active child so pop_state() can restore it."""
        self.stack.append({
            'active_child': self.active_child
            })

    def pop_state(self):
        """Restore the active child saved by the matching push_state()."""
        state = self.stack.pop()
        self.active_child = state['active_child']

    def wait(self):
        """Wait for prompt."""
        self.active_child.expect(r"\(gdb\)")

    def command(self, command, ops=1):
        """ops is the estimated number of operations gdb will have to perform
        to perform this command. It is used to compute a timeout based on
        self.timeout."""
        timeout = ops * self.timeout
        self.active_child.sendline(command)
        # First expect skips the echo of the command itself.
        self.active_child.expect("\n", timeout=timeout)
        self.active_child.expect(r"\(gdb\)", timeout=timeout)
        return self.active_child.before.strip()

    def global_command(self, command):
        """Execute this command on every gdb that we control."""
        with PrivateState(self):
            for child in self.children:
                self.select_child(child)
                self.command(command)

    # NOTE(review): `async` is a parameter name here (fine in Python 2);
    # it became a reserved word in Python 3.7.
    def c(self, wait=True, async=False):
        """
        Dumb c command.
        In RTOS mode, gdb will resume all harts.
        In multi-gdb mode, this command will just go to the current gdb, so
        will only resume one hart.
        """
        if async:
            async = "&"
        else:
            async = ""
        ops = 10
        if wait:
            output = self.command("c%s" % async, ops=ops)
            assert "Continuing" in output
            return output
        else:
            self.active_child.sendline("c%s" % async)
            self.active_child.expect("Continuing", timeout=ops * self.timeout)

    def c_all(self):
        """
        Resume every hart.

        This function works fine when using multiple gdb sessions, but the
        caller must be careful when using it nonetheless. gdb's behavior is to
        not set breakpoints until just before the hart is resumed, and then
        clears them as soon as the hart halts. That means that you can't set
        one software breakpoint, and expect multiple harts to hit it. It's
        possible that the first hart completes set/run/halt/clear before the
        second hart even gets to resume, so it will never hit the breakpoint.
        """
        with PrivateState(self):
            for child in self.children:
                child.sendline("c")
                child.expect("Continuing")

            # Now wait for them all to halt
            for child in self.children:
                child.expect(r"\(gdb\)")

    def interrupt(self):
        """Send Ctrl-C to the active gdb and wait for it to halt the hart."""
        self.active_child.send("\003")
        self.active_child.expect(r"\(gdb\)", timeout=6000)
        return self.active_child.before.strip()

    def x(self, address, size='w'):
        """Examine memory at *address*; returns the value as an int."""
        output = self.command("x/%s %s" % (size, address))
        value = int(output.split(':')[1].strip(), 0)
        return value

    def p_raw(self, obj):
        """`p obj` returning the raw right-hand-side text."""
        output = self.command("p %s" % obj)
        m = re.search("Cannot access memory at address (0x[0-9a-f]+)", output)
        if m:
            raise CannotAccess(int(m.group(1), 0))
        return output.split('=')[-1].strip()

    def parse_string(self, text):
        """Parse gdb value syntax: {...} arrays, "..." strings, or ints."""
        text = text.strip()
        if text.startswith("{") and text.endswith("}"):
            inner = text[1:-1]
            return [self.parse_string(t) for t in inner.split(", ")]
        elif text.startswith('"') and text.endswith('"'):
            return text[1:-1]
        else:
            return int(text, 0)

    def p(self, obj, fmt="/x"):
        """`p/fmt obj`, parsed into Python values via parse_string()."""
        output = self.command("p%s %s" % (fmt, obj))
        m = re.search("Cannot access memory at address (0x[0-9a-f]+)", output)
        if m:
            raise CannotAccess(int(m.group(1), 0))
        m = re.search(r"Could not fetch register \"(\w+)\"; (.*)$", output)
        if m:
            raise CouldNotFetch(m.group(1), m.group(2))
        rhs = output.split('=')[-1]
        return self.parse_string(rhs)

    def p_string(self, obj):
        """Print *obj* and return the second shell-token of the result."""
        output = self.command("p %s" % obj)
        value = shlex.split(output.split('=')[-1].strip())[1]
        return value

    def stepi(self):
        """Single-step one instruction."""
        output = self.command("stepi", ops=10)
        return output

    def load(self):
        """Download self.binary to the target; assert the transfer worked."""
        output = self.command("load", ops=1000)
        assert "failed" not in  output
        assert "Transfer rate" in output

    def b(self, location):
        """Set a software breakpoint at *location*."""
        output = self.command("b %s" % location, ops=5)
        assert "not defined" not in output
        assert "Breakpoint" in output
        return output

    def hbreak(self, location):
        """Set a hardware breakpoint at *location*."""
        output = self.command("hbreak %s" % location, ops=5)
        assert "not defined" not in output
        assert "Hardware assisted breakpoint" in output
        return output

    def threads(self):
        """Parse `info threads` into a list of Thread namedtuples."""
        output = self.command("info threads", ops=100)
        threads = []
        for line in output.splitlines():
            m = re.match(
                    r"[\s\*]*(\d+)\s*"
                    r"(Remote target|Thread (\d+)\s*\(Name: ([^\)]+))"
                    r"\s*(.*)", line)
            if m:
                threads.append(Thread(*m.groups()))
        assert threads
        #>>>if not threads:
        #>>>    threads.append(Thread('1', '1', 'Default', '???'))
        return threads

    def thread(self, thread):
        """Switch gdb to the given Thread."""
        return self.command("thread %s" % thread.id)

    def where(self):
        """Return the innermost frame of the backtrace."""
        return self.command("where 1")
class PrivateState(object):
    """Context manager that saves a Gdb object's selection state on entry
    and restores it on exit, so a helper can switch children freely."""

    def __init__(self, gdb):
        self._gdb = gdb

    def __enter__(self):
        self._gdb.push_state()

    def __exit__(self, _type, _value, _traceback):
        self._gdb.pop_state()
def run_all_tests(module, target, parsed):
    """Collect and run every test class in *module* against *target*.

    module -- module object whose classes with a `test` attribute are tests.
    target -- target description with a `harts` list.
    parsed -- argparse namespace from add_test_run_options().
    Returns 0 when all tests passed (see print_results).
    """
    try:
        os.makedirs(parsed.logs)
    except OSError:
        # There's a race where multiple instances of the test program might
        # decide to create the logs directory at the same time.
        pass

    overall_start = time.time()

    global gdb_cmd # pylint: disable=global-statement
    gdb_cmd = parsed.gdb

    todo = []
    # Schedule ExamineTarget (at most once) unless every hart's misa is
    # already known from the command line or the hart definition.
    examine_added = False
    for hart in target.harts:
        if parsed.misaval:
            hart.misa = int(parsed.misaval, 16)
            print "Using $misa from command line: 0x%x" % hart.misa
        elif hart.misa:
            print "Using $misa from hart definition: 0x%x" % hart.misa
        elif not examine_added:
            todo.append(("ExamineTarget", ExamineTarget, None))
            examine_added = True

    for name in dir(module):
        definition = getattr(module, name)
        # A "test" is any class exposing a test() method, optionally
        # filtered by substring match against the names given on the CLI.
        if isinstance(definition, type) and hasattr(definition, 'test') and \
                (not parsed.test or any(test in name for test in parsed.test)):
            todo.append((name, definition, None))

    results, count = run_tests(parsed, target, todo)

    header("ran %d tests in %.0fs" % (count, time.time() - overall_start),
            dash=':')

    return print_results(results)
good_results = set(('pass', 'not_applicable'))
def run_tests(parsed, target, todo):
    """Run each (name, class, hart) entry in *todo*, one log file per test.

    stdout is temporarily redirected into the test's log file while the
    test runs; real_stdout keeps a handle on the original stream.
    Returns (results dict keyed by outcome string, number of tests run).
    """
    results = {}
    count = 0

    for name, definition, hart in todo:
        log_name = os.path.join(parsed.logs, "%s-%s-%s.log" %
                (time.strftime("%Y%m%d-%H%M%S"), type(target).__name__, name))
        log_fd = open(log_name, 'w')
        print "[%s] Starting > %s" % (name, log_name)
        instance = definition(target, hart)
        sys.stdout.flush()
        log_fd.write("Test: %s\n" % name)
        log_fd.write("Target: %s\n" % type(target).__name__)
        start = time.time()
        global real_stdout # pylint: disable=global-statement
        real_stdout = sys.stdout
        # Everything the test prints goes to its log file.
        sys.stdout = log_fd
        try:
            result = instance.run()
            log_fd.write("Result: %s\n" % result)
            log_fd.write("Logfile: %s\n" % log_name)
        finally:
            # Always restore stdout, even if run() raised.
            sys.stdout = real_stdout
        log_fd.write("Time elapsed: %.2fs\n" % (time.time() - start))
        log_fd.flush()
        print "[%s] %s in %.2fs" % (name, result, time.time() - start)
        if result not in good_results and parsed.print_failures:
            sys.stdout.write(open(log_name).read())
        sys.stdout.flush()
        results.setdefault(result, []).append((name, log_name))
        count += 1
        if result not in good_results and parsed.fail_fast:
            break

    return results, count
def print_results(results):
    """Summarize *results* (outcome -> [(name, log)]) to stdout.

    Returns 0 when every outcome is in good_results, 1 otherwise —
    suitable as a process exit status.
    """
    result = 0
    for key, value in results.iteritems():
        print "%d tests returned %s" % (len(value), key)
        if key not in good_results:
            result = 1
            # Only list individual tests for the bad outcomes.
            for name, log_name in value:
                print "   %s > %s" % (name, log_name)

    return result
def add_test_run_options(parser):
    """Register the command-line options shared by every test-run script
    on the given argparse parser (mutates *parser* in place)."""
    add = parser.add_argument
    add("--logs", default="logs",
        help="Store logs in the specified directory.")
    add("--fail-fast", "-f", action="store_true",
        help="Exit as soon as any test fails.")
    add("--print-failures", action="store_true",
        help="When a test fails, print the log file to stdout.")
    add("--print-log-names", "--pln", action="store_true",
        help="Print names of temporary log files as soon as they are "
        "created.")
    add("test", nargs='*',
        help="Run only tests that are named here.")
    add("--gdb",
        help="The command to use to start gdb.")
    add("--misaval",
        help="Don't run ExamineTarget, just assume the misa value which is "
        "specified.")
def header(title, dash='-', length=78):
    """Print a banner line: `title` centered in a row of `dash` characters,
    or a full row of dashes when title is empty."""
    if title:
        dashes = dash * (length - 4 - len(title))
        # Python 2 integer division splits the dashes (almost) evenly.
        before = dashes[:len(dashes)/2]
        after = dashes[len(dashes)/2:]
        print "%s[ %s ]%s" % (before, title, after)
    else:
        print dash * length
def print_log(path):
    """Print a header banner naming *path*, then the file's full contents
    line by line, followed by a blank line."""
    header(path)
    # Use a context manager so the handle is closed deterministically
    # (previously the open() result leaked until garbage collection).
    with open(path, "r") as log:
        for l in log:
            sys.stdout.write(l)
    print
class BaseTest(object):
    """Common infrastructure for one test: compile the program, start the
    target and its debug server, run test(), and report an outcome string
    ('pass', 'fail', 'not_applicable', or 'exception')."""

    # Cache of compiled binaries, keyed by the compile_args tuple, shared
    # across all tests so identical programs are only built once.
    compiled = {}

    def __init__(self, target, hart=None):
        self.target = target
        if hart:
            self.hart = hart
        else:
            self.hart = random.choice(target.harts)
        # NOTE(review): this line defeats the random choice above and always
        # picks the last hart; the "#<<<" marker suggests a debugging
        # leftover — confirm whether it should be removed.
        self.hart = target.harts[-1] #<<<
        self.server = None
        self.target_process = None
        self.binary = None
        self.start = 0
        self.logs = []

    def early_applicable(self):
        """Return a false value if the test has determined it cannot run
        without ever needing to talk to the target or server."""
        # pylint: disable=no-self-use
        return True

    def setup(self):
        # Hook for subclasses; runs after classSetup(), before test().
        pass

    def compile(self):
        """Compile self.compile_args (if set) and record the binary path."""
        compile_args = getattr(self, 'compile_args', None)
        if compile_args:
            if compile_args not in BaseTest.compiled:
                # pylint: disable=no-member
                BaseTest.compiled[compile_args] = \
                        self.target.compile(self.hart, *compile_args)
        self.binary = BaseTest.compiled.get(compile_args)

    def classSetup(self):
        """Compile, then launch the target process and its debug server,
        printing all collected logs if server startup fails."""
        self.compile()
        self.target_process = self.target.create()
        if self.target_process:
            self.logs.append(self.target_process.logname)
        try:
            self.server = self.target.server()
            self.logs.append(self.server.logname)
        except Exception:
            for log in self.logs:
                print_log(log)
            raise

    def classTeardown(self):
        # Deleting triggers the servers' __del__ process cleanup.
        del self.server
        del self.target_process

    def postMortem(self):
        # Hook for subclasses to dump extra state after a failure.
        pass

    def run(self):
        """
        If compile_args is set, compile a program and set self.binary.

        Call setup().

        Then call test() and return the result, displaying relevant
        information if an exception is raised.
        """
        sys.stdout.flush()
        if not self.early_applicable():
            return "not_applicable"

        self.start = time.time()

        try:
            self.classSetup()
            self.setup()
            result = self.test() # pylint: disable=no-member
        except TestNotApplicable:
            result = "not_applicable"
        except Exception as e: # pylint: disable=broad-except
            if isinstance(e, TestFailed):
                result = "fail"
            else:
                result = "exception"
            if isinstance(e, TestFailed):
                header("Message")
                print e.message
            header("Traceback")
            traceback.print_exc(file=sys.stdout)
            try:
                self.postMortem()
            except Exception as e: # pylint: disable=broad-except
                header("postMortem Exception")
                print e
                traceback.print_exc(file=sys.stdout)
            return result
        finally:
            # Runs on every path (including the early return above):
            # dump logs and tear down the target/server.
            for log in self.logs:
                print_log(log)
            header("End of logs")
            self.classTeardown()

        # A test() that returns nothing counts as a pass.
        if not result:
            result = 'pass'
        return result
gdb_cmd = None
class GdbTest(BaseTest):
    """BaseTest variant that also connects a Gdb instance to the server's
    gdb ports and selects the hart under test."""

    def __init__(self, target, hart=None):
        BaseTest.__init__(self, target, hart=hart)
        self.gdb = None

    def classSetup(self):
        BaseTest.classSetup(self)

        # Use the user-supplied gdb command when one was given on the CLI;
        # otherwise fall back to Gdb's built-in default.
        if gdb_cmd:
            self.gdb = Gdb(self.server.gdb_ports, gdb_cmd,
                    timeout=self.target.timeout_sec, binary=self.binary)
        else:
            self.gdb = Gdb(self.server.gdb_ports,
                    timeout=self.target.timeout_sec, binary=self.binary)

        self.logs += self.gdb.lognames()
        self.gdb.connect()

        self.gdb.global_command("set remotetimeout %d" %
                self.target.timeout_sec)

        for cmd in self.target.gdb_setup:
            self.gdb.command(cmd)

        self.gdb.select_hart(self.hart)

        # FIXME: OpenOCD doesn't handle PRIV now
        #self.gdb.p("$priv=3")

    def postMortem(self):
        """Dump disassembly and registers from gdb after a failure."""
        if not self.gdb:
            return
        self.gdb.interrupt()
        self.gdb.command("disassemble", ops=20)
        self.gdb.command("info registers all", ops=100)

    def classTeardown(self):
        # Tear down gdb before the server/target processes.
        del self.gdb
        BaseTest.classTeardown(self)
class GdbSingleHartTest(GdbTest):
    """GdbTest variant for tests that exercise exactly one hart: every
    other hart is parked in loop_forever so it cannot interfere."""

    def classSetup(self):
        GdbTest.classSetup(self)

        # Park all harts that we're not using in a safe place, then switch
        # gdb back to the hart this test actually drives.
        unused_harts = (h for h in self.target.harts if h != self.hart)
        for other in unused_harts:
            self.gdb.select_hart(other)
            self.gdb.p("$pc=loop_forever")
        self.gdb.select_hart(self.hart)
class ExamineTarget(GdbTest):
def test(self):
for hart in self.target.harts:
self.gdb.select_hart(hart)
hart.misa = self.gdb.p("$misa")
txt = "RV"
misa_xlen = 0
if ((hart.misa & 0xFFFFFFFF) >> 30) == 1:
misa_xlen = 32
elif ((hart.misa & 0xFFFFFFFFFFFFFFFF) >> 62) == 2:
misa_xlen = 64
elif ((hart.misa & 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF) >> 126) == 3:
misa_xlen = 128
else:
raise TestFailed("Couldn't determine XLEN from $misa (0x%x)" %
self.hart.misa)
if misa_xlen != hart.xlen:
raise TestFailed("MISA reported XLEN of %d but we were "\
"expecting XLEN of %d\n" % (misa_xlen, hart.xlen))
txt += ("%d" % misa_xlen)
for i in range(26):
if hart.misa & (1<<i):
txt += chr(i + ord('A'))
print txt,
class TestFailed(Exception):
    """Raised by the assert* helpers when a test expectation is violated."""

    def __init__(self, message):
        Exception.__init__(self)
        # Stored explicitly so BaseTest.run() can print it in its report.
        self.message = message
class TestNotApplicable(Exception):
    """Raised by a test to report that it cannot run on this target; the
    runner records the outcome as 'not_applicable' rather than a failure."""

    def __init__(self, message):
        Exception.__init__(self)
        # Kept for reporting, mirroring TestFailed.
        self.message = message
def assertEqual(a, b):
    """Raise TestFailed unless a == b."""
    if a == b:
        return
    raise TestFailed("%r != %r" % (a, b))
def assertNotEqual(a, b):
    """Raise TestFailed when a == b."""
    if a != b:
        return
    raise TestFailed("%r == %r" % (a, b))
def assertIn(a, b):
    """Raise TestFailed unless a is contained in b."""
    if a in b:
        return
    raise TestFailed("%r not in %r" % (a, b))
def assertNotIn(a, b):
    """Raise TestFailed when a is contained in b."""
    if a not in b:
        return
    raise TestFailed("%r in %r" % (a, b))
def assertGreater(a, b):
    """Raise TestFailed unless a > b."""
    if a > b:
        return
    raise TestFailed("%r not greater than %r" % (a, b))
def assertLess(a, b):
    """Raise TestFailed unless a < b."""
    if a < b:
        return
    raise TestFailed("%r not less than %r" % (a, b))
def assertTrue(a):
    """Raise TestFailed unless a is truthy."""
    if a:
        return
    raise TestFailed("%r is not True" % a)
def assertRegexpMatches(text, regexp):
    """Raise TestFailed unless *regexp* matches somewhere in *text*."""
    if re.search(regexp, text):
        return
    raise TestFailed("can't find %r in %r" % (regexp, text))
|
|
# -*- coding: utf-8 -*-
"""
Asynchronous Task Execution
- falls back to Synchronous if no workers are alive
Worker nodes won't run on Win32 yet.
To run a worker node: python web2py.py -K eden
NB
Need WEB2PY_PATH environment variable to be defined (e.g. /etc/profile)
Tasks need to be defined outside conditional model loads
Avoid passing state into the async call as state may change before the message is executed (race condition)
Old screencast: http://www.vimeo.com/27478796
@requires: U{B{I{gluon}} <http://web2py.com>}
@author: Fran Boon <fran[at]aidiq.com>
@copyright: 2011 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Task"]
import datetime
from gluon import HTTP, current
from gluon.storage import Storage
import gluon.contrib.simplejson as json
from s3widgets import S3TimeIntervalWidget
from s3validators import IS_TIME_INTERVAL_WIDGET
def s3_debug(message, value=None):
    """
        Provide an easy, safe, systematic way of handling Debug output
        (print to stdout doesn't work with WSGI deployments)
    """
    try:
        output = "S3 Debug: %s" % str(message)
        if value:
            output += ": %s" % str(value)
    except:
        # str() fails on non-ASCII data in Python 2; retry with unicode().
        # The bare except is deliberate here as a last-resort fallback.
        output = "S3 Debug: %s" % unicode(message)
        if value:
            output += ": %s" % unicode(value)

    # stderr reaches the server log even under WSGI.
    import sys
    print >> sys.stderr, output
# -----------------------------------------------------------------------------
class S3Task(object):
""" Asynchronous Task Execution """
TASK_TABLENAME = "scheduler_task"
# -------------------------------------------------------------------------
    def __init__(self):
        """Instantiate the web2py Scheduler against the current database.

        self.scheduler is left as None when gluon.scheduler is unavailable
        (callers then fall back to synchronous execution).
        """
        migrate = current.deployment_settings.get_base_migrate()
        tasks = current.response.s3.tasks

        # Instantiate Scheduler
        try:
            from gluon.scheduler import Scheduler
        except:
            # Warning should already have been given by eden_update_check.py
            self.scheduler = None
        else:
            self.scheduler = Scheduler(current.db,
                                       tasks,
                                       migrate=migrate)
# -------------------------------------------------------------------------
def configure_tasktable_crud(self,
task=None,
function=None,
args=[],
vars={}):
"""
Configure the task table for interactive CRUD,
setting defaults, widgets and hiding unnecessary fields
@param task: the task name (will use a UUID if omitted)
@param function: the function name (won't hide if omitted)
@param args: the function position arguments
@param vars: the function named arguments
"""
T = current.T
db = current.db
tablename = self.TASK_TABLENAME
table = db[tablename]
if not task:
import uuid
task = str(uuid.uuid4())
table.task_name.default = task
table.task_name.readable = False
table.task_name.writable = False
if function:
table.function_name.default = function
table.function_name.readable = False
table.function_name.writable = False
table.args.default = json.dumps(args)
table.args.readable = False
table.args.writable = False
table.repeats.label = T("Repeat")
table.repeats.comment = T("times (0 = unlimited)")
table.repeats.default = 0
table.repeats.represent = lambda opt: opt and "%s %s" % (opt, T("times")) or \
opt == 0 and T("unlimited") or \
"-"
table.period.label = T("Run every")
table.period.widget = S3TimeIntervalWidget.widget
table.period.requires = IS_TIME_INTERVAL_WIDGET(table.period)
table.period.represent = S3TimeIntervalWidget.represent
table.period.comment = None
table.timeout.default = 600
table.timeout.represent = lambda opt: opt and "%s %s" % (opt, T("seconds")) or \
opt == 0 and T("unlimited") or \
"-"
table.vars.default = json.dumps(vars)
table.vars.readable = False
table.vars.writable = False
table.application_name.readable = False
table.application_name.writable = False
table.group_name.readable = False
table.group_name.writable = False
table.status.readable = False
table.status.writable = False
table.next_run_time.readable = False
table.next_run_time.writable = False
table.times_run.readable = False
table.times_run.writable = False
table.assigned_worker_name.readable = False
table.assigned_worker_name.writable = False
manager = current.manager
manager.configure(tablename,
list_fields=["id",
"enabled",
"start_time",
"repeats",
"period",
(T("Last run"), "last_run_time"),
(T("Last status"), "status"),
(T("Next run"), "next_run_time"),
"stop_time"])
response = current.response
if response:
s3 = response.s3
s3.crud_strings[tablename] = Storage(
title_create = T("Add Job"),
title_display = T("Scheduled Jobs"),
title_list = T("Job Schedule"),
title_update = T("Edit Job"),
title_search = T("Search for Job"),
subtitle_create = T("Add Job"),
subtitle_list = T("Currently Configured Jobs"),
label_list_button = T("List Jobs"),
label_create_button = T("Add Job"),
msg_record_created = T("Job added"),
msg_record_modified = T("Job updated updated"),
msg_record_deleted = T("Job deleted"),
msg_list_empty = T("No jobs configured yet"),
msg_no_match = T("No jobs configured"))
return
# -------------------------------------------------------------------------
# API Function run within the main flow of the application
# -------------------------------------------------------------------------
def async(self, task, args=[], vars={}, timeout=300):
"""
Wrapper to call an asynchronous task.
- run from the main request
@param task: The function which should be run
- async if a worker is alive
@param args: The list of unnamed args to send to the function
@param vars: The list of named vars to send to the function
@param timeout: The length of time available for the task to complete
- default 300s (5 mins)
"""
# Check that task is defined
tasks = current.response.s3.tasks
if not tasks:
return False
if task not in tasks:
return False
# Check that worker is alive
if not self._is_alive():
# Run the task synchronously
_args = []
for arg in args:
if isinstance(arg, (int, long)):
_args.append(str(arg))
elif isinstance(arg, str):
_args.append("'%s'" % str(arg))
else:
raise HTTP(501, "Unhandled arg type")
args = " ,".join(_args)
_vars = ""
for var in vars:
_vars += ", %s=%s" % (str(var),
str(vars[var]))
statement = "tasks['%s'](%s%s)" % (task, args, _vars)
exec(statement)
return None
auth = current.auth
if auth.is_logged_in():
# Add the current user to the vars
vars["user_id"] = auth.user.id
# Run the task asynchronously
db = current.db
record = db.scheduler_task.insert(task_name=task,
function_name=task,
args=json.dumps(args),
vars=json.dumps(vars),
timeout=timeout)
# Return record so that status can be polled
return record
# -------------------------------------------------------------------------
def schedule_task(self,
                  task,
                  args=None, # args to pass to the task (list; default [])
                  vars=None, # vars to pass to the task (dict; default {})
                  function_name=None,
                  start_time=None,
                  next_run_time=None,
                  stop_time=None,
                  repeats=None,
                  period=None,
                  timeout=None,
                  enabled=None, # None = Enabled
                  group_name=None,
                  ignore_duplicate=False):
    """
    Schedule a task in web2py Scheduler

    @param task: name of the function/task to be scheduled
    @param args: args to be passed to the scheduled task
    @param vars: vars to be passed to the scheduled task
    @param function_name: function name (if different from task name)
    @param start_time: start_time for the scheduled task
    @param next_run_time: next_run_time for the scheduled task
    @param stop_time: stop_time for the scheduled task
    @param repeats: number of times the task to be repeated
    @param period: time period between two consecutive runs
    @param timeout: set timeout for a running task
    @param enabled: enabled flag for the scheduled task
    @param group_name: group_name for the scheduled task
    @param ignore_duplicate: disable or enable duplicate checking

    @return: the scheduler_task record ID, or False if a duplicate was found
    """
    # Normalise the former mutable defaults ([] / {}): a shared default
    # object would leak state between calls, and vars is modified below
    # (user_id injection) - copy so the caller's dict is not mutated.
    args = [] if args is None else args
    vars = dict(vars) if vars else {}

    if function_name is None:
        function_name = task

    # Store valid keyword arguments only if they are provided
    kwargs = {}
    if start_time:
        kwargs["start_time"] = start_time

    if next_run_time:
        kwargs["next_run_time"] = next_run_time
    elif start_time:
        # default it to start_time
        kwargs["next_run_time"] = start_time

    if stop_time:
        kwargs["stop_time"] = stop_time
    elif start_time:
        # default it to one day ahead of given start_time
        if not isinstance(start_time, datetime.datetime):
            start_time = datetime.datetime.strptime(start_time,
                                                    "%Y-%m-%d %H:%M:%S")
        # Fix: previously the computed default was discarded instead of
        # being stored in kwargs
        kwargs["stop_time"] = start_time + datetime.timedelta(days=1)

    if repeats is not None:
        kwargs["repeats"] = repeats

    if period:
        kwargs["period"] = period

    if timeout:
        kwargs["timeout"] = timeout

    if enabled is not None:
        # NB None => enabled
        kwargs["enabled"] = enabled

    if group_name:
        kwargs["group_name"] = group_name

    if not ignore_duplicate and self._duplicate_task_exists(task, args, vars):
        # If duplicate task exists, do not insert a new one
        s3_debug("Duplicate Task, Not Inserted", value=task)
        return False

    auth = current.auth
    if auth.is_logged_in():
        # Add the current user to the vars
        vars["user_id"] = auth.user.id

    # Add to DB for pickup by Scheduler task
    db = current.db
    record = db.scheduler_task.insert(task_name=task,
                                      function_name=function_name,
                                      args=json.dumps(args),
                                      vars=json.dumps(vars),
                                      **kwargs)
    return record
# -------------------------------------------------------------------------
def _duplicate_task_exists(self, task, args, vars):
    """
    Check whether an equivalent task is already pending or running in
    the Scheduler (same function, same positional args, same named vars)

    @param task: name of the task function
    @param args: the job position arguments (list)
    @param vars: the job named arguments (dict)

    @return: True if a matching task is RUNNING/QUEUED/ALLOCATED
    """
    db = current.db
    ttable = db.scheduler_task
    serialized_args = json.dumps(args)
    query = ((ttable.function_name == task) & \
             (ttable.args == serialized_args) & \
             (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
    # args already matched in SQL; compare vars in Python since they are
    # stored JSON-serialized and dict key order may differ
    return any(json.loads(job.vars) == vars
               for job in db(query).select(ttable.vars))
# -------------------------------------------------------------------------
def _is_alive(self):
    """
    Returns True if there is at least 1 active worker to run scheduled tasks
    - run from the main request

    NB Can't run this 1/request at the beginning since the tables
       only get defined in zz_last
    """
    db = current.db
    gis = current.gis
    # A worker counts as alive if it has heartbeated within the last minute
    cutoff = datetime.datetime.now() - datetime.timedelta(minutes=1)
    wtable = db.scheduler_worker
    recent_worker = db(wtable.last_heartbeat > cutoff).select(
                        wtable.id,
                        limitby=(0, 1),
                        cache=gis.cache).first()
    return recent_worker is not None
# -------------------------------------------------------------------------
@staticmethod
def reset(task_id):
    """
    Reset the status of a task to QUEUED after FAILED

    @param task_id: the task record ID
    """
    db = current.db
    ttable = db.scheduler_task
    # Only a FAILED task may be requeued; silently no-op otherwise
    failed_task = db((ttable.id == task_id) &
                     (ttable.status == "FAILED")).select(
                        limitby=(0, 1)).first()
    if failed_task:
        failed_task.update_record(status="QUEUED")
# =========================================================================
# Functions run within the Task itself
# =========================================================================
def authenticate(self, user_id):
    """
    Activate the authentication passed from the caller to this new request
    - run from within the task

    NB This is so simple that we don't normally run via this API
       - kept as an example of what needs to happen within the task

    @param user_id: ID of the user to impersonate
    """
    current.auth.s3_impersonate(user_id)
# END =========================================================================
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive.
    # setup.py/versioneer.py greps for the variable names, so each must be
    # defined on a line of its own. _version.py just calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Plain attribute bag with no behaviour; instances are created and
    populated by get_config() below.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' writes _version.py.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = ""
    config.parentdir_prefix = "pib-"
    config.versionfile_source = "pib/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow: each version-discovery strategy raises it so that
    get_versions() can fall through to the next strategy.
    """
# Module-level registries.
# LONG_VERSION_PY: template registry; not referenced elsewhere in this file.
LONG_VERSION_PY = {}
# HANDLERS: nested mapping {vcs: {method_name: function}}, populated by the
# register_vcs_handler decorator below.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory marking a function as the handler for a VCS method."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Run the first launchable command from *commands* with *args*.

    Returns (stdout, returncode); (None, None) if no command could be
    launched, (None, returncode) if the command exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # Command not installed under this name; try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        # Loop exhausted without a successful Popen
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. Also searches up to two directory
    levels above for an appropriately named parent directory.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Returns a dict with any of "refnames", "full" and "date" found in the
    file's git_refnames/git_full/git_date assignments; empty on I/O error.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a read raises
        # (the original leaked the handle on mid-read errors).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: fall through with whatever was collected
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (ISO-8601-compliant), but we prefer "%ci"
        # ("ISO-8601-like", edited here to be compliant) because it has been
        # around since git-1.5.3 and detecting the git version is too hard.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags. Heuristic: assume
        # all version tags have a digit. The old git %d expansion strips the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # branches from tags, so ignoring digit-less refnames filters out
        # common branch names ("release", "stabilization", "HEAD", "master").
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date. Raises NotThisMethod when the tree is not
    under git control or git commands fail.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False in run_command, so the Windows wrappers must be named
        GITS = ["git.cmd", "git.exe"]
    # Bail out early if root is not a git checkout at all
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): run_command() returns (None, None) on failure, in which
    # case [0].strip() would raise AttributeError -- presumably git is
    # known-good by this point; confirm before hardening.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    # pieces["closest-tag"] may be present-but-None (no tag in history);
    # dict.get's default only applies when the key is MISSING, so the
    # original could do `"+" in None` -> TypeError. Coerce None to "".
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"],
                                         pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % distance
    return tag + (".post.dev%d" % distance if distance else "")
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    suffix = ""
    if not tag:
        # exception #1: untagged history, synthesize a "0" base version
        tag = "0"
        suffix = ".post%d" % pieces["distance"]
    elif pieces["distance"] or pieces["dirty"]:
        suffix = ".post%d" % pieces["distance"]
    if suffix and pieces["dirty"]:
        suffix += ".dev0"
    return tag + suffix
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return (out + "-dirty") if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Parsing failed upstream; propagate the error without a version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded git-archive keywords, 'git describe' on the
    checked-out tree, then the parent-directory-name heuristic; falls back
    to a "0+unknown" stub whose "error" entry explains the failure.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: keywords substituted by 'git archive' (tarball downloads)
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # No __file__ (frozen/embedded interpreter): cannot locate the root
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly (checked-out source tree)
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the unpacked directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
|
# -*- coding: utf-8 -*-
"""This file contains a basic Skype SQLite parser."""
import logging
from plaso.events import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo (bastionado@gmail.com)'
class SkypeChatEvent(time_events.PosixTimeEvent):
  """Convenience class for a Skype chat message event."""

  DATA_TYPE = u'skype:event:chat'

  def __init__(self, row, to_account):
    """Build a Skype Event from a single row.

    Args:
      row: A row object (instance of sqlite3.Row) that contains the
           extracted data from a single row in the database.
      to_account: A string containing the accounts (excluding the
                  author) of the conversation.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    super(SkypeChatEvent, self).__init__(
        row['timestamp'], u'Chat from Skype', self.DATA_TYPE)
    sender = u'{0:s} <{1:s}>'.format(
        row['from_displayname'], row['author'])
    self.from_account = sender
    self.text = row['body_xml']
    self.title = row['title']
    self.to_account = to_account
class SkypeAccountEvent(time_events.PosixTimeEvent):
  """Convenience class for a Skype account information event."""

  DATA_TYPE = u'skype:event:account'

  def __init__(
      self, timestamp, usage, identifier, full_name, display_name, email,
      country):
    """Initialize the event.

    Args:
      timestamp: The POSIX timestamp value.
      usage: A string containing the description string of the timestamp.
      identifier: The row identifier.
      full_name: A string containing the full name of the Skype account holder.
      display_name: A string containing the chosen display name of the account
                    holder.
      email: A string containing the registered email address of the account
             holder.
      country: A string containing the chosen home country of the account
               holder.
    """
    super(SkypeAccountEvent, self).__init__(timestamp, usage)
    self.country = country
    self.data_type = self.DATA_TYPE
    self.display_name = display_name
    self.email = email
    self.offset = identifier
    self.username = u'{0:s} <{1:s}>'.format(full_name, display_name)
class SkypeSMSEvent(time_events.PosixTimeEvent):
  """Convenience EventObject for SMS."""

  DATA_TYPE = u'skype:event:sms'

  def __init__(self, row, dst_number):
    """Read the information related with the SMS.

    Args:
      row: row from the SQL query, providing:
           row['time_sms']: timestamp when the SMS was sent.
           row['dstnum_sms']: number which receives the SMS.
           row['msg_sms']: text sent in this SMS.
      dst_number: phone number where the user sent the SMS.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    super(SkypeSMSEvent, self).__init__(
        row['time_sms'], u'SMS from Skype', self.DATA_TYPE)
    self.text = row['msg_sms']
    self.number = dst_number
class SkypeCallEvent(time_events.PosixTimeEvent):
  """Convenience EventObject for the calls."""

  DATA_TYPE = u'skype:event:call'

  def __init__(self, timestamp, call_type, user_start_call,
               source, destination, video_conference):
    """Contains information if the call was cancelled, accepted or finished.

    Args:
      timestamp: the timestamp of the event.
      call_type: WAITING, STARTED, FINISHED.
      user_start_call: boolean, true indicates that the owner
                       account started the call.
      source: the account which started the call.
      destination: the account which gets the call.
      video_conference: boolean, if is true it was a videoconference.
    """
    super(SkypeCallEvent, self).__init__(
        timestamp, u'Call from Skype', self.DATA_TYPE)
    self.video_conference = video_conference
    self.dst_call = destination
    self.src_call = source
    self.user_start_call = user_start_call
    self.call_type = call_type
class SkypeTransferFileEvent(time_events.PosixTimeEvent):
  """Evaluate the action of send a file."""

  DATA_TYPE = u'skype:event:transferfile'

  def __init__(self, row, timestamp, action_type, source, destination):
    """Actions related with sending files.

    Args:
      row: row from the SQL query, providing:
           filepath: path from the file.
           filename: name of the file.
           filesize: size of the file.
      timestamp: when the action happens.
      action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
      source: The account that sent the file.
      destination: The account that received the file.
    """
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    super(SkypeTransferFileEvent, self).__init__(
        timestamp, u'File transfer from Skype', self.DATA_TYPE)
    self.action_type = action_type
    self.destination = destination
    self.offset = row['id']
    self.source = source
    self.transferred_filename = row['filename']
    self.transferred_filepath = row['filepath']
    # Filesize is stored as text; fall back to 0 if it does not parse.
    try:
      self.transferred_filesize = int(row['filesize'])
    except ValueError:
      logging.debug(u'Unknown filesize {0:s}'.format(
          self.transferred_filename))
      self.transferred_filesize = 0
class SkypePlugin(interface.SQLitePlugin):
  """SQLite plugin for Skype main.db SQlite database file."""

  NAME = u'skype'
  DESCRIPTION = u'Parser for Skype SQLite database files.'

  # Queries for building cache.
  # Map transfer rows to the receiving account (keyed on parent_id).
  QUERY_DEST_FROM_TRANSFER = (
      u'SELECT parent_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')
  # Map transfer rows to the sending account (keyed on pk_id).
  QUERY_SOURCE_FROM_TRANSFER = (
      u'SELECT pk_id, partner_handle AS skypeid, '
      u'partner_dispname AS skypename FROM transfers')

  # Define the needed queries: (SQL, name of the parse callback method).
  QUERIES = [
      ((u'SELECT c.id, c.participants, c.friendlyname AS title, '
        u'm.author AS author, m.from_dispname AS from_displayname, '
        u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
        u'WHERE c.name = m.chatname'), u'ParseChat'),
      ((u'SELECT id, fullname, given_displayname, emails, '
        u'country, profile_timestamp, authreq_timestamp, '
        u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
        u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'),
      ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
        u'body AS msg_sms FROM SMSes'), u'ParseSMS'),
      ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, '
        u'starttime, accepttime, finishtime, filepath, filename, filesize, '
        u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'),
      ((u'SELECT c.id, cm.guid, c.is_incoming, '
        u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
        u'cm.start_timestamp AS accept_call, cm.call_duration '
        u'FROM Calls c, CallMembers cm '
        u'WHERE c.id = cm.call_db_id;'), u'ParseCall')]

  # The required tables: the plugin only runs against databases that
  # contain all of these.
  REQUIRED_TABLES = frozenset([
      u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes',
      u'Transfers', u'CallMembers', u'Calls'])
def ParseAccountInformation(
    self, parser_mediator, row, query=None, **unused_kwargs):
  """Parses the Accounts database.

  Produces one SkypeAccountEvent per populated timestamp column.

  Args:
    parser_mediator: A parser mediator object (instance of ParserMediator).
    row: The row resulting from the query.
    query: Optional query string. The default is None.
  """
  # Note that pysqlite does not accept a Unicode string in row['string'] and
  # will raise "IndexError: Index must be int or string".
  # Each timestamp column yields its own event; iterate the
  # (column, usage description) pairs instead of repeating the block 6x.
  # Order matches the original hand-unrolled sequence.
  timestamp_usages = [
      ('profile_timestamp', u'Profile Changed'),
      ('authreq_timestamp', u'Authenticate Request'),
      ('lastonline_timestamp', u'Last Online'),
      ('mood_timestamp', u'Mood Event'),
      ('sent_authrequest_time', u'Auth Request Sent'),
      ('lastused_timestamp', u'Last Used')]
  for column_name, usage in timestamp_usages:
    timestamp = row[column_name]
    if timestamp:
      event_object = SkypeAccountEvent(
          timestamp, usage, row['id'], row['fullname'],
          row['given_displayname'], row['emails'], row['country'])
      parser_mediator.ProduceEvent(event_object, query=query)
def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs):
  """Parses a chat message row.

  Args:
    parser_mediator: A parser mediator object (instance of ParserMediator).
    row: The row resulting from the query.
    query: Optional query string. The default is None.
  """
  # Note that pysqlite does not accept a Unicode string in row['string'] and
  # will raise "IndexError: Index must be int or string".
  author = row['author']
  recipients = [
      participant for participant in row['participants'].split(' ')
      if participant != author]
  to_account = u', '.join(recipients)
  if not to_account:
    # No other participants listed; fall back to the dialog partner
    # column, then to a placeholder.
    to_account = row['dialog_partner'] or u'Unknown User'
  event_object = SkypeChatEvent(row, to_account)
  parser_mediator.ProduceEvent(event_object, query=query)
def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs):
  """Parse SMS.

  Args:
    parser_mediator: A parser mediator object (instance of ParserMediator).
    row: The row resulting from the query.
    query: Optional query string. The default is None.
  """
  # Note that pysqlite does not accept a Unicode string in row['string'] and
  # will raise "IndexError: Index must be int or string".
  # Strip embedded spaces from the destination number.
  number = row['dstnum_sms'].replace(u' ', u'')
  parser_mediator.ProduceEvent(SkypeSMSEvent(row, number), query=query)
def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs):
  """Parse the calls taking into accounts some rows.

  Produces a WAITING event for every call, plus ACCEPTED/FINISHED events
  when the corresponding timestamps are available.

  Args:
    parser_mediator: A parser mediator object (instance of ParserMediator).
    row: The row resulting from the query.
    query: Optional query string. The default is None.
  """
  # Note that pysqlite does not accept a Unicode string in row['string'] and
  # will raise "IndexError: Index must be int or string".
  # The GUID has the form "<source>-<destination>-...".
  try:
    aux = row['guid']
    if aux:
      aux_list = aux.split(u'-')
      src_aux = aux_list[0]
      dst_aux = aux_list[1]
    else:
      src_aux = u'Unknown [no GUID]'
      dst_aux = u'Unknown [no GUID]'
  except IndexError:
    src_aux = u'Unknown [{0:s}]'.format(row['guid'])
    dst_aux = u'Unknown [{0:s}]'.format(row['guid'])

  if row['is_incoming'] == u'0':
    user_start_call = True
    source = src_aux
    # The registered query in QUERIES selects no ip_address column and
    # sqlite3.Row raises IndexError for unknown keys, so guard the lookup
    # instead of reading row['ip_address'] unconditionally.
    ip_address = None
    if 'ip_address' in row.keys():
      ip_address = row['ip_address']
    if ip_address:
      destination = u'{0:s} <{1:s}>'.format(dst_aux, ip_address)
    else:
      destination = dst_aux
  else:
    user_start_call = False
    source = src_aux
    destination = dst_aux

  # videostatus == '3' indicates a videoconference in this schema.
  video_conference = row['videostatus'] == u'3'

  event_object = SkypeCallEvent(
      row['try_call'], u'WAITING', user_start_call, source, destination,
      video_conference)
  parser_mediator.ProduceEvent(event_object, query=query)

  if row['accept_call']:
    event_object = SkypeCallEvent(
        row['accept_call'], u'ACCEPTED', user_start_call, source,
        destination, video_conference)
    parser_mediator.ProduceEvent(event_object, query=query)

    if row['call_duration']:
      try:
        timestamp = int(row['accept_call']) + int(row['call_duration'])
        event_object = SkypeCallEvent(
            timestamp, u'FINISHED', user_start_call, source, destination,
            video_conference)
        parser_mediator.ProduceEvent(event_object, query=query)
      except ValueError:
        logging.debug((
            u'[{0:s}] Unable to determine when the call {1:s} was '
            u'finished.').format(self.NAME, row['id']))
  def ParseFileTransfer(
      self, parser_mediator, row, cache=None, database=None, query=None,
      **unused_kwargs):
    """Parse the transfer files.

    There is no direct relationship between who sends the file and
    who accepts the file.

    Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
      row: the row with all information related with the file transfers.
      query: Optional query string. The default is None.
      cache: a cache object (instance of SQLiteCache).
      database: A database object (instance of SQLiteDatabase).
    """
    # Lazily build a pk_id -> (skypeid, skypename) lookup table for transfer
    # sources and keep it in the cache, so the extra query runs at most once.
    # Note that pysqlite does not accept a Unicode string in row['string'] and
    # will raise "IndexError: Index must be int or string".
    source_dict = cache.GetResults(u'source')
    if not source_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER)
      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'source', 'pk_id', ('skypeid', 'skypename'))
      source_dict = cache.GetResults(u'source')
    # Same lazily built lookup table, keyed by parent_id, for destinations.
    dest_dict = cache.GetResults(u'destination')
    if not dest_dict:
      cursor = database.cursor
      results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER)
      # Note that pysqlite does not accept a Unicode string in row['string'] and
      # will raise "IndexError: Index must be int or string".
      cache.CacheQueryResults(
          results, 'destination', 'parent_id', ('skypeid', 'skypename'))
      dest_dict = cache.GetResults(u'destination')
    source = u'Unknown'
    destination = u'Unknown'
    # A row with a parent_id describes the receiving side: the partner is the
    # destination and the source is looked up in the cached source table.
    if row['parent_id']:
      destination = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])
      skype_id, skype_name = source_dict.get(row['parent_id'], [None, None])
      if skype_name:
        source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    else:
      # Otherwise the partner is the source; when the row has a pk_id the
      # destination is looked up in the cached destination table.
      source = u'{0:s} <{1:s}>'.format(
          row['partner_handle'], row['partner_dispname'])
      if row['pk_id']:
        skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None])
        if skype_name:
          destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    # NOTE(review): status code meanings inferred from the emitted action
    # strings (8 -> solicited/accepted/finished flow, 2 -> a sent request
    # only); confirm against the Skype main.db schema.
    if row['status'] == 8:
      if row['starttime']:
        event_object = SkypeTransferFileEvent(
            row, row['starttime'], u'GETSOLICITUDE', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
      if row['accepttime']:
        event_object = SkypeTransferFileEvent(
            row, row['accepttime'], u'ACCEPTED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
      if row['finishtime']:
        event_object = SkypeTransferFileEvent(
            row, row['finishtime'], u'FINISHED', source, destination)
        parser_mediator.ProduceEvent(event_object, query=query)
    elif row['status'] == 2 and row['starttime']:
      event_object = SkypeTransferFileEvent(
          row, row['starttime'], u'SENDSOLICITUDE', source, destination)
      parser_mediator.ProduceEvent(event_object, query=query)
# Register the Skype plugin with the SQLite parser so it can be discovered.
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
|
|
## @file
# This file is used to define common items of class object
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Generate help text
#
## Build a HelpTextClass object for the given text and language
#
# @param Text: the help text string; a falsy value yields None
# @param Lang: the language tag to store alongside the text
#
# @retval HelpTextClass object, or None when Text is empty
#
def GenerateHelpText(Text, Lang):
    if not Text:
        return None
    HelpText = HelpTextClass()
    HelpText.Lang = Lang
    HelpText.String = Text
    return HelpText
## CommonClass
#
# This class defined common items used in Module/Platform/Package files
#
# @param object: Inherited from object class
# @param Usage: Input value for Usage, default is []
# @param FeatureFlag: Input value for FeatureFlag, default is ''
# @param SupArchList: Input value for SupArchList, default is []
# @param HelpText: Input value for HelpText, default is ''
#
# @var Usage: To store value for Usage, selection scope is in below list
# ALWAYS_CONSUMED | SOMETIMES_CONSUMED | ALWAYS_PRODUCED | SOMETIMES_PRODUCED | TO_START | BY_START | PRIVATE
# @var FeatureFlag: To store value for FeatureFlag
# @var SupArchList: To store value for SupArchList, selection scope is in below list
# EBC | IA32 | X64 | IPF | ARM | PPC | AARCH64
# @var HelpText: To store value for HelpText
#
class CommonClass(object):
    """Common items shared by Module/Platform/Package file objects.

    Args:
        Usage: usage list (e.g. ALWAYS_CONSUMED); a fresh [] when None.
        FeatureFlag: feature flag expression string.
        SupArchList: supported architecture list (e.g. IA32, X64); a fresh
            [] when None.
        HelpText: help text string.
    """
    def __init__(self, Usage = None, FeatureFlag = '', SupArchList = None, HelpText = ''):
        # None sentinels give every instance its own list (avoids the shared
        # mutable default argument pitfall); 'is None' is the idiomatic
        # identity test per PEP 8 and sidesteps custom __eq__ surprises.
        self.Usage = [] if Usage is None else Usage
        self.FeatureFlag = FeatureFlag
        self.SupArchList = [] if SupArchList is None else SupArchList
        self.HelpText = HelpText
        # Additional per-language help entries (HelpTextClass objects).
        self.HelpTextList = []
## CommonHeaderClass
#
# This class defined common items used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var Abstract: To store value for Abstract
# @var Description: To store value for Description
# @var Copyright: To store value for Copyright
# @var License: To store value for License
# @var Specification: To store value for Specification
#
class CommonHeaderClass(object):
    """Common header items used in Module/Platform/Package files."""
    def __init__(self):
        # Free-form text fields, all initially empty.
        for TextField in ('Abstract', 'Description', 'Copyright', 'License'):
            setattr(self, TextField, '')
        # Specification name -> version mapping.
        self.Specification = {}
## HelpTextClass
#
# This class defined HelpText item used in PKG file
#
# @param object: Inherited from object class
#
# @var Lang: To store value for Lang
# @var String: To store value for String
#
class HelpTextClass(object):
    """One help-text entry: a string plus the language tag it belongs to."""
    def __init__(self):
        self.Lang, self.String = '', ''
## DefineClass
#
# This class defined item DEFINE used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var Define: To store value for Define, it is a set structure as
# { (DefineName, Arch) : DefineValue, ... }
#
class DefineClass(object):
    """Container for DEFINE statements found in Module/Platform/Package files."""
    def __init__(self):
        # Maps (DefineName, Arch) tuples to DefineValue strings.
        self.Define = {}
## ClonedRecordClass
#
# This class defined ClonedRecord items used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var Id: To store value for Id
# @var FarGuid: To store value for FarGuid
# @var PackageGuid: To store value for PackageGuid
# @var PackageVersion: To store value for PackageVersion
# @var ModuleGuid: To store value for ModuleGuid
# @var ModuleVersion: To store value for ModuleVersion
#
class ClonedRecordClass(object):
    """Record describing where a package/module was cloned from."""
    def __init__(self):
        self.Id = 0
        # GUID/version identifiers of the originating FAR, package and module.
        for GuidField in ('FarGuid', 'PackageGuid', 'PackageVersion',
                          'ModuleGuid', 'ModuleVersion'):
            setattr(self, GuidField, '')
## IdentificationClass
#
# This class defined Identification items used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var Name: To store value for Name
# ModuleName(Inf) / PackageName(Dec) / PlatformName(Dsc)
# @var Guid: To store value for Guid
# @var Version: To store value for Version
# @var FileName: To store value for FileName
# @var FullPath: To store value for FullPath
#
class IdentificationClass(object):
    """Identity of a module/package/platform file: name, GUID, version, paths."""
    def __init__(self):
        # Name/GUID/version identity plus the path components the build
        # tools track for the file; all start out empty.
        for StringField in (
                'Name', 'BaseName', 'Guid', 'Version', 'FileName', 'FullPath',
                'RelaPath', 'PackagePath', 'ModulePath', 'CombinePath'):
            setattr(self, StringField, '')
## IncludeStatementClass
#
# This class defined IncludeFiles item used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var IncludeFiles: To store value for IncludeFiles
# It is a set structure as { IncludeFile : [Arch1, Arch2, ...], ... }
#
class IncludeStatementClass(object):
    """Container for include statements."""
    def __init__(self):
        # Maps IncludeFile to a list of architectures: { file : [Arch, ...] }.
        self.IncludeFiles = {}
## GuidProtocolPpiCommonClass
#
# This class defined Guid, Protocol and Ppi like items used in Module/Platform/Package files
#
# @param CommonClass: Inherited from CommonClass class
#
# @var Name: To store value for Name
# @var CName: To store value for CName
# @var Guid: To store value for Guid
# @var Notify: To store value for Notify
# @var GuidTypeList: To store value for GuidTypeList, selection scope is in below list
# DATA_HUB_RECORD | EFI_EVENT | EFI_SYSTEM_CONFIGURATION_TABLE | EFI_VARIABLE | GUID | HII_PACKAGE_LIST | HOB | TOKEN_SPACE_GUID
# @var SupModuleList: To store value for SupModuleList, selection scope is in below list
# BASE | SEC | PEI_CORE | PEIM | DXE_CORE | DXE_DRIVER | DXE_RUNTIME_DRIVER | DXE_SAL_DRIVER | DXE_SMM_DRIVER | UEFI_DRIVER | UEFI_APPLICATION | USER_DEFINED | SMM_CORE
#
class GuidProtocolPpiCommonClass(CommonClass):
    """Fields shared by Guid, Protocol and Ppi entries."""
    def __init__(self):
        # Initialize the CommonClass fields first; it touches a disjoint set
        # of attributes, so the ordering relative to the fields below is free.
        CommonClass.__init__(self)
        self.Name = ''
        self.CName = ''
        self.Guid = ''
        self.VariableName = ''
        self.Notify = False
        self.GuidTypeList = []
        self.GuidTypeLists = []
        self.SupModuleList = []
## LibraryClassClass
#
# This class defined Library item used in Module/Platform/Package files
#
# @param CommonClass: Inherited from CommonClass class
# @param DefineClass: Inherited from DefineClass class
#
# @var LibraryClass: To store value for LibraryClass
# @var IncludeHeader: To store value for IncludeHeader
# @var RecommendedInstanceVersion: To store value for RecommendedInstanceVersion
# @var RecommendedInstanceGuid: To store value for RecommendedInstanceGuid
# @var RecommendedInstance: To store value for RecommendedInstance, selection scope is in below list
# DATA_HUB_RECORD | EFI_EVENT | EFI_SYSTEM_CONFIGURATION_TABLE | EFI_VARIABLE | GUID | HII_PACKAGE_LIST | HOB | TOKEN_SPACE_GUID
# @var SupModuleList: To store value for SupModuleList, selection scope is in below list
# BASE | SEC | PEI_CORE | PEIM | DXE_CORE | DXE_DRIVER | DXE_RUNTIME_DRIVER | DXE_SAL_DRIVER | DXE_SMM_DRIVER | UEFI_DRIVER | UEFI_APPLICATION | USER_DEFINED | SMM_CORE
#
class LibraryClassClass(CommonClass, DefineClass):
    """A library class record with its recommended instance information."""
    def __init__(self):
        # Both bases set disjoint attribute groups, so calling them first is
        # equivalent to the usual set-then-init ordering.
        CommonClass.__init__(self)
        DefineClass.__init__(self)
        self.LibraryClass = ''
        self.IncludeHeader = ''
        self.RecommendedInstanceVersion = ''
        self.RecommendedInstanceGuid = ''
        self.RecommendedInstance = ''
        self.SupModuleList = []
## GuidClass
#
# This class defined Guid item used in Module/Platform/Package files
#
# @param GuidProtocolPpiCommonClass: Inherited from GuidProtocolPpiCommonClass class
#
class GuidClass(GuidProtocolPpiCommonClass):
    """A GUID entry; all fields are inherited from GuidProtocolPpiCommonClass."""
    def __init__(self):
        GuidProtocolPpiCommonClass.__init__(self)
## ProtocolClass
#
# This class defined Protocol item used in Module/Platform/Package files
#
# @param GuidProtocolPpiCommonClass: Inherited from GuidProtocolPpiCommonClass class
#
class ProtocolClass(GuidProtocolPpiCommonClass):
    """A Protocol entry; all fields are inherited from GuidProtocolPpiCommonClass."""
    def __init__(self):
        GuidProtocolPpiCommonClass.__init__(self)
## PpiClass
#
# This class defined Ppi item used in Module/Platform/Package files
#
# @param GuidProtocolPpiCommonClass: Inherited from GuidProtocolPpiCommonClass class
#
class PpiClass(GuidProtocolPpiCommonClass):
    """A Ppi entry; all fields are inherited from GuidProtocolPpiCommonClass."""
    def __init__(self):
        GuidProtocolPpiCommonClass.__init__(self)
## SkuInfoClass
#
# This class defined SkuInfo item used in Module/Platform/Package files
#
# @param object: Inherited from object class
# @param SkuIdName: Input value for SkuIdName, default is ''
# @param SkuId: Input value for SkuId, default is ''
# @param VariableName: Input value for VariableName, default is ''
# @param VariableGuid: Input value for VariableGuid, default is ''
# @param VariableOffset: Input value for VariableOffset, default is ''
# @param HiiDefaultValue: Input value for HiiDefaultValue, default is ''
# @param VpdOffset: Input value for VpdOffset, default is ''
# @param DefaultValue: Input value for DefaultValue, default is ''
#
# @var SkuIdName: To store value for SkuIdName
# @var SkuId: To store value for SkuId
# @var VariableName: To store value for VariableName
# @var VariableGuid: To store value for VariableGuid
# @var VariableOffset: To store value for VariableOffset
# @var HiiDefaultValue: To store value for HiiDefaultValue
# @var VpdOffset: To store value for VpdOffset
# @var DefaultValue: To store value for DefaultValue
#
class SkuInfoClass(object):
    """Per-SKU information for a PCD.

    Groups the HII (variable-backed), VPD and plain default settings that a
    PCD can carry for one SKU.
    """
    def __init__(self, SkuIdName = '', SkuId = '', VariableName = '', VariableGuid = '', VariableOffset = '',
                 HiiDefaultValue = '', VpdOffset = '', DefaultValue = '', VariableGuidValue = '', VariableAttribute = ''):
        self.SkuIdName = SkuIdName
        self.SkuId = SkuId
        # Fields used by HII (variable storage).
        self.VariableName = VariableName
        self.VariableGuid = VariableGuid
        self.VariableGuidValue = VariableGuidValue
        self.VariableOffset = VariableOffset
        self.HiiDefaultValue = HiiDefaultValue
        self.VariableAttribute = VariableAttribute
        # Field used by VPD.
        self.VpdOffset = VpdOffset
        # Field used by the plain default case.
        self.DefaultValue = DefaultValue

    ## Convert the class to a string
    #
    # Convert each member of the class to string
    # Organize to a single line format string
    #
    # @retval Formatted string
    #
    def __str__(self):
        Pairs = (
            ('SkuId', self.SkuId),
            ('SkuIdName', self.SkuIdName),
            ('VariableName', self.VariableName),
            ('VariableGuid', self.VariableGuid),
            ('VariableOffset', self.VariableOffset),
            ('HiiDefaultValue', self.HiiDefaultValue),
            ('VpdOffset', self.VpdOffset),
            ('DefaultValue', self.DefaultValue),
        )
        return ''.join('%s = %s,' % Pair for Pair in Pairs)
## PcdErrorClass
#
#
#
class PcdErrorClass(object):
    """Error/validation related fields attached to a PCD declaration."""
    def __init__(self):
        for TextField in ('ValidValueList', 'ValidValueListLang',
                          'ValidValueRange', 'Expression', 'ErrorNumber'):
            setattr(self, TextField, '')
        self.ErrorMessage = []
## PcdClass
#
# This class defined Pcd item used in Module/Platform/Package files
#
# @param CommonClass: Inherited from CommonClass class
# @param CName: Input value for CName, default is ''
# @param Token: Input value for Token, default is ''
# @param TokenSpaceGuidCName: Input value for TokenSpaceGuidCName, default is ''
# @param DatumType: Input value for DatumType, default is ''
# @param MaxDatumSize: Input value for MaxDatumSize, default is ''
# @param DefaultValue: Input value for DefaultValue, default is ''
# @param ItemType: Input value for ItemType, default is ''
# @param ValidUsage: Input value for ValidUsage, default is []
# @param SkuInfoList: Input value for SkuInfoList, default is {}
# @param SupModuleList: Input value for SupModuleList, default is []
#
# @var CName: To store value for CName
# @var Token: To store value for Token
# @var TokenSpaceGuidCName: To store value for TokenSpaceGuidCName
# @var DatumType: To store value for DatumType, selection scope is in below list
# UINT8 | UINT16 | UINT32 | UINT64 | VOID* | BOOLEAN
# @var MaxDatumSize: To store value for MaxDatumSize
# @var DefaultValue: To store value for DefaultValue
# @var ItemType: To store value for ItemType, selection scope is in below list
# FEATURE_FLAG | FIXED_AT_BUILD | PATCHABLE_IN_MODULE | DYNAMIC | DYNAMIC_EX
# @var ValidUsage: To store value for ValidUsage, selection scope is in below list
# FEATURE_FLAG | FIXED_AT_BUILD | PATCHABLE_IN_MODULE | DYNAMIC | DYNAMIC_EX
# @var SkuInfoList: To store value for SkuInfoList
# It is a set structure as { [SkuIdName] : SkuInfoClass }
# @var SupModuleList: To store value for SupModuleList, selection scope is in below list
# BASE | SEC | PEI_CORE | PEIM | DXE_CORE | DXE_DRIVER | DXE_RUNTIME_DRIVER | DXE_SAL_DRIVER | DXE_SMM_DRIVER | UEFI_DRIVER | UEFI_APPLICATION | USER_DEFINED | SMM_CORE
#
class PcdClass(CommonClass):
    """A PCD entry used in Module/Platform/Package files.

    Args:
        CName: C name of the PCD.
        Token: token number.
        TokenSpaceGuidCName: C name of the token space GUID.
        DatumType: datum type (e.g. UINT8..UINT64, VOID*, BOOLEAN).
        MaxDatumSize: maximum datum size.
        DefaultValue: default value string.
        ItemType: PCD item type (e.g. FIXED_AT_BUILD, DYNAMIC).
        ValidUsage: usage list; a fresh [] when None.
        SkuInfoList: {SkuIdName: SkuInfoClass} mapping; a fresh {} when None.
        SupModuleList: supported module type list; a fresh [] when None.
    """
    def __init__(self, CName = '', Token = '', TokenSpaceGuidCName = '', DatumType = '', MaxDatumSize = '', DefaultValue = '', ItemType = '', ValidUsage = None, SkuInfoList = None, SupModuleList = None):
        self.CName = CName
        self.Token = Token
        self.TokenSpaceGuidCName = TokenSpaceGuidCName
        self.DatumType = DatumType
        self.MaxDatumSize = MaxDatumSize
        self.DefaultValue = DefaultValue
        self.ItemType = ItemType
        # None sentinels give every instance its own container (avoids the
        # shared mutable default argument pitfall); 'is None' is the
        # idiomatic identity test per PEP 8.
        self.ValidUsage = [] if ValidUsage is None else ValidUsage
        self.SkuInfoList = {} if SkuInfoList is None else SkuInfoList
        self.SupModuleList = [] if SupModuleList is None else SupModuleList
        # Additional bookkeeping fields filled in by the parsers.
        self.PcdItemType = ''
        self.TokenSpaceGuidValue = ''
        self.PcdUsage = ''
        self.PcdCName = ''
        self.Value = ''
        self.Offset = ''
        CommonClass.__init__(self)
        # PcdErrorClass objects describing validation constraints.
        self.PcdErrors = []
## BuildOptionClass
#
# This class defined BuildOption item used in Module/Platform/Package files
#
# @param IncludeStatementClass: Inherited from IncludeStatementClass class
# @param ToolChainFamily: Input value for ToolChainFamily, default is ''
# @param ToolChain: Input value for ToolChain, default is ''
# @param Option: Input value for Option, default is ''
#
# @var Statement: To store value for Statement
# It is a string in a special format as "Family:Target_TagName_Tarch_ToolCode_FLAGS = String"
# @var ToolChainFamily: To store value for ToolChainFamily
# @var ToolChain: To store value for ToolChain
# @var Option: To store value for Option
# @var BuildTarget: To store value for BuildTarget
# @var TagName: To store value for TagName
# @var ToolCode: To store value for ToolCode
# @var SupArchList: To store value for SupArchList, selection scope is in below list
# EBC | IA32 | X64 | IPF | ARM | PPC | AARCH64
#
class BuildOptionClass(IncludeStatementClass):
    """A single build option record parsed from a description file."""
    def __init__(self, ToolChainFamily = '', ToolChain = '', Option = ''):
        IncludeStatementClass.__init__(self)
        # Statement holds the raw text in the form
        # "Family:Target_TagName_Tarch_ToolCode_FLAGS = String"; the other
        # fields hold the components parsed out of that key.
        for ParsedField in ('Statement', 'BuildTarget', 'TagName', 'ToolCode'):
            setattr(self, ParsedField, '')
        self.SupArchList = []
        self.ToolChainFamily = ToolChainFamily
        self.ToolChain = ToolChain
        self.Option = Option
## IncludeClass
#
# This class defined Include item used in Module/Platform/Package files
#
# @param CommonClass: Inherited from CommonClass class
#
# @var FilePath: To store value for FilePath
# @var ModuleType: To store value for ModuleType
# @var Comment: To store value for Comment
#
class IncludeClass(CommonClass):
    """An include path entry with its module-type scoping."""
    def __init__(self):
        # CommonClass sets a disjoint attribute group, so init order is free.
        CommonClass.__init__(self)
        self.FilePath = ''
        self.ModuleType = ''
        self.SupModuleList = []
        self.Comment = ''
## FileClass
#
#
class FileClass(CommonClass):
    """A file entry with optional executable/family/type attributes."""
    def __init__(self):
        CommonClass.__init__(self)
        for FileField in ('Filename', 'Executable', 'Family', 'FileType'):
            setattr(self, FileField, '')
## MiscFileClass
#
#
class MiscFileClass(CommonHeaderClass):
    """A named collection of files sharing common header information."""
    def __init__(self):
        self.Name = ''
        self.Files = []
        # CommonHeaderClass supplies the Abstract/Description/... fields;
        # its attribute set is disjoint from the two above.
        CommonHeaderClass.__init__(self)
## UserExtensionsClass
#
# This class defined UserExtensions item used in Module/Platform/Package files
#
# @param object: Inherited from object class
#
# @var UserID: To store value for UserID
# @var Identifier: To store value for Identifier
# @var Content: To store value for Content
#
class UserExtensionsClass(object):
    """A user-extensions section: free-form content tagged with a user id."""
    def __init__(self):
        self.UserID, self.Content = '', ''
        self.Identifier = 0
        # Optional nested DEFINE and build-option payloads.
        self.Defines = []
        self.BuildOptions = []
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide base classes for the Bokeh property system.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import types
from copy import copy
# External imports
import numpy as np
# Bokeh imports
from ...util.dependencies import import_optional
from ...util.string import nice_join
from ..has_props import HasProps
from .descriptor_factory import PropertyDescriptorFactory
from .descriptors import BasicPropertyDescriptor
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
__all__ = (
'ContainerProperty',
'DeserializationError',
'PrimitiveProperty',
'Property',
'validation_on',
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DeserializationError(Exception):
    ''' Raised when ``from_json`` cannot convert a JSON-compatible value
    into a value for a property.

    '''
    pass
class Property(PropertyDescriptorFactory):
    ''' Base class for Bokeh property instances, which can be added to Bokeh
    Models.

    Args:
        default (obj or None, optional) :
            A default value for attributes created from this property to
            have (default: None)

        help (str or None, optional) :
            A documentation string for this property. It will be automatically
            used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)

        serialized (bool, optional) :
            Whether attributes created from this property should be included
            in serialization (default: True)

        readonly (bool, optional) :
            Whether attributes created from this property are read-only.
            (default: False)

    '''

    # This class attribute is controlled by external helper API for validation
    _should_validate = True

    def __init__(self, default=None, help=None, serialized=None, readonly=False):
        # This is how the descriptor is created in the class declaration.
        if serialized is None:
            # Read-only properties default to not being serialized.
            self._serialized = False if readonly else True
        else:
            self._serialized = serialized
        self._readonly = readonly
        self._default = default
        self.__doc__ = help
        # (type, converter) pairs registered via accepts()
        self.alternatives = []
        # (fn, msg_or_fn) pairs registered via asserts()
        self.assertions = []

    def __str__(self):
        return self.__class__.__name__

    @classmethod
    def _sphinx_prop_link(cls):
        ''' Generate a sphinx :class: link to this property.

        '''
        # extra space at the end is unfortunately necessary to appease Sphinx
        return ":class:`~bokeh.core.properties.%s` " % cls.__name__

    @staticmethod
    def _sphinx_model_link(name):
        ''' Generate a sphinx :class: link to given named model.

        '''
        return ":class:`~%s` " % name

    def _sphinx_type(self):
        ''' Generate a Sphinx-style reference to this type for documentation
        automation purposes.

        '''
        return self._sphinx_prop_link()

    def make_descriptors(self, base_name):
        ''' Return a list of ``BasicPropertyDescriptor`` instances to install
        on a class, in order to delegate attribute access to this property.

        Args:
            name (str) : the name of the property these descriptors are for

        Returns:
            list[BasicPropertyDescriptor]

        The descriptors returned are collected by the ``MetaHasProps``
        metaclass and added to ``HasProps`` subclasses during class creation.
        '''
        return [ BasicPropertyDescriptor(base_name, self) ]

    def _may_have_unstable_default(self):
        ''' False if we have a default that is immutable, and will be the
        same every time (some defaults are generated on demand by a function
        to be called).

        '''
        # A function-valued default is invoked per use, so its result may vary.
        return isinstance(self._default, types.FunctionType)

    @classmethod
    def _copy_default(cls, default):
        ''' Return a copy of the default, or a new value if the default
        is specified by a function.

        '''
        if not isinstance(default, types.FunctionType):
            # Shallow copy, so mutable defaults are not shared between owners.
            return copy(default)
        else:
            return default()

    def _raw_default(self):
        ''' Return the untransformed default value.

        The raw_default() needs to be validated and transformed by
        prepare_value() before use, and may also be replaced later by
        subclass overrides or by themes.

        '''
        return self._copy_default(self._default)

    def themed_default(self, cls, name, theme_overrides):
        ''' The default, transformed by prepare_value() and the theme overrides.

        '''
        overrides = theme_overrides
        if overrides is None or name not in overrides:
            # Fall back to class-level overridden defaults when the theme
            # does not provide one for this property name.
            overrides = cls._overridden_defaults()
        if name in overrides:
            default = self._copy_default(overrides[name])
        else:
            default = self._raw_default()
        return self.prepare_value(cls, name, default)

    @property
    def serialized(self):
        ''' Whether the property should be serialized when serializing an object.

        This would be False for a "virtual" or "convenience" property that duplicates
        information already available in other properties, for example.

        '''
        return self._serialized

    @property
    def readonly(self):
        ''' Whether this property is read-only.

        Read-only properties may only be modified by the client (i.e., by BokehJS
        in the browser).

        '''
        return self._readonly

    def matches(self, new, old):
        ''' Whether two parameters match values.

        If either ``new`` or ``old`` is a NumPy array or Pandas Series or Index,
        then the result of ``np.array_equal`` will determine if the values match.

        Otherwise, the result of standard Python equality will be returned.

        Returns:
            True, if new and old match, False otherwise

        '''
        if isinstance(new, np.ndarray) or isinstance(old, np.ndarray):
            return np.array_equal(new, old)
        # pd is None when pandas is not installed (import_optional above).
        if pd:
            if isinstance(new, pd.Series) or isinstance(old, pd.Series):
                return np.array_equal(new, old)
            if isinstance(new, pd.Index) or isinstance(old, pd.Index):
                return np.array_equal(new, old)
        try:
            # this handles the special but common case where there is a dict with array
            # or series as values (e.g. the .data property of a ColumnDataSource)
            if isinstance(new, dict) and isinstance(old, dict):
                if set(new.keys()) != set(old.keys()):
                    return False
                return all(self.matches(new[k], old[k]) for k in new)
            # FYI Numpy can erroneously raise a warning about elementwise
            # comparison here when a timedelta is compared to another scalar.
            # https://github.com/numpy/numpy/issues/10095
            return new == old
        # if the comparison fails for some reason, just punt and return no-match
        except ValueError:
            return False

    def from_json(self, json, models=None):
        ''' Convert from JSON-compatible values into a value for this property.

        JSON-compatible values are: list, dict, number, string, bool, None

        '''
        return json

    def serialize_value(self, value):
        ''' Change the value into a JSON serializable format.

        '''
        return value

    def transform(self, value):
        ''' Change the value into the canonical format for this property.

        Args:
            value (obj) : the value to apply transformation to.

        Returns:
            obj: transformed value

        '''
        return value

    def validate(self, value, detail=True):
        ''' Determine whether we can set this property from this value.

        Validation happens before transform()

        Args:
            value (obj) : the value to validate against this property type

            detail (bool, options) : whether to construct detailed exceptions

                Generating detailed type validation error messages can be
                expensive. When doing type checks internally that will not
                escape exceptions to users, these messages can be skipped
                by setting this value to False (default: True)

        Returns:
            None

        Raises:
            ValueError if the value is not valid for this property type

        '''
        pass

    def is_valid(self, value):
        ''' Whether the value passes validation

        Args:
            value (obj) : the value to validate against this property type

        Returns:
            True if valid, False otherwise

        '''
        try:
            if validation_on():
                self.validate(value, False)
        except ValueError:
            return False
        else:
            # Also returns True when validation is globally disabled.
            return True

    @classmethod
    def wrap(cls, value):
        ''' Some property types need to wrap their values in special containers, etc.

        '''
        return value

    def prepare_value(self, obj_or_cls, name, value):
        # Validate first; on failure, try the alternative types registered
        # via accepts() before re-raising the original validation error.
        try:
            if validation_on():
                self.validate(value)
        except ValueError as e:
            for tp, converter in self.alternatives:
                if tp.is_valid(value):
                    value = converter(value)
                    break
            else:
                raise e
        else:
            value = self.transform(value)
        if isinstance(obj_or_cls, HasProps):
            obj = obj_or_cls
            # Run the assertions registered via asserts(); they only apply
            # when preparing a value for an instance (not a class).
            for fn, msg_or_fn in self.assertions:
                if isinstance(fn, bool):
                    result = fn
                else:
                    result = fn(obj, value)
                assert isinstance(result, bool)
                if not result:
                    if isinstance(msg_or_fn, str):
                        raise ValueError(msg_or_fn)
                    else:
                        msg_or_fn(obj, name, value)
        return self.wrap(value)

    @property
    def has_ref(self):
        # Overridden by property types that (may) hold references to models.
        return False

    def accepts(self, tp, converter):
        ''' Declare that other types may be converted to this property type.

        Args:
            tp (Property) :
                A type that may be converted automatically to this property
                type.

            converter (callable) :
                A function accepting ``value`` to perform conversion of the
                value to this property type.

        Returns:
            self

        '''
        tp = ParameterizedProperty._validate_type_param(tp)
        self.alternatives.append((tp, converter))
        return self

    def asserts(self, fn, msg_or_fn):
        ''' Assert that prepared values satisfy given conditions.

        Assertions are intended to enforce conditions beyond simple value
        type validation. For instance, this method can be used to assert that
        the columns of a ``ColumnDataSource`` all collectively have the same
        length at all times.

        Args:
            fn (callable) :
                A function accepting ``(obj, value)`` that returns True if the value
                passes the assertion, or False otherwise.

            msg_or_fn (str or callable) :
                A message to print in case the assertion fails, or a function
                accepting ``(obj, name, value)`` to call in case the assertion
                fails.

        Returns:
            self

        '''
        self.assertions.append((fn, msg_or_fn))
        return self
class ParameterizedProperty(Property):
    ''' A base class for properties that take type parameters, e.g.
    ``List(String)``.

    '''

    @staticmethod
    def _validate_type_param(type_param):
        # Accept a Property instance as-is; instantiate a bare Property
        # subclass; report any other class by its name; reject the rest.
        if isinstance(type_param, Property):
            return type_param
        if isinstance(type_param, type):
            if issubclass(type_param, Property):
                return type_param()
            type_param = type_param.__name__
        raise ValueError("expected a Property as type parameter, got %s" % type_param)

    @property
    def type_params(self):
        # Subclasses must report their type parameters.
        raise NotImplementedError("abstract method")

    @property
    def has_ref(self):
        # A parameterized property may hold references if any parameter may.
        return any(type_param.has_ref for type_param in self.type_params)
class PrimitiveProperty(Property):
    ''' A base class for simple property types.

    Subclasses should define a class attribute ``_underlying_type`` that is
    a tuple of acceptable type values for the property.

    Example:

        A trivial version of a ``Float`` property might look like:

        .. code-block:: python

            class Float(PrimitiveProperty):
                _underlying_type = (numbers.Real,)

    '''

    _underlying_type = None

    def validate(self, value, detail=True):
        super().validate(value, detail)
        # None is always acceptable, as are values of the underlying type(s).
        if value is None or isinstance(value, self._underlying_type):
            return
        msg = ""
        if detail:
            expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
            msg = "expected a value of type %s, got %s of type %s" % (
                expected, value, type(value).__name__
            )
        raise ValueError(msg)

    def from_json(self, json, models=None):
        # Primitive values pass through unchanged; anything else is an error.
        if json is not None and not isinstance(json, self._underlying_type):
            expected = nice_join([ cls.__name__ for cls in self._underlying_type ])
            raise DeserializationError("%s expected %s, got %s of type %s" % (self, expected, json, type(json).__name__))
        return json

    def _sphinx_type(self):
        return self._sphinx_prop_link()
class ContainerProperty(ParameterizedProperty):
    ''' A base class for Container-like type properties.

    Container defaults are always treated as unstable, since mutable values
    may be modified in place.

    '''

    def _may_have_unstable_default(self):
        # all containers are mutable, so the default can be modified
        return True
def validation_on():
    ''' Check if property validation is currently active

    Returns:
        bool

    '''
    # The flag is controlled by external helper API (see Property above).
    return Property._should_validate
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
#! /usr/bin/env python
"""
Logfile tailer for rotated log files.
Supports 2 operating modes: classic, rotated.
Assumes that:
. All log files reside in the same directory.
. We can find last log file by sorting the file list alphabetically.
In classic mode:
. When log is switched, the tailer continues tailing from the next file.
. When the tailer is restarted, it continues tailing from saved position.
In rotated mode:
. When log is switched, the tailer continues tailing from reopened file.
"""
from __future__ import with_statement
import cStringIO
import glob
import os
import re
import sys
import time
import skytools
import cc.util
from cc.daemon import CCDaemon
from cc.message import is_msg_req_valid
from cc.reqs import LogtailMessage
class LogfileTailer (CCDaemon):
    """ Logfile tailer for rotated log files.

    Reads a log file line-by-line (or in chunks in binary mode), buffers
    the data, and publishes it as LogtailMessage fragments over CC.
    Supports 'classic' mode (follow the alphabetically-next file) and
    'rotated' mode (reopen the same file name after rotation).
    """
    log = skytools.getLogger ('d:LogfileTailer')

    # minimum fragment size when compression is enabled
    BUF_MINBYTES = 64 * 1024
    PROBESLEFT = 2 # number of retries after old log EOF and new log spotted

    def reload (self):
        """ Re-read configuration; invalid settings are logged as errors. """
        super(LogfileTailer, self).reload()
        self.op_mode = self.cf.get ('operation-mode', '')
        if self.op_mode not in (None, '', 'classic', 'rotated'):
            self.log.error ("unknown operation-mode: %s", self.op_mode)
        self.file_mode = self.cf.get ('file-mode', '')
        if self.file_mode not in (None, '', 'text', 'binary'):
            self.log.error ("unknown file-mode: %s", self.file_mode)
        self.logdir = self.cf.getfile ('logdir')
        if self.op_mode in (None, '', 'classic'):
            self.logmask = self.cf.get ('logmask')
        elif self.op_mode == 'rotated':
            self.logname = self.cf.get ('logname')
            if re.search ('\?|\*', self.logname):
                self.log.error ("wildcards in logname not supported: %s", self.logname)
            # in rotated mode the "mask" is the fixed file name itself
            self.logmask = self.logname
        self.compression = self.cf.get ('compression', '')
        if self.compression not in (None, '', 'none', 'gzip', 'bzip2'):
            self.log.error ("unknown compression: %s", self.compression)
        self.compression_level = self.cf.getint ('compression-level', '')
        self.msg_suffix = self.cf.get ('msg-suffix', '')
        if self.msg_suffix and not is_msg_req_valid (self.msg_suffix):
            self.log.error ("invalid msg-suffix: %s", self.msg_suffix)
            self.msg_suffix = None
        self.use_blob = self.cf.getbool ('use-blob', True)
        self.lag_maxbytes = cc.util.hsize_to_bytes (self.cf.get ('lag-max-bytes', '0'))
        self.reverse_sort = False
        self.buf_maxbytes = cc.util.hsize_to_bytes (self.cf.get ('buffer-bytes', '0'))
        self.buf_maxlines = self.cf.getint ('buffer-lines', -1)
        self.buf_maxdelay = 1.0
        # compensate for our config class weakness
        if self.buf_maxbytes <= 0: self.buf_maxbytes = None
        if self.buf_maxlines < 0: self.buf_maxlines = None
        # set defaults if nothing found in config
        if self.buf_maxbytes is None and self.buf_maxlines is None:
            self.buf_maxbytes = 1024 * 1024
        if self.compression not in (None, '', 'none'):
            # compressing tiny fragments is wasteful; enforce a minimum
            if self.buf_maxbytes < self.BUF_MINBYTES:
                self.log.info ("buffer-bytes too low, adjusting: %i -> %i", self.buf_maxbytes, self.BUF_MINBYTES)
                self.buf_maxbytes = self.BUF_MINBYTES

    def startup (self):
        """ Initialise runtime state and try to restore the saved position.

        The save file holds "<offset>\\t<filename>"; saved state is only
        honoured in classic mode and when the lag is acceptable.
        """
        super(LogfileTailer, self).startup()
        self.logfile = None        # full path of current log file
        self.logf = None           # file object
        self.logfpos = None        # tell() position in logf
        self.probesleft = self.PROBESLEFT
        self.first = True          # True until first file has been positioned
        self.tailed_files = 0
        self.tailed_bytes = 0
        self.buffer = cStringIO.StringIO()
        self.buflines = 0
        self.bufseek = None        # file offset the buffer starts at
        self.saved_fpos = None
        self.save_file = None
        self.logf_dev = self.logf_ino = None
        sfn = self.get_save_filename()
        try:
            with open (sfn, "r") as f:
                s = f.readline().split('\t', 1)
            try:
                self.logfile = s[1].strip()
                self.saved_fpos = int(s[0])
                self.log.info ("found saved state for %s", self.logfile)
            except:
                # malformed / empty save file -- start from scratch
                self.logfile = self.saved_fpos = None
            if self.op_mode == 'rotated':
                self.log.info ("cannot use saved state in this operation mode")
                self.logfile = self.saved_fpos = None
            lag = self.count_lag_bytes()
            if lag is not None:
                self.log.info ("currently lagging %i bytes behind", lag)
                if lag > self.lag_maxbytes:
                    self.log.warning ("lag too big, skipping")
                    self.logfile = self.saved_fpos = None
            else:
                self.log.warning ("cannot determine lag, skipping")
                self.logfile = self.saved_fpos = None
        except IOError:
            # no save file yet -- first run
            pass
        self.save_file = open (sfn, "a")

    def count_lag_bytes (self):
        """ Sum of unread bytes from the saved position to the newest file.

        Returns None when the saved file is gone or no position was saved.
        """
        files = self.get_all_filenames()
        if self.logfile not in files or self.saved_fpos is None:
            return None
        lag = 0
        # walk from the newest file backwards until we hit the saved one
        while True:
            fn = files.pop()
            st = os.stat(fn)
            lag += st.st_size
            if (fn == self.logfile):
                break
        lag -= self.saved_fpos
        assert lag >= 0
        return lag

    def get_all_filenames (self):
        """ Return sorted list of all log file names """
        lfni = glob.iglob (os.path.join (self.logdir, self.logmask))
        lfns = sorted (lfni, reverse = self.reverse_sort)
        return lfns

    def get_last_filename (self):
        """ Return the name of latest log file """
        files = self.get_all_filenames()
        if files:
            return files[-1]
        return None

    def get_next_filename (self):
        """ Return the name of "next" log file """
        files = self.get_all_filenames()
        if not files:
            return None
        try:
            i = files.index (self.logfile)
            if not self.first:
                # advance to the file after the current one
                fn = files[i+1]
            else:
                # first round: stay on the restored file
                fn = files[i]
        except ValueError:
            # current file vanished from the list -- take the newest
            fn = files[-1]
        except IndexError:
            # no newer file yet -- keep the current one
            fn = files[i]
        return fn

    def get_save_filename (self):
        """ Return the name of save file """
        return os.path.splitext(self.pidfile)[0] + ".save"

    def save_file_pos (self):
        # persist "<offset>\t<filename>" so a restart can resume
        self.save_file.truncate (0)
        self.save_file.write ("%i\t%s" % (self.bufseek, self.logfile))
        self.log.debug ("saved offset %i for %s", self.bufseek, self.logfile)

    def is_new_file_available (self):
        """ Has the log been switched / rotated since we opened it? """
        if self.op_mode in (None, '', 'classic'):
            return (self.logfile != self.get_next_filename())
        elif self.op_mode == 'rotated':
            # same name, different inode/device means the file was rotated
            st = os.stat (self.logfile)
            return (st.st_dev != self.logf_dev or st.st_ino != self.logf_ino)
        else:
            raise ValueError ("unsupported mode of operation")

    def try_open_file (self, name):
        """ Try open log file; sleep a bit if unavailable. """
        if name:
            assert self.buffer.tell() == 0
            try:
                self.logf = open (name, 'rb')
                self.logfile = name
                self.logfpos = 0
                self.bufseek = 0
                self.send_stats() # better do it async me think (?)
                self.log.info ("Tailing %s", self.logfile)
                self.stat_inc ('tailed_files')
                self.tailed_files += 1
                self.probesleft = self.PROBESLEFT
                # remember identity so rotation can be detected later
                st = os.fstat (self.logf.fileno())
                self.logf_dev, self.logf_ino = st.st_dev, st.st_ino
            except IOError, e:
                self.log.info ("%s", e)
                time.sleep (0.2)
        else:
            self.log.debug ("no logfile available, waiting")
            time.sleep (0.2)

    def tail (self):
        """ Keep reading from log file (line by line), switch to next file if current file is exhausted.
        """
        while not self.last_sigint:
            if not self.logf:
                # if not already open, keep trying until it becomes available
                self.try_open_file (self.get_next_filename())
                continue
            if self.first:
                # seek to saved position or end of first file
                if self.saved_fpos:
                    self.logf.seek (self.saved_fpos, os.SEEK_SET)
                else:
                    self.logf.seek (0, os.SEEK_END)
                self.bufseek = self.logfpos = self.logf.tell()
                self.log.info ("started at file position %i", self.logfpos)
                self.first = False
            if self.file_mode == 'binary':
                line = self.logf.read (self.buf_maxbytes)
            else:
                line = self.logf.readline()
            if line:
                s = len(line)
                self.logfpos += s
                self.tailed_bytes += s
                self.buffer.write(line)
                self.buflines += 1
                # flush once either size or line-count threshold is reached
                if ((self.buf_maxbytes is not None and self.buffer.tell() >= self.buf_maxbytes) or
                    (self.buf_maxlines is not None and self.buflines >= self.buf_maxlines)):
                    self.send_frag()
                if self.probesleft < self.PROBESLEFT:
                    self.log.info ("DEBUG: new data in old log (!)")
                continue
            # reset EOF condition for next attempt
            self.logf.seek (0, os.SEEK_CUR)
            if self.buffer.tell() > 0 and self.compression in (None, '', 'none'):
                # at EOF: flush what we have (uncompressed mode only)
                self.send_frag()
            elif self.is_new_file_available():
                if self.probesleft <= 0:
                    self.log.trace ("new log, closing old one")
                    self.send_frag()
                    self.logf.close()
                    self.logf = None
                else:
                    # probe the old file a few more times before switching
                    self.log.trace ("new log, still waiting for old one")
                    self.probesleft -= 1
                    time.sleep (0.1)
            else:
                self.log.trace ("waiting")
                time.sleep (0.1)

    def send_frag (self):
        """ Publish the buffered data as one LogtailMessage and reset buffer. """
        bufsize = self.buffer.tell()
        if bufsize == 0:
            return
        start = time.time()
        if self.compression in (None, '', 'none'):
            buf = self.buffer.getvalue()
        else:
            buf = cc.util.compress (self.buffer.getvalue(), self.compression,
                                    {'level': self.compression_level})
            self.log.debug ("compressed from %i to %i", bufsize, len(buf))
        if self.use_blob:
            # payload travels as a separate blob, message body stays empty
            data = ''
            blob = buf
        else:
            data = buf.encode('base64')
            blob = None
        msg = LogtailMessage(
                filename = self.logfile,
                comp = self.compression,
                fpos = self.bufseek,
                data = data,
                op_mode = self.op_mode,
                st_dev = self.logf_dev,
                st_ino = self.logf_ino)
        if self.msg_suffix:
            msg.req += '.' + self.msg_suffix
        self.ccpublish (msg, blob)
        elapsed = time.time() - start
        self.log.debug ("sent %i bytes in %f s", len(buf), elapsed)
        self.stat_inc ('duration', elapsed) # json/base64/compress time, actual send happens async
        self.stat_inc ('count')
        self.stat_inc ('tailed_bytes', bufsize)
        self.bufseek += bufsize
        self.buffer.truncate(0)
        self.buflines = 0
        assert self.bufseek == self.logfpos
        self.save_file_pos()

    def work (self):
        """ Main loop entry called by the daemon framework. """
        self.connect_cc()
        self.log.info ("Watching %s", os.path.join (self.logdir, self.logmask))
        try:
            self.tail()
        except (IOError, OSError), e:
            self.log.error ("%s", e)
        # NOTE(review): non-zero return presumably tells the daemon loop to
        # continue -- confirm against CCDaemon/skytools semantics
        return 1

    def stop (self):
        super(LogfileTailer, self).stop()
        self.log.info ("stopping")
if __name__ == '__main__':
    # 'logfile_tailer' selects the config/service name; remaining argv is
    # handled by the daemon framework (start/stop options etc.)
    s = LogfileTailer ('logfile_tailer', sys.argv[1:])
    s.start()
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of DynamicNormalization.
DynamicNormalization differs from BatchNormalization in the following aspects:
1) It assumes each input activation belongs to one of many clusters and the
number of clusters can grow dynamically in mode dm_ops.LOOKUP_WITH_GROW.
2) The normalization equation is derived based on the assumption that a layer
computes a Gaussian distribution, and it is shown that the resulting models
often outperform BatchNormalization.
3) Compared to BatchNormalization, DynamicNormalization works well in any batch
size.
"""
import typing
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
from research.carls import dynamic_memory_ops as dm_ops
import tensorflow as tf
class DynamicNormalization(tf.keras.layers.Layer):
  r"""Keras' layer implementation for DynamicNormalization.

  Similar to Batch Normalization, DynamicNormalization normalizes the
  activations of the previous layer for each input.

  The normalization formula:
    ((|mean|^2 + p(mean)) / |x|^2 - 2 * g(mean)) / sqrt(variance)
  where
    p(mean) = prior_scale * mean + prior_offset
    g(mean) = mean_scale * mean + mean_offset.
  Here (prior_scale, prior_offset, mean_scale, mean_offset) are learnable
  parameters.
  """

  def __init__(self,
               dm_config: de_config_pb2.DynamicEmbeddingConfig,
               mode: int,
               axis: int = -1,
               epsilon: float = 1e-3,
               scale_initializer='ones',
               offset_initializer='zeros',
               scale_regularizer=None,
               offset_regularizer=None,
               scale_constraint=None,
               offset_constraint=None,
               use_batch_normalization: bool = False,
               trainable=True,
               service_address: typing.Text = '',
               name=None,
               **kwargs):
    r"""Constructor of DynamicNormalization.

    Args:
      dm_config: An instance of DynamicEmbeddingConfig.
      mode: An int or a `Tensor` whose value must be one of
        {LOOKUP_WITHOUT_UPDATE, LOOKUP_WITH_UPDATE, LOOKUP_WITH_GROW} as defined
        in dynamic_memory_ops.py.
      axis: Integer, the axis along which to compute mean and variance.
      epsilon: Small float added to variance to avoid dividing by zero.
      scale_initializer: Initializer for the scale weight.
      offset_initializer: Initializer for the offset weight.
      scale_regularizer: Optional regularizer for the scale weight.
      offset_regularizer: Optional regularizer for the offset weight.
      scale_constraint: Optional constraint for the scale weight.
      offset_constraint: Optional constraint for the offset weight.
      use_batch_normalization: Boolean, if `True`, use BatchNormalization's
        formula instead of DynamicNormalization's own one when computing the
        output.
      trainable: Boolean, if `True` the variables will be marked as trainable.
      service_address: The address of a knowledge bank service. If empty, the
        value passed from --kbs_address flag will be used instead.
      name: A string indicating the op's name.
      **kwargs: Addition inputs.

    Raises:
      ValueError: if `mode` is None.
    """
    super(DynamicNormalization, self).__init__(name=name, **kwargs)
    if mode is None:
      raise ValueError('Must specify model mode.')

    self.axis = axis
    self.dm_config = dm_config
    self.mode = mode
    self.epsilon = epsilon
    self.scale_initializer = tf.keras.initializers.get(scale_initializer)
    self.offset_initializer = tf.keras.initializers.get(offset_initializer)
    self.scale_regularizer = tf.keras.regularizers.get(scale_regularizer)
    self.offset_regularizer = tf.keras.regularizers.get(offset_regularizer)
    self.scale_constraint = tf.keras.constraints.get(scale_constraint)
    self.offset_constraint = tf.keras.constraints.get(offset_constraint)
    self.use_batch_normalization = use_batch_normalization
    self.trainable = trainable
    self.service_address = service_address

  @property
  def trainable(self):
    return self._trainable

  @trainable.setter
  def trainable(self, value):
    self._trainable = value

  @property
  def _param_dtype(self):
    # Raise parameters of fp16 batch norm to fp32
    if self.dtype == tf.dtypes.float16 or self.dtype == tf.dtypes.bfloat16:
      return tf.dtypes.float32
    else:
      return self.dtype or tf.dtypes.float32

  def _add_offset(self, name: typing.Text, shape):
    # Creates a trainable offset weight (autocast disabled so the weight
    # stays in _param_dtype even under mixed precision).
    return self.add_weight(
        name=name,
        shape=shape,
        dtype=self._param_dtype,
        initializer=self.offset_initializer,
        regularizer=self.offset_regularizer,
        constraint=self.offset_constraint,
        trainable=True,
        experimental_autocast=False)

  def _add_scale(self, name: typing.Text, shape):
    # Creates a trainable scale weight (autocast disabled, see _add_offset).
    return self.add_weight(
        name=name,
        shape=shape,
        dtype=self._param_dtype,
        initializer=self.scale_initializer,
        regularizer=self.scale_regularizer,
        constraint=self.scale_constraint,
        trainable=True,
        experimental_autocast=False)

  def build(self, input_shape):
    """Validates `axis` against the input shape and creates the weights."""
    input_shape = tf.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndims = len(input_shape)

    # Convert axis to list and resolve negatives
    if isinstance(self.axis, int):
      self.axis = [self.axis]
    for idx, x in enumerate(self.axis):
      if x < 0:
        self.axis[idx] = ndims + x

    # Validate axes
    for x in self.axis:
      if x < 0 or x >= ndims:
        raise ValueError('Invalid axis: %d' % x)
    if len(self.axis) != len(set(self.axis)):
      raise ValueError('Duplicate axis: %s' % self.axis)

    axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
    for x in axis_to_dim:
      if axis_to_dim[x] is None:
        raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                         input_shape)
    self.input_spec = tf.keras.layers.InputSpec(ndim=ndims, axes=axis_to_dim)

    if len(axis_to_dim) == 1:
      # Single axis batch norm (most common/default use-case)
      param_shape = (list(axis_to_dim.values())[0],)
    else:
      # Parameter shape is the original shape but with 1 in all non-axis dims
      param_shape = [
          axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims)
      ]
    self.mean_offset = self._add_offset('mean_offset', param_shape)
    self.mean_scale = self._add_scale('mean_scale', param_shape)
    # prior_* weights are only used by DynamicNormalization's own formula
    if not self.use_batch_normalization:
      self.prior_offset = self._add_offset('prior_offset', param_shape)
      self.prior_scale = self._add_scale('prior_scale', param_shape)
    self.built = True

  def _get_training_value(self, training=None):
    # Fall back to the global Keras learning phase when not set explicitly.
    if training is None:
      training = tf.keras.backend.learning_phase()
    return training

  def call(self, inputs, training=None):
    """Normalizes `inputs` using statistics from dynamic Gaussian memory."""
    training = self._get_training_value(training)

    inputs_dtype = inputs.dtype.base_dtype
    if inputs_dtype in (tf.float16, tf.bfloat16):
      # Do all math in float32 if given 16-bit inputs for numeric stability.
      # In particular, it's very easy for variance to overflow in float16 and
      # for safety we also choose to cast bfloat16 to float32.
      inputs = tf.cast(inputs, tf.float32)

    # Compute the axes along which to reduce the mean / variance
    input_shape = inputs.shape
    ndims = len(input_shape)
    reduction_axes = [i for i in range(ndims) if i not in self.axis]

    # Stops gradient update for the layers below in grow mode.
    # Intuitively when a new cluster is created, the gradients sent down to the
    # lower layers can disrupt the original weight, so it makes more sense to
    # freeze the other part when growing.
    inputs = tf.cond(
        tf.equal(self.mode, dm_ops.LOOKUP_WITH_GROW),
        lambda: tf.stop_gradient(inputs), lambda: inputs)

    # Broadcasting only necessary for single-axis batch norm where the axis is
    # not the last dimension
    broadcast_shape = [1] * ndims
    broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value

    def _broadcast(v):
      # Reshape a parameter so it broadcasts against `inputs` when needed.
      if (v is not None and len(v.shape) != ndims and
          reduction_axes != list(range(ndims - 1))):
        return tf.reshape(v, broadcast_shape)
      return v

    mean_scale, mean_offset = _broadcast(self.mean_scale), _broadcast(
        self.mean_offset)
    if not self.use_batch_normalization:
      prior_scale, prior_offset = _broadcast(self.prior_scale), _broadcast(
          self.prior_offset)

    # Looks up mean and variances of from dynamic Gaussian memory.
    self.mean, self.variance, self.distance, self.cluster_id = (
        dm_ops.dynamic_gaussian_memory_lookup(
            inputs,
            self.mode,
            self.dm_config,
            'dm_lookup',
            service_address=self.service_address))

    if self.use_batch_normalization:
      outputs = tf.nn.batch_normalization(inputs, self.mean, self.variance,
                                          mean_offset, mean_scale, self.epsilon)
    else:
      outputs = dynamic_normalization(inputs, self.mean, self.variance,
                                      prior_offset, mean_offset, prior_scale,
                                      mean_scale, self.epsilon)
    # If some components of the shape got lost due to adjustments, fix that.
    outputs.set_shape(input_shape)
    return outputs
def dynamic_normalization(x: tf.Tensor, mean: tf.Tensor, variance: tf.Tensor,
                          prior_offset: tf.Tensor, mean_offset: tf.Tensor,
                          prior_scale: tf.Tensor, mean_scale: tf.Tensor,
                          variance_epsilon: float):
  r"""Normalizes a tensor `x` based on the DynamicNormalization formula.

  The normalization formula:
    ((|mean|^2 + p(mean)) / |x|^2 - 2 * g(mean)) / sqrt(variance)
  where
    p(mean) = prior_scale * mean + prior_offset
    g(mean) = mean_scale * mean + mean_offset.
  Here (prior_scale, prior_offset, mean_scale, mean_offset) are learnable
  parameters.

  Args:
    x: Input `Tensor` of arbitrary dimensionality.
    mean: A mean `Tensor`.
    variance: A variance `Tensor`.
    prior_offset: An offset `Tensor` for the prior term.
    mean_offset: An offset `Tensor` for estimating the mean value.
    prior_scale: A scale `Tensor` for the prior term.
    mean_scale: A scale `Tensor` for estimating the mean value.
    variance_epsilon: A small float number to avoid dividing by 0.

  Returns:
    The normalized, scaled, offset tensor.
  """
  with tf.name_scope('dynamic_normalization'):
    # 1 / sqrt(variance + eps), the usual normalization denominator.
    inv_stddev = tf.math.rsqrt(variance + variance_epsilon)
    # |mean|^2 + p(mean), with p(mean) = prior_scale * mean + prior_offset.
    numerator = tf.reduce_sum(tf.math.square(mean), -1, keepdims=True)
    numerator += prior_scale * mean + prior_offset
    # |x|^2 + eps; epsilon also guards against division by zero here.
    denominator = tf.reduce_sum(
        tf.math.square(x), -1, keepdims=True) + variance_epsilon
    ratio = numerator / denominator
    # g(mean) = mean_scale * mean + mean_offset.
    estimated_mean = mean * mean_scale + mean_offset
    return ((1 + ratio) * x - 2 * estimated_mean) * inv_stddev
|
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import ListView, TemplateView, FormView, View
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext as _
from django.contrib.formtools.wizard.views import SessionWizardView as MyWizardView
from .forms import *
from .models import *
import re
class LoginRequiredMixin:
    """Mixin that wraps the generated view callable in ``login_required``."""
    @classmethod
    def as_view(cls, **initkwargs):
        view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
        return login_required(view)
# Model fields exposed on the voting-event create/edit forms.
VOTING_EVENT_FIELDS = ['title', 'description', 'starting_date', 'expiration_date',
                       'allow_revote']
class VotingEventList(LoginRequiredMixin, ListView):
    """List all voting events, latest expiration date first."""
    model = VotingEvent
    queryset = model.objects.all().order_by('-expiration_date')
    template_name = 'voting/voting_event_list.html'
class VotingEventCreate(LoginRequiredMixin, CreateView):
    """Create a voting event, then jump straight to its edit page."""
    model = VotingEvent
    fields = VOTING_EVENT_FIELDS
    template_name = 'voting/voting_event_create.html'
    context_object_name = 'event'

    def get_success_url(self):
        # continue editing the freshly created event
        return self.object.url_edit
class VotingEventEdit(LoginRequiredMixin, UpdateView):
    """Edit an existing voting event; returns to the event list on success."""
    model = VotingEvent
    fields = VOTING_EVENT_FIELDS
    template_name = 'voting/voting_event_edit.html'
    context_object_name = 'event'
    success_url = reverse_lazy('voting_event_list')
class VotingEventDelete(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete view for a voting event."""
    model = VotingEvent
    template_name = 'voting/voting_event_delete.html'
    success_url = reverse_lazy('voting_event_list')
    context_object_name = 'event'
class VotingEventStatus(LoginRequiredMixin, DetailView):
    """Show one voting event with its candidates ranked by vote count."""
    template_name = 'voting/voting_event_status.html'
    model = VotingEvent
    context_object_name = 'event'

    def get_context_data(self, **kwargs):
        ctx = super(VotingEventStatus, self).get_context_data(**kwargs)
        # rank candidates by number of obtained votes, most votes first
        ctx['candidates'] = sorted(
            self.object.candidates.all(),
            key=lambda candidate: candidate.voters.all().count(),
            reverse=True)
        return ctx
class RedirectToVotingEvent:
    """Mixin: after a successful form, go back to the object's event edit page."""
    def get_success_url(self, **kwargs):
        event = self.object.event
        return event.url_edit
# candidate views

# Model fields exposed on the candidate create/edit forms.
CANDIDATE_FIELDS = ['full_name']
class CandidateMixin(LoginRequiredMixin, RedirectToVotingEvent):
    """Common base for candidate views: requires login, redirects to the event."""
    pass
class CandidateCreate(CandidateMixin, CreateView):
    """Add a candidate to a voting event (event pk comes from the URL kwargs)."""
    model = Candidate
    fields = CANDIDATE_FIELDS
    template_name = 'voting/candidate_create.html'

    def form_valid(self, form):
        # attach the new candidate to the event named in the URL before saving
        form.instance.event = VotingEvent.objects.get(pk=self.kwargs['event'])
        return super(CandidateCreate, self).form_valid(form)

    def get_context_data(self, **kwargs):
        context = super(CandidateCreate, self).get_context_data(**kwargs)
        context['voting_event'] = VotingEvent.objects.get(pk=self.kwargs['event'])
        return context
class CandidateEdit(CandidateMixin, UpdateView):
    """Edit a candidate's fields."""
    model = Candidate
    fields = CANDIDATE_FIELDS
    template_name = 'voting/candidate_edit.html'
class CandidateDelete(CandidateMixin, DeleteView):
    """Confirm-and-delete view for a candidate."""
    model = Candidate
    template_name = 'voting/candidate_delete.html'
# voter views
# A voting event usually has a large number of eligible voters. Therefore, the
# voter-adding interface is implemented as a custom form that enables the
# administrator to add multiple voters quickly.
class VoterMixin(LoginRequiredMixin):
    """Base for voter views; resolves the event from the URL kwargs."""
    def get_voting_event(self):
        return VotingEvent.objects.get(pk=self.kwargs['event'])
class AddVoterWizard(VoterMixin, MyWizardView):
    """Two-step wizard: paste a voter list (step 0), then confirm and save."""
    template_name = 'voting/add_voter_wizard.html'
    form_list = [AddVoterForm, AddVoterConfirmForm]

    def get_success_url(self, **kwargs):
        return self.get_voting_event().url_edit

    def get_context_data(self, **kwargs):
        context = super(AddVoterWizard, self).get_context_data(**kwargs)
        context['voting_event'] = self.get_voting_event()
        # pass in the generated voters in the last step
        if self.steps.current == self.steps.last:
            data = self.get_cleaned_data_for_step('0')
            voters = parse_voters(data['voters_input']) # this step shouldn't fail
            context['voters'] = voters
        return context

    def done(self, form_list, form_dict, **kwargs):
        # persist the voters entered in step 0 under the selected event
        voter_form = form_dict['0']
        event = self.get_voting_event()
        voter_form.save(voting_event=event)
        return HttpResponseRedirect(self.get_success_url())
class VoterList(VoterMixin, ListView):
    """List all voters registered for one voting event."""
    template_name = 'voting/voter_list.html'
    context_object_name = 'voters'

    def get_queryset(self):
        self.event = self.get_voting_event()
        return Voter.objects.filter(event=self.event)

    def get_context_data(self, **kwargs):
        context = super(VoterList, self).get_context_data(**kwargs)
        context['event'] = self.get_voting_event()
        return context
# same as VoterList, but using a different template
class VotersPrint(VoterList):
    """Printable variant of VoterList (same data, print template)."""
    template_name = 'voting/voters_print.html'
# same as VotersPrint but with different template
class VotingResultPrint(VotersPrint):
    """Printable results page (same data as VotersPrint, result template)."""
    template_name = 'voting/voting_result_print.html'
# vote views
# these views don't require login
class WelcomePage(FormView):
    """Voter sign-in page: checks (username, passphrase) and opens a session.

    On success, stores ``voter_id`` in the session and redirects to the
    voting page; otherwise re-renders the form with an error message.
    """
    form_class = CheckInfoForm
    template_name = 'voting/welcome_page.html'

    def dispatch(self, request, *args, **kwargs):
        # redirect to voting page if session['voter_id'] is already set
        if 'voter_id' in self.request.session:
            return HttpResponseRedirect(reverse('vote'))
        return super(WelcomePage, self).dispatch(request, *args, **kwargs)

    def get_initial(self):
        # start with empty credentials
        return {'username': '',
                'passphrase': ''}

    def form_valid(self, form):
        data = form.cleaned_data
        # find possible users with (username, passphrase)
        voters = Voter.objects.filter(username=data['username'],
                                      passphrase=data['passphrase'])
        if voters:
            # TODO: check for multiple voters with the same (username, passphrase)
            voter = voters[0]
            if voter.event.is_expired:
                error = _("Sorry, the voting event has expired.")
            elif (not voter.event.allow_revote) and voter.voted:
                error = _("You already voted and can't vote again!")
            else:
                self.request.session['voter_id'] = voter.pk
                return HttpResponseRedirect(reverse('vote'))
        else:
            error = _("The username or passphrase you input is invalid")
        # validation error, display the form again
        context = {
            'form': form,
            'error': error,
        }
        return render(self.request, self.template_name, context)
class VoteView(View):
    """Ballot page: display candidates, confirm the choices, record the votes.

    Access requires ``session['voter_id']`` (set by WelcomePage); requests
    without a valid, unexpired voter are redirected to the welcome page.
    """
    template_name = 'voting/vote.html'
    http_method_names = ['get', 'post']

    def dispatch(self, request, *args, **kwargs):
        # redirect to welcome_page if no voter information in session
        if 'voter_id' not in request.session:
            return HttpResponseRedirect(reverse('welcome_page'))
        try:
            voter = Voter.objects.get(pk=request.session['voter_id'])
        except Voter.DoesNotExist:
            # stale session pointing at a deleted voter
            self.clear_session()
            return HttpResponseRedirect(reverse('welcome_page'))
        # enforce expiration date
        if voter.event.is_expired:
            return HttpResponseRedirect(reverse('welcome_page'))
        return super(VoteView, self).dispatch(request, *args, **kwargs)

    def get_voter(self):
        """Return the Voter bound to the current session."""
        return Voter.objects.get(pk=self.request.session['voter_id'])

    def get(self, request):
        # plain GET just shows the ballot form
        return self.display_form(request)

    def display_form(self, request, error=None, default_choices=None):
        """Render the ballot form, optionally with an error and preset choices."""
        voter = self.get_voter()
        context = {
            'candidates': voter.event.candidates.all().order_by('pk'),
            'event': voter.event,
            'voter': voter,
            'error': error,
            'choices': default_choices,
        }
        return render(request, self.template_name, context)

    def display_confirm_page(self, request, choices):
        """Render the confirmation page for the submitted choices."""
        return render(request, 'voting/vote_confirm.html', {
            # Convert the choices dict to a tuple list (candidate, choice).
            # The tuple list is sorted by the candidate id
            # FIXME: this is not covered by the unit test
            'choices_tuple': sorted(list(choices.items()),
                                    key=lambda x: x[0].pk),
        })

    def clear_session(self):
        """Drop the voter id from the session, if present."""
        self.request.session.pop('voter_id', None)

    def post(self, request):
        # TODO: form display and validation should be extracted to a form class
        voter = self.get_voter()
        # collect submitted choices keyed by Candidate instance
        choices = {}
        for choice in request.POST:
            match = re.match(r'^candidate_(?P<event>\d+)$', choice)
            if match:
                candidate_id = match.group('event')
                candidate = Candidate.objects.get(pk=candidate_id)
                choices[candidate] = request.POST[choice]
        if 'cancel' in request.POST: # user cancels, return to welcome page
            self.clear_session()
            return HttpResponseRedirect(reverse('welcome_page'))
        if not choices:
            return self.display_form(request,
                                     error=_("Please choose one candidate"))
        # check if the candidates are all in the same event
        for candidate in choices:
            if candidate.event != voter.event:
                return self.display_form(request,
                                         error=_("The candidate you chose doesn't belong to this vote!"))
        if 'confirm' in request.POST:
            for candidate in voter.event.candidates.all():
                if (candidate not in choices) or (choices[candidate] == 'N'): # not selected
                    voter.unvote(candidate)
                else:
                    agree = (choices[candidate] == 'A')
                    # vote
                    voter.vote_for(candidate, agree=agree)
            voter.save()
            self.clear_session()
            return render(request, 'voting/end_message.html')
        elif 'modify' in request.POST:
            return self.display_form(request, error=None,
                                     default_choices=choices)
        else: # not confirmed
            return self.display_confirm_page(request, choices)
|
|
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.six.moves import xrange
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message="This is not a valid IPv6 address"):
    """
    Cleans an IPv6 address string.

    Validity is checked by calling is_valid_ipv6_address() - if an
    invalid address is passed, ValidationError is raised.

    Replaces the longest continuous zero-sequence with "::" and
    removes leading zeroes and makes sure all hextets are lowercase.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: An error message used in the ValidationError.

    Returns:
        A compressed IPv6 address, or the same value
    """
    # trackers for the longest run of all-zero hextets seen so far
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0

    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message)

    # This algorithm can only handle fully exploded
    # IP strings
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)

    # If needed, unpack the IPv4 and return straight away
    # - no need in running the rest of the algorithm
    if unpack_ipv4:
        ipv4_unpacked = _unpack_ipv4(ip_str)
        if ipv4_unpacked:
            return ipv4_unpacked

    hextets = ip_str.split(":")

    for index in range(len(hextets)):
        # Remove leading zeroes
        hextets[index] = hextets[index].lstrip('0')
        if not hextets[index]:
            hextets[index] = '0'

        # Determine best hextet to compress
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            doublecolon_len = 0
            doublecolon_start = -1

    # Compress the most suitable hextet
    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        # splicing in '' makes ':'.join produce the '::' shortener
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    result = ":".join(hextets)

    return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in a expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
    """
    Ensure we have a valid IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.

    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # NOTE(review): imported locally, presumably to avoid a circular import
    # with django.core.validators -- confirm
    from django.core.validators import validate_ipv4_address

    # We need to have at least one ':'.
    if ':' not in ip_str:
        return False

    # We can only have one '::' shortener.
    if ip_str.count('::') > 1:
        return False

    # '::' should be encompassed by start, digits or end.
    if ':::' in ip_str:
        return False

    # A single colon can neither start nor end an address.
    if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
            (ip_str.endswith(':') and not ip_str.endswith('::'))):
        return False

    # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
    if ip_str.count(':') > 7:
        return False

    # If we have no concatenation, we need to have 8 fields with 7 ':'.
    if '::' not in ip_str and ip_str.count(':') != 7:
        # We might have an IPv4 mapped address.
        if ip_str.count('.') != 3:
            return False

    ip_str = _explode_shorthand_ip_string(ip_str)

    # Now that we have that all squared away, let's check that each of the
    # hextets are between 0x0 and 0xFFFF.
    for hextet in ip_str.split(':'):
        if hextet.count('.') == 3:
            # If we have an IPv4 mapped address, the IPv4 portion has to
            # be at the end of the IPv6 portion.
            if not ip_str.split(':')[-1] == hextet:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # a value error here means that we got a bad hextet,
                # something like 0xzzzz
                if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """
    Expand a shortened IPv6 address.
    Args:
        ip_str: A string, the IPv6 address.
    Returns:
        A string, the expanded IPv6 address.
    """
    if not _is_shorthand_ip(ip_str):
        # We've already got a longhand ip_str.
        return ip_str
    new_ip = []
    hextet = ip_str.split('::')
    # If there is a ::, we need to expand it with zeroes
    # to get to 8 hextets - unless there is a dot in the last hextet,
    # meaning we're doing v4-mapping
    if '.' in ip_str.split(':')[-1]:
        fill_to = 7
    else:
        fill_to = 8
    if len(hextet) > 1:
        sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
        new_ip = hextet[0].split(':')
        # BUG FIX: xrange is Python 2 only; range behaves identically here
        # and keeps the helper working on Python 3.
        for _ in range(fill_to - sep):
            new_ip.append('0000')
        new_ip += hextet[1].split(':')
    else:
        new_ip = ip_str.split(':')
    # Now need to make sure every hextet is 4 lower case characters.
    # If a hextet is < 4 characters, we've got missing leading 0's.
    ret_ip = []
    for hextet in new_ip:
        ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
    return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if [x for x in ip_str.split(':') if len(x) < 4]:
return True
return False
|
|
#Minecraft Ordance Survey Sat Nav
#www.stuffaboutcode.com
#Martin O'Hanlon
import urllib, urllib2
import json
import mcpi.minecraft as minecraft
import mcpi.block as block
import mcpi.minecraftstuff as minecraftstuff
import time
import cmd
from math import floor
from OSConversion import *
#MapQuest API Key used for the directions and geocoding web API calls
MAPQUESTAPIKEY = "Fmjtd%7Cluurn9u825%2Crx%3Do5-9wzsg0"
#spawn location: absolute minecraft x,y,z of the world spawn point.
#Raspberry Juice reports player coordinates relative to spawn (0,0,0),
#so these offsets convert between absolute map and spawn-relative positions.
SPAWNX = 17493
SPAWNY = 40
SPAWNZ = 47383
#minecraft ordinance survey sat nav
class MinecraftOSNav():
    """Sat nav for the Ordnance Survey 'GB in Minecraft' world.

    Resolves real-world locations via the MapQuest directions/geocoding
    web APIs, converts lat/lon to the OSGB-derived Minecraft x/z grid and
    either teleports the player or draws and follows a route between two
    places.
    """

    def __init__(self):
        pass

    #calls the mapquest directions api
    def callDirectionsAPI(self, origin, destination):
        """Call the MapQuest directions API.

        Args:
            origin: a string MapQuest can resolve (address or "lat,lon").
            destination: a string MapQuest can resolve.
        Returns:
            The parsed JSON response as a dict.
        """
        #the mapquest directions api url
        URL = "http://open.mapquestapi.com/directions/v2/route?key=" + MAPQUESTAPIKEY + "&ambiguities=ignore&"
        #build the url for this search, appending origin and destination
        directionsURL = URL + urllib.urlencode({"from": origin, "to": destination})
        #call the api and parse the json response
        req = urllib2.Request(directionsURL)
        res = urllib2.urlopen(req).read()
        return json.loads(res)

    #calls the mapquest geocoding api
    def callGeocodingAPI(self, location):
        """Call the MapQuest geocoding API for *location*.

        Returns:
            The parsed JSON response as a dict.
        """
        #the mapquest geocoding api url
        URL = "http://open.mapquestapi.com/geocoding/v1/address?key=" + MAPQUESTAPIKEY + "&ambiguities=ignore&"
        #build the url for this search, appending the location
        geocodingURL = URL + urllib.urlencode({"location": location})
        #call the api and parse the json response
        req = urllib2.Request(geocodingURL)
        res = urllib2.urlopen(req).read()
        return json.loads(res)

    def convertLatLonToMCXZ(self, lat, lon):
        """Convert WGS84 lat/lon to minecraft OSGB map x,z coordinates.

        Reverse engineered from
        http://www.ordnancesurvey.co.uk//innovate/developers/minecraft-coordinate-inset.html
        - each block is a 25m OS grid square.
        """
        easting, northing = WGS84toOSGB36(lat, lon)
        x = floor(easting / 25)
        z = floor((1300000 - northing) / 25)
        return int(x), int(z)

    def convertMCXZToLatLon(self, x, z):
        """Convert minecraft OSGB map x,z back to WGS84 lat/lon."""
        easting = floor(x * 25)
        northing = floor(((z * 25) - 1300000) * -1)
        lat, lon = OSGB36toWGS84(easting, northing)
        return lat, lon

    # converts minecraft xyz to be reflective of spawn point
    # as raspberry juice uses spawn point as 0,0,0
    def convertXYZToRaspiXYZ(self, x, y, z):
        return int(x - SPAWNX), int(y - SPAWNY), int(z - SPAWNZ)

    # Vec3 version of convertXYZToRaspiXYZ
    def convertVec3ToRaspiVec3(self, vec3):
        return minecraft.Vec3(int(vec3.x - SPAWNX), int(vec3.y - SPAWNY), int(vec3.z - SPAWNZ))

    # converts xyz which comes from raspi to actual co-ordinates
    def convertRaspiXYZtoXYZ(self, x, y, z):
        return int(x + SPAWNX), int(y + SPAWNY), int(z + SPAWNZ)

    # Vec3 version of convertRaspiXYZtoXYZ
    def convertRaspiVec3toVec3(self, vec3):
        return minecraft.Vec3(int(vec3.x + SPAWNX), int(vec3.y + SPAWNY), int(vec3.z + SPAWNZ))

    def convertDirectionsIntoMinecraftDirections(self, directions):
        """Turn a MapQuest directions response into a list of
        [spawn-relative Vec3, narrative] steps, one per maneuver."""
        mcDirections = []
        #loop over every leg of the journey and every maneuver within it
        for leg in directions["route"]["legs"]:
            for step in leg["maneuvers"]:
                x, z = self.convertLatLonToMCXZ(step["startPoint"]["lat"], step["startPoint"]["lng"])
                #y=100 is a placeholder - the route is drawn relative to the
                #ground height (mc.getHeight) when followed
                mcDirections.append([self.convertVec3ToRaspiVec3(minecraft.Vec3(x, 100, z)), step["narrative"]])
        return mcDirections

    #create the route in Minecraft
    def followRoute(self, mc, mcDirections):
        """Guide the player through *mcDirections*, drawing each leg as a
        wool line above the ground and waiting until the player reaches
        the end of the leg before moving on."""
        #find the first direction
        direction1 = mcDirections[0]
        point1 = direction1[0]
        narrative = direction1[1]
        #move player to the first coord, at ground level
        mc.player.setTilePos(point1.x, mc.getHeight(point1.x, point1.z), point1.z)
        #loop through all the remaining points
        for direction2 in mcDirections[1:]:
            #put the directions on the screen
            mc.postToChat(narrative.encode('ascii', 'ignore'))
            #draw the line between the points, 3 blocks above ground, in wool
            point2 = direction2[0]
            self.drawLineLevelWithGround(mc,
                                         point1.x, point1.y, point1.z,
                                         point2.x, point2.y, point2.z,
                                         3,
                                         block.WOOL.id, 15)
            #wait till the player has reached the point
            #NOTE(review): with 'and', the wait ends as soon as EITHER
            #coordinate matches - possibly a deliberately forgiving arrival
            #test; confirm before changing to 'or'
            pos = mc.player.getTilePos()
            while(pos.x != point2.x and pos.z != point2.z):
                pos = mc.player.getTilePos()
                time.sleep(0.1)
            #move onto the next point
            direction1 = direction2
            point1 = direction1[0]
            narrative = direction1[1]

    #clear the route in minecraft
    def clearRoute(self, mc, mcDirections):
        """Erase a route previously drawn by followRoute by re-drawing each
        leg with air, relative to the current surface height."""
        point1 = mcDirections[0][0]
        #loop through all the points, clearing each leg
        for direction2 in mcDirections[1:]:
            point2 = direction2[0]
            self.drawLineLevelWithGround(mc,
                                         point1.x, point1.y, point1.z,
                                         point2.x, point2.y, point2.z,
                                         -1,
                                         block.AIR.id)
            #move onto the next point
            point1 = point2

    #draw a line between 2 points level with the ground
    def drawLineLevelWithGround(self, mc, x1, y1, z1, x2, y2, z2, heightAboveGround, blockType, blockData = 0):
        """Draw a straight line of *blockType* between two points, placing
        each block *heightAboveGround* above the ground at that column."""
        #create minecraft drawing object, used to create the lines
        mcDrawing = minecraftstuff.MinecraftDrawing(mc)
        #get the blocks making up the line
        blocksInLine = mcDrawing.getLine(x1, y1, z1, x2, y2, z2)
        #loop through blocks in line; the first block is skipped (presumably
        #to avoid redrawing the joint between consecutive legs - confirm)
        for blockInLine in blocksInLine[1:]:
            #place the block relative to the height of the land
            y = mc.getHeight(blockInLine.x, blockInLine.z) + heightAboveGround
            mc.setBlock(blockInLine.x, y, blockInLine.z, blockType, blockData)

    def _followAndClearRoute(self, mc, directions, routeMessage):
        """Shared success path for navigate/navigateFrom: convert the
        directions, announce the route, follow it, then clear it."""
        mcDirections = self.convertDirectionsIntoMinecraftDirections(directions)
        mc.postToChat(routeMessage)
        #follow the route
        self.followRoute(mc, mcDirections)
        #when finished - clear the route
        mc.postToChat("You have reached your destination. Clearing route")
        self.clearRoute(mc, mcDirections)
        mc.postToChat("Route cleared")

    #navigate from a location to a destination
    def navigateFrom(self, origin, destination):
        """Navigate between two named locations."""
        print("Getting Directions Courtesy of MapQuest")
        #get the directions from mapquest api
        directions = self.callDirectionsAPI(origin, destination)
        #statuscode 0 means the mapquest call succeeded
        if directions["info"]["statuscode"] == 0:
            #create connection to minecraft
            mc = minecraft.Minecraft.create()
            self._followAndClearRoute(mc, directions,
                                      "Route calculated between " + origin + " and " + destination)
        #the call to mapquest api failed
        else:
            print("Failed to get directions - status = {}, messages = '{}'".format(directions["info"]["statuscode"], directions["info"]["messages"][0].encode('ascii', 'ignore')))

    #navigate from the players current location to a destination
    def navigate(self, destination):
        """Navigate from the player's current position to *destination*."""
        #create connection to minecraft
        mc = minecraft.Minecraft.create()
        #turn the player's tile position into a "lat,lon" origin string
        pos = mc.player.getTilePos()
        actualPos = self.convertRaspiVec3toVec3(pos)
        lat, lon = self.convertMCXZToLatLon(actualPos.x, actualPos.z)
        origin = str(lat) + "," + str(lon)
        #get directions
        directions = self.callDirectionsAPI(origin, destination)
        #was the call a success?
        if directions["info"]["statuscode"] == 0:
            self._followAndClearRoute(mc, directions,
                                      "Route calculated between your position and " + destination)
        #the call to mapquest api failed
        else:
            print("Failed to get directions - status = {}, messages = '{}'".format(directions["info"]["statuscode"], directions["info"]["messages"][0].encode('ascii', 'ignore')))

    #teleport the player to a location
    def teleport(self, location):
        """Geocode *location* and move the player to the first match."""
        #call the mapquest api to find the location
        locations = self.callGeocodingAPI(location)
        #was the call a success?
        if locations["info"]["statuscode"] == 0:
            #was a location found?
            if len(locations["results"][0]["locations"]) > 0:
                #get lat and lon for first location found
                lat = locations["results"][0]["locations"][0]["latLng"]["lat"]
                lon = locations["results"][0]["locations"][0]["latLng"]["lng"]
                #get minecraft x,z
                x, z = self.convertLatLonToMCXZ(lat, lon)
                #convert this to be relative to spawn
                x, y, z = self.convertXYZToRaspiXYZ(x, 0, z)
                #create connection to minecraft
                mc = minecraft.Minecraft.create()
                #drop the player on the surface
                y = mc.getHeight(x, z)
                mc.player.setTilePos(x, y, z)
                mc.postToChat("Teleported to " + location)
            else:
                print("No location could be found for '" + location + "'")
        else:
            #BUG FIX: this branch previously formatted the undefined name
            #'directions' (a NameError at runtime); the geocoding response
            #is held in 'locations'.
            print("Failed to get location - status = {}, messages = '{}'".format(locations["info"]["statuscode"], locations["info"]["messages"][0].encode('ascii', 'ignore')))
# Class to manage the command line interface
class NavigationCommands(cmd.Cmd):
    """Interactive command shell for the Minecraft OS sat-nav.

    Each do_* docstring doubles as the help text shown by cmd's built-in
    `help` command.
    """

    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = "OS Map Nav >> "
        self.intro = "Minecraft OS Sat-Nav - www.stuffaboutcode.com"
        #create navigation object
        self.nav = MinecraftOSNav()

    def do_exit(self, args):
        "Exit navigation [exit]"
        #returning non-None/True stops cmd's command loop
        return -1

    def do_navigate(self, destination):
        "Navigate to a destination [navigate <destination>]"
        self.nav.navigate(destination)

    def do_navigateFrom(self, args):
        #BUG FIX: the help text previously advertised the wrong command
        #name ("navigate" instead of "navigateFrom")
        "Navigate from a start position to a destination [navigateFrom <start>,<destination>]"
        #split the comma separated arguments
        args = args.split(",")
        if len(args) == 2:
            self.nav.navigateFrom(args[0], args[1])
        else:
            print("Error: expected 2 arguments")

    def do_teleport(self, location):
        "Teleport to a location [teleport <location>]"
        self.nav.teleport(location)

    def do_EOF(self, line):
        #end the command loop on Ctrl-D / end of piped input
        return True
#Main program entry point
if __name__ == "__main__":
    #start the interactive command loop; blocks until exit/EOF
    NavigationCommands().cmdloop()
|
|
# python 3 headers, required if submitting to Ansible
from __future__ import (absolute_import, division, print_function)
import os
__metaclass__ = type
# Ansible lookup-plugin documentation block (parsed as YAML by Ansible).
# Fixes: author name typo ("Lyunch" vs the Chris.Lynch email),
# "interogate" -> "interrogate", "globing" -> "globbing".
DOCUMENTATION = """
    lookup: sas_license
    author: Chris Lynch <Chris.Lynch@sas.com>
    version_added: "2.4.2"
    short_description: read a Viya license file and determine version information
    description:
        - This lookup returns a dictionary of attributes about a sas license file
    options:
      _terms:
        description: path(s) of license file zip to interrogate
        required: True
    notes:
      - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.
      - this lookup does not understand 'globbing' - use the fileglob lookup instead.
"""
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
import sys
from tempfile import mkdtemp
import zipfile
import re
import datetime
import shutil
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SasLicenseCPU:
    """One CPU restriction block parsed from a SAS setinit.

    The SERIAL field, when it starts with '+', carries the number of
    licensed cores for products restricted to this CPU block.
    """

    def __init__(self, identifier, model, modnum, serial):
        # setinit-internal name for this CPU block, e.g. 'CPU001'
        self.identifier = identifier
        # cpu model this block is restricted to (blank means any model)
        self.model = model
        # cpu model number this block is restricted to (blank means any)
        self.modnum = modnum
        # licensed cpu count in '+NNNN' form; as understood, this is the
        # maximum number of threads a process may spawn
        self.serial = serial

    def get_ansible_dict(self):
        """Return this CPU block as a plain dict suitable for Ansible facts."""
        serial = self.serial
        result = {'identifier': self.identifier,
                  'model': self.model,
                  'modnum': self.modnum,
                  'serial': serial}
        # '+NNNN' carries an explicit core count; anything else maps to -1
        result['licensed_cores'] = int(serial.lstrip('+')) if serial.startswith('+') else -1
        return result

    @staticmethod
    def get_from_cpu_line(line):
        """Parse a "CPU MODEL=..." setinit imperative; None when no match."""
        match = re.search(
            "CPU[ ]*MODEL[ ]*=[ ]*'([^']*)'[ ]*MODNUM[ ]*=[ ]*'([^']*)'[ ]*SERIAL[ ]*=[ ]*'([^']*)'[ ]*NAME[ ]*=[ ]*(CPU[0-9]{3,})",
            line)
        if match is None:
            return None
        # groups: 1=model, 2=modnum, 3=serial, 4=identifier (NAME)
        return SasLicenseCPU(match.group(4).strip(' '),
                             match.group(1).strip(' '),
                             match.group(2).strip(' '),
                             match.group(3).strip(' '))
class SasLicenseProduct:
    """A licensed SAS product, with its expiry date and CPU restrictions."""

    def __init__(self, product_identifier, name):
        self.identifier = product_identifier
        self.name = name
        # SasLicenseCPU blocks referenced by this product's EXPIRE imperative
        self.cpu_blocks = []
        # datetime filled in while parsing the EXPIRE imperatives
        self.expiration_date = None

    def __str__(self):
        # collect the core counts of every cpu block that carries a serial
        counts = [cpu.serial.lstrip("+") for cpu in self.cpu_blocks
                  if cpu.serial.strip(' ') != '']
        cpu_string = ', '.join(counts)
        ret = "Sas Product '{}' licensed till {} ".format(self.name, self.expiration_date)
        if cpu_string != '':
            ret += "for {} cores".format(cpu_string)
        return ret

    def get_ansible_dict(self):
        """Return the product as a plain dict suitable for Ansible facts."""
        result = {'identifier': self.identifier,
                  'name': self.name,
                  'expiration_date': self.expiration_date.strftime('%Y-%m-%d')}
        # the 'cpus' key only appears when there is at least one cpu block,
        # matching the original output shape
        if self.cpu_blocks:
            result['cpus'] = [cpu.get_ansible_dict() for cpu in self.cpu_blocks]
        return result

    @staticmethod
    def get_from_license_line(line):
        """Parse a "*PRODNUMnnn = name;" mapping line; None when no match."""
        match = re.search('\\*(PRODNUM[0-9]{3,}) = ([^;]+)', line)
        if match is None:
            return None
        return SasLicenseProduct(match.group(1), match.group(2))
class SasLicenseSite:
    """Site-identification block of a SAS setinit (site number, name and
    expiry/grace metadata)."""

    def __init__(self, site_number, name):
        self.number = site_number
        self.name = name
        # the remaining attributes are filled in by whoever parses the
        # site block of the setinit
        self.osname = None
        self.warn_days = None
        self.grace_days = None
        self.birthday = None
        self.expire = None
        self.password = None

    @staticmethod
    def get_from_license_line(line):
        """Parse a license line; returns None when the line does not match.

        BUG FIX: the original body called re.findall() with no arguments
        (always a TypeError) and tested `if line:` instead of the match,
        so non-matching lines raised AttributeError on match.group().
        NOTE(review): the pattern and return value are copied from
        SasLicenseProduct and still build a SasLicenseProduct, not a site -
        this looks like an unfinished stub; confirm the intended SITE line
        format before relying on it.
        """
        ret = None
        match = re.search('\\*(PRODNUM[0-9]{3,}) = ([^;]+)', line)
        if match is not None:
            identifier = match.group(1)
            name = match.group(2)
            ret = SasLicenseProduct(identifier, name)
        return ret
class SasLicense:
    """Parses a SAS Viya license zip into release, product, CPU and
    expiry information."""

    def __init__(self, license_file_path):
        """Read and parse the license zip at *license_file_path*."""
        self.license_file_path = license_file_path
        self.viya_version = None
        self.viya_major_version = None
        self.viya_minor_version = None
        self.release = None
        # identifier -> SasLicenseCPU
        self.cpus = {}
        # identifier -> SasLicenseProduct
        self.products = {}
        self.read_from_file()

    def read_from_file(self):
        """Extract the Linux setinit text from the license zip and parse it."""
        # default to Viya 3.3; the presence of a .jwt member marks a 3.4 license
        self.viya_version = "3.3"
        self.viya_major_version = 3
        self.viya_minor_version = 3
        temporary_dir = mkdtemp()
        target_license_information_file = ""
        with zipfile.ZipFile(self.license_file_path, mode='r') as zipf:
            for member in zipf.infolist():
                if member.filename.endswith('Linux_x86-64.txt'):
                    target_license_information_file = zipf.extract(member, temporary_dir)
                if member.filename.endswith('.jwt'):
                    self.viya_version = '3.4'
                    self.viya_minor_version = 4
        # NOTE(review): if the zip has no Linux_x86-64.txt member this opens
        # '' and raises - presumably every license zip contains one; confirm.
        with open(target_license_information_file, 'r') as setinit_file:
            setinit_string = setinit_file.read()
        self.parse_setinit(setinit_string)
        shutil.rmtree(temporary_dir)

    def parse_setinit(self, setinit_string):
        """Parse the PROC SETINIT text into products, cpus and release info."""
        # first split the file into before / inside / after the setinit block
        state = 1
        pre_setinit_tag = []
        in_setinit_tags = []
        post_setinit_tags = []
        for line in setinit_string.split('\n'):
            trim_line = line.strip('\r')
            # state 1: before the setinit block
            if state == 1:
                if trim_line.strip(' ').upper().startswith('PROC SETINIT'):
                    state += 1
                else:
                    pre_setinit_tag.append(trim_line)
            # state 2: inside the setinit block. Deliberately not elif: the
            # 'PROC SETINIT' line itself must be captured here too, because
            # the RELEASE= value is parsed from it further down.
            if state == 2:
                if trim_line.strip(' ').upper().startswith('SAVE; RUN;'):
                    state += 1
                else:
                    in_setinit_tags.append(trim_line)
            elif state == 3:
                post_setinit_tags.append(trim_line)
        # the footer maps product identifiers to human readable names; they
        # are joined into the EXPIRE imperatives below
        for line in post_setinit_tags:
            product = SasLicenseProduct.get_from_license_line(line)
            if product is not None:
                self.products[product.identifier] = product
        # chunk the setinit into its separate ';'-terminated imperatives,
        # which may span multiple physical lines
        tag_blocks = []
        tag_block = ""
        for line in in_setinit_tags:  # type: str
            splits = line.split(';')
            while True:
                if len(splits) > 1:
                    tag_block += splits[0]
                    tag_blocks.append(tag_block)
                    splits.remove(splits[0])
                    tag_block = ""
                else:
                    break
            tag_block += splits[0]
        # process the imperatives, first registering the CPU blocks
        for tag_block in tag_blocks:  # type: str
            if tag_block.lstrip(' ').upper().startswith('CPU '):
                cpu = SasLicenseCPU.get_from_cpu_line(tag_block)
                if cpu is not None:
                    self.cpus[cpu.identifier] = cpu
        for tag_block in tag_blocks:  # type: str
            if tag_block.lstrip(' ').upper().startswith('PROC SETINIT'):
                value_matches = re.finditer('([0-9A-Za-z]+)=("([^"]*)"|\'([^\']*)\'|[^;]+|[^\\s]+)D?', tag_block)
                for value_match in value_matches:
                    value_split = value_match.group(0).split("=")
                    if value_split[0].strip(" ").upper() == 'RELEASE':
                        # BUG FIX: previously stored value_split[0] - the
                        # literal key 'RELEASE' - instead of its value.
                        self.release = value_split[1].strip(" ").strip("'")
            if tag_block.lstrip(' ').upper().startswith('EXPIRE '):
                product_sets = []
                cpu_sets = []
                expire_date = None
                tag_split = tag_block.split(" ")
                # an EXPIRE imperative is a list of products, then a date,
                # then a CPU= set; tracked with a small state machine:
                # 1 = reading products, 2 = date seen, 3 = inside cpu list.
                # Other stages may exist but have not been observed.
                state = 1
                for tag in tag_split:
                    if tag == '':
                        pass
                    elif tag == 'EXPIRE':
                        state = 1
                    elif state == 1 and not tag.upper().endswith('D'):
                        product_sets.append(self.products[tag.strip("'")])
                    elif state == 1 and tag.upper().endswith('D'):
                        expire_date = SasLicense.parse_date(tag)
                        state = 2
                    elif state == 2 and tag.upper().startswith('CPU'):
                        split_tag = tag.split('=')
                        cpu_sets.append(self.cpus[split_tag[1]])
                        state = 3
                    elif state == 3:
                        cpu_sets.append(self.cpus[tag])
                # now associate all the products with their new expire date
                # and their cpu sets
                for product in product_sets:  # type: SasLicenseProduct
                    product.cpu_blocks.extend(cpu_sets)
                    product.expiration_date = expire_date

    def __str__(self):
        ret = 'SAS License for viya {}, marked as release {}'.format(self.viya_version, self.release)
        for product_key in self.products:
            ret += '\n\t{}'.format(self.products[product_key])
        return ret

    def get_ansible_dict(self):
        """Return the whole license as a plain dict of Ansible facts."""
        license_return = {'release': self.release,
                          'viya_version': self.viya_version,
                          'viya_version_major': self.viya_major_version,
                          'viya_version_minor': self.viya_minor_version}
        # the 'products' key only appears when at least one product parsed
        for product in self.products:
            if 'products' not in license_return:
                license_return['products'] = []
            license_return['products'].append(self.products[product].get_ansible_dict())
        return license_return

    @staticmethod
    def parse_date(date_string):
        """Parse a setinit date literal such as '01JAN2020'D into a datetime.

        Returns None when *date_string* is not a date literal.
        """
        ret = None  # type: datetime.datetime
        # Confirm that this is a date format string by checking for the
        # trailing D.
        if date_string.upper().endswith('D'):
            # BUG FIX: the endswith check above is case-insensitive but
            # rstrip('D') only removed an uppercase D, so a lowercase 'd'
            # suffix crashed strptime; strip both cases.
            date_string_reformated = date_string.rstrip('Dd')
            date_string_reformated = date_string_reformated.strip("'")
            ret = datetime.datetime.strptime(date_string_reformated, '%d%b%Y')
        return ret
class LookupModule(LookupBase):
    """Ansible lookup plugin: each term is a path to a Viya license zip,
    and each result is that license parsed into a dict of facts."""

    def run(self, terms, variables=None, **kwargs):
        # lookups take a list and return a list so they compose with the
        # `with_` looping constructs, even for a single license file
        results = []
        for term in terms:
            display.debug("License lookup term: %s" % term)
            # resolve the term against the standard Ansible plugin search
            # path ('files' relative locations)
            lookupfile = self.find_file_in_search_path(variables, 'files', term)
            # use the display object, never print/own logging
            display.vvvv(u"Sas License lookup using %s as file" % lookupfile)
            try:
                if not lookupfile:
                    # parser error signals invalid options; translated to a
                    # final AnsibleError below
                    raise AnsibleParserError()
                results.append(SasLicense(lookupfile).get_ansible_dict())
            except AnsibleParserError:
                raise AnsibleError("could not locate file in lookup: %s" % term)
        return results
|
|
from __future__ import division
from struct import unpack
from datetime import datetime
from niprov.basefile import BaseFile
class NeuroscanFile(BaseFile):
    """ Support for the Neuroscan .cnt format.
    """

    # field sizes in bytes used by _fread; note 'long'/'ulong' are 4 bytes
    # in this (32-bit era) file format
    _datatypes = {'char':1, 'uchar':1, 'long':4,'ulong':4,'short':2,'ushort':2,
                  'float':4, 'double':8,'int':4}

    def __init__(self, location, **kwargs):
        super(NeuroscanFile, self).__init__(location, **kwargs)

    def inspect(self):
        """Extend the base provenance dict with fields read from the .cnt
        header: subject, dimensions (channels x samples), sampling
        frequency, duration, acquisition time and modality.
        """
        provenance = super(NeuroscanFile, self).inspect()
        header = self._read()
        # header fields are raw bytes; translate(None, chars) is the
        # Python 2 deletion form, stripping the NUL padding
        provenance['subject'] = header.patient.translate(None, '\x00')
        # unpack as unsigned short/int (struct default: native byte order)
        nchannels = unpack('H',header.nchannels)[0]
        numsamples = unpack('I',header.numsamples)[0]
        sfreq = unpack('H', header.rate)[0]
        provenance['sampling-frequency'] = sfreq
        provenance['dimensions'] = [nchannels, numsamples]
        # duration in seconds (true division via the __future__ import)
        provenance['duration'] = numsamples/sfreq
        acqstring = (header.date+' '+header.time).translate(None, '\x00')
        # NOTE(review): assumes day-first dates in the header - confirm
        # against files written by the acquisition software
        dtformat = '%d/%m/%y %H:%M:%S'
        provenance['acquired'] = datetime.strptime(acqstring, dtformat)
        provenance['modality'] = 'EEG'
        return provenance

    def _fread(self, fhandle, size, dtype):
        """ Read bytes from file at current cursor position.
        Emulates MATLAB fread().

        Returns the raw bytes (size * bytes-per-dtype); callers unpack
        the values they need on demand.
        """
        precision = self._datatypes[dtype]
        nbytes = size*precision
        return fhandle.read(nbytes)

    def _read(self):
        """
        Read the Neuroscan CNT file header.
        This code is loosely based on the MATLAB function loadcnt.m by
        Sean Fitzgibbon and Arnaud Delorme which is available under the GPLv2
        license. Part of the original copyright statement and license are
        reproduced below (in the source code).
        """
        # Copyright (C) 2000 Sean Fitzgibbon, <psspf@id.psy.flinders.edu.au>
        # Copyright (C) 2003 Arnaud Delorme, Salk Institute, arno@salk.edu
        #
        # This program is free software; you can redistribute it and/or modify
        # it under the terms of the GNU General Public License as published by
        # the Free Software Foundation; either version 2 of the License, or
        # (at your option) any later version.
        #
        # This program is distributed in the hope that it will be useful,
        # but WITHOUT ANY WARRANTY; without even the implied warranty of
        # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
        # GNU General Public License for more details.
        #
        # You should have received a copy of the GNU General Public License
        # along with this program; if not, write to the Free Software
        # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
        #
        # Revision 1.21 2005/08/16 22:46:55 arno
        # allowing to read event type 3

        # plain attribute container for the header fields
        class CntHeader(object):
            pass
        h = CntHeader()
        # the fields below mirror the fixed CNT SETUP header layout and are
        # read strictly in order; each attribute holds the raw bytes, which
        # inspect() unpacks as needed. Do not reorder or resize any line.
        with self.filesystem.open(self.path,'rb') as fid:
            h.rev = self._fread(fid,12,'char')
            h.nextfile = self._fread(fid,1,'long')
            h.prevfile = self._fread(fid,1,'ulong')
            h.type = self._fread(fid,1,'char')
            h.id = self._fread(fid,20,'char')
            h.oper = self._fread(fid,20,'char')
            h.doctor = self._fread(fid,20,'char')
            h.referral = self._fread(fid,20,'char')
            h.hospital = self._fread(fid,20,'char')
            h.patient = self._fread(fid,20,'char')
            h.age = self._fread(fid,1,'short')
            h.sex = self._fread(fid,1,'char')
            h.hand = self._fread(fid,1,'char')
            h.med = self._fread(fid,20, 'char')
            h.category = self._fread(fid,20, 'char')
            h.state = self._fread(fid,20, 'char')
            h.label = self._fread(fid,20, 'char')
            h.date = self._fread(fid,10, 'char')
            h.time = self._fread(fid,12, 'char')
            h.mean_age = self._fread(fid,1,'float')
            h.stdev = self._fread(fid,1,'float')
            h.n = self._fread(fid,1,'short')
            h.compfile = self._fread(fid,38,'char')
            h.spectwincomp = self._fread(fid,1,'float')
            h.meanaccuracy = self._fread(fid,1,'float')
            h.meanlatency = self._fread(fid,1,'float')
            h.sortfile = self._fread(fid,46,'char')
            h.numevents = self._fread(fid,1,'int')
            h.compoper = self._fread(fid,1,'char')
            h.avgmode = self._fread(fid,1,'char')
            h.review = self._fread(fid,1,'char')
            h.nsweeps = self._fread(fid,1,'ushort')
            h.compsweeps = self._fread(fid,1,'ushort')
            h.acceptcnt = self._fread(fid,1,'ushort')
            h.rejectcnt = self._fread(fid,1,'ushort')
            h.pnts = self._fread(fid,1,'ushort')
            h.nchannels = self._fread(fid,1,'ushort')
            h.avgupdate = self._fread(fid,1,'ushort')
            h.domain = self._fread(fid,1,'char')
            h.variance = self._fread(fid,1,'char')
            h.rate = self._fread(fid,1,'ushort')
            h.scale = self._fread(fid,1,'double')
            h.veogcorrect = self._fread(fid,1,'char')
            h.heogcorrect = self._fread(fid,1,'char')
            h.aux1correct = self._fread(fid,1,'char')
            h.aux2correct = self._fread(fid,1,'char')
            h.veogtrig = self._fread(fid,1,'float')
            h.heogtrig = self._fread(fid,1,'float')
            h.aux1trig = self._fread(fid,1,'float')
            h.aux2trig = self._fread(fid,1,'float')
            h.heogchnl = self._fread(fid,1,'short')
            h.veogchnl = self._fread(fid,1,'short')
            h.aux1chnl = self._fread(fid,1,'short')
            h.aux2chnl = self._fread(fid,1,'short')
            h.veogdir = self._fread(fid,1,'char')
            h.heogdir = self._fread(fid,1,'char')
            h.aux1dir = self._fread(fid,1,'char')
            h.aux2dir = self._fread(fid,1,'char')
            h.veog_n = self._fread(fid,1,'short')
            h.heog_n = self._fread(fid,1,'short')
            h.aux1_n = self._fread(fid,1,'short')
            h.aux2_n = self._fread(fid,1,'short')
            h.veogmaxcnt = self._fread(fid,1,'short')
            h.heogmaxcnt = self._fread(fid,1,'short')
            h.aux1maxcnt = self._fread(fid,1,'short')
            h.aux2maxcnt = self._fread(fid,1,'short')
            h.veogmethod = self._fread(fid,1,'char')
            h.heogmethod = self._fread(fid,1,'char')
            h.aux1method = self._fread(fid,1,'char')
            h.aux2method = self._fread(fid,1,'char')
            h.ampsensitivity = self._fread(fid,1,'float')
            h.lowpass = self._fread(fid,1,'char')
            h.highpass = self._fread(fid,1,'char')
            h.notch = self._fread(fid,1,'char')
            h.autoclipadd = self._fread(fid,1,'char')
            h.baseline = self._fread(fid,1,'char')
            h.offstart = self._fread(fid,1,'float')
            h.offstop = self._fread(fid,1,'float')
            h.reject = self._fread(fid,1,'char')
            h.rejstart = self._fread(fid,1,'float')
            h.rejstop = self._fread(fid,1,'float')
            h.rejmin = self._fread(fid,1,'float')
            h.rejmax = self._fread(fid,1,'float')
            h.trigtype = self._fread(fid,1,'char')
            h.trigval = self._fread(fid,1,'float')
            h.trigchnl = self._fread(fid,1,'char')
            h.trigmask = self._fread(fid,1,'short')
            h.trigisi = self._fread(fid,1,'float')
            h.trigmin = self._fread(fid,1,'float')
            h.trigmax = self._fread(fid,1,'float')
            h.trigdir = self._fread(fid,1,'char')
            h.autoscale = self._fread(fid,1,'char')
            h.n2 = self._fread(fid,1,'short')
            h.dir = self._fread(fid,1,'char')
            h.dispmin = self._fread(fid,1,'float')
            h.dispmax = self._fread(fid,1,'float')
            h.xmin = self._fread(fid,1,'float')
            h.xmax = self._fread(fid,1,'float')
            h.automin = self._fread(fid,1,'float')
            h.automax = self._fread(fid,1,'float')
            h.zmin = self._fread(fid,1,'float')
            h.zmax = self._fread(fid,1,'float')
            h.lowcut = self._fread(fid,1,'float')
            h.highcut = self._fread(fid,1,'float')
            h.common = self._fread(fid,1,'char')
            h.savemode = self._fread(fid,1,'char')
            h.manmode = self._fread(fid,1,'char')
            h.ref = self._fread(fid,10,'char')
            h.rectify = self._fread(fid,1,'char')
            h.displayxmin = self._fread(fid,1,'float')
            h.displayxmax = self._fread(fid,1,'float')
            h.phase = self._fread(fid,1,'char')
            h.screen = self._fread(fid,16,'char')
            h.calmode = self._fread(fid,1,'short')
            h.calmethod = self._fread(fid,1,'short')
            h.calupdate = self._fread(fid,1,'short')
            h.calbaseline = self._fread(fid,1,'short')
            h.calsweeps = self._fread(fid,1,'short')
            h.calattenuator = self._fread(fid,1,'float')
            h.calpulsevolt = self._fread(fid,1,'float')
            h.calpulsestart = self._fread(fid,1,'float')
            h.calpulsestop = self._fread(fid,1,'float')
            h.calfreq = self._fread(fid,1,'float')
            h.taskfile = self._fread(fid,34,'char')
            h.seqfile = self._fread(fid,34,'char')
            h.spectmethod = self._fread(fid,1,'char')
            h.spectscaling = self._fread(fid,1,'char')
            h.spectwindow = self._fread(fid,1,'char')
            h.spectwinlength = self._fread(fid,1,'float')
            h.spectorder = self._fread(fid,1,'char')
            h.notchfilter = self._fread(fid,1,'char')
            h.headgain = self._fread(fid,1,'short')
            h.additionalfiles = self._fread(fid,1,'int')
            h.unused = self._fread(fid,5,'char')
            h.fspstopmethod = self._fread(fid,1,'short')
            h.fspstopmode = self._fread(fid,1,'short')
            h.fspfvalue = self._fread(fid,1,'float')
            h.fsppoint = self._fread(fid,1,'short')
            h.fspblocksize = self._fread(fid,1,'short')
            h.fspp1 = self._fread(fid,1,'ushort')
            h.fspp2 = self._fread(fid,1,'ushort')
            h.fspalpha = self._fread(fid,1,'float')
            h.fspnoise = self._fread(fid,1,'float')
            h.fspv1 = self._fread(fid,1,'short')
            h.montage = self._fread(fid,40,'char')
            h.eventfile = self._fread(fid,40,'char')
            h.fratio = self._fread(fid,1,'float')
            h.minor_rev = self._fread(fid,1,'char')
            h.eegupdate = self._fread(fid,1,'short')
            h.compressed = self._fread(fid,1,'char')
            h.xscale = self._fread(fid,1,'float')
            h.yscale = self._fread(fid,1,'float')
            h.xsize = self._fread(fid,1,'float')
            h.ysize = self._fread(fid,1,'float')
            h.acmode = self._fread(fid,1,'char')
            h.commonchnl = self._fread(fid,1,'uchar')
            h.xtics = self._fread(fid,1,'char')
            h.xrange = self._fread(fid,1,'char')
            h.ytics = self._fread(fid,1,'char')
            h.yrange = self._fread(fid,1,'char')
            h.xscalevalue = self._fread(fid,1,'float')
            h.xscaleinterval = self._fread(fid,1,'float')
            h.yscalevalue = self._fread(fid,1,'float')
            h.yscaleinterval = self._fread(fid,1,'float')
            h.scaletoolx1 = self._fread(fid,1,'float')
            h.scaletooly1 = self._fread(fid,1,'float')
            h.scaletoolx2 = self._fread(fid,1,'float')
            h.scaletooly2 = self._fread(fid,1,'float')
            h.port = self._fread(fid,1,'short')
            h.numsamples = self._fread(fid,1,'ulong')
            h.filterflag = self._fread(fid,1,'char')
            h.lowcutoff = self._fread(fid,1,'float')
            h.lowpoles = self._fread(fid,1,'short')
            h.highcutoff = self._fread(fid,1,'float')
            h.highpoles = self._fread(fid,1,'short')
            h.filtertype = self._fread(fid,1,'char')
            h.filterdomain = self._fread(fid,1,'char')
            h.snrflag = self._fread(fid,1,'char')
            h.coherenceflag = self._fread(fid,1,'char')
            h.continuoustype = self._fread(fid,1,'char')
            h.eventtablepos = self._fread(fid,1,'ulong')
            h.continuousseconds = self._fread(fid,1,'float')
            h.channeloffset = self._fread(fid,1,'long')
            h.autocorrectflag = self._fread(fid,1,'char')
            h.dcthreshold = self._fread(fid,1,'uchar')
        return h
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU embedding APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import math
import re
import six
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.tpu.ops import gen_tpu_ops
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.core.protobuf.tpu import optimization_parameters_pb2
from tensorflow.core.protobuf.tpu import tpu_embedding_configuration_pb2 as elc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# Mode constants re-exported from the TPUEmbeddingConfiguration proto:
# TRAINING enables gradient updates to the tables; INFERENCE is lookup-only.
TRAINING = elc.TPUEmbeddingConfiguration.TRAINING
INFERENCE = elc.TPUEmbeddingConfiguration.INFERENCE
class TableConfig(
    collections.namedtuple(
        'TableConfig',
        ['vocabulary_size', 'dimension', 'initializer', 'combiner'])):
  """Immutable configuration describing one embedding table."""

  @experimental
  def __new__(cls,
              vocabulary_size,
              dimension,
              initializer=None,
              combiner='mean'):
    """Creates a validated `TableConfig`.

    Args:
      vocabulary_size: Number of vocabulary (/rows) in the table.
      dimension: The embedding dimension.
      initializer: A variable initializer function to be used in embedding
        variable initialization. If not specified, defaults to
        `tf.truncated_normal_initializer` with mean `0.0` and standard
        deviation `1/sqrt(dimension)`.
      combiner: A string specifying how to reduce if there are multiple
        entries in a single row. Currently 'mean', 'sqrtn' and 'sum' are
        supported, with 'mean' the default. 'sqrtn' often achieves good
        accuracy, in particular with bag-of-words columns. For more
        information, see `tf.nn.embedding_lookup_sparse`.

    Returns:
      `TableConfig`.

    Raises:
      ValueError: if `vocabulary_size` is not positive integer.
      ValueError: if `dimension` is not positive integer.
      ValueError: if `initializer` is specified and is not callable.
      ValueError: if `combiner` is not supported.
    """
    # Validate each field up front so misconfiguration fails fast.
    if not (isinstance(vocabulary_size, int) and vocabulary_size > 0):
      raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))
    if not (isinstance(dimension, int) and dimension > 0):
      raise ValueError('Invalid dimension {}.'.format(dimension))
    if initializer is not None and not callable(initializer):
      raise ValueError('initializer must be callable if specified.')
    if initializer is None:
      # Default matches common embedding practice: stddev shrinks with
      # the embedding dimension.
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=1 / math.sqrt(dimension))
    if combiner not in ('mean', 'sum', 'sqrtn'):
      raise ValueError('Invalid combiner {}'.format(combiner))
    return super(TableConfig, cls).__new__(cls, vocabulary_size, dimension,
                                           initializer, combiner)
# Per-optimizer bundles of slot-variable *names* (strings), used when the
# caller wants to override the default variable naming.
AdamSlotVariableNames = collections.namedtuple(
    'AdamSlotVariableNames', ['m', 'v'])
AdagradSlotVariableName = collections.namedtuple(
    'AdagradSlotVariableName', ['accumulator'])
# Per-optimizer bundles of the created slot *variables* themselves.
AdamSlotVariables = collections.namedtuple(
    'AdamSlotVariables', ['m', 'v'])
AdagradSlotVariable = collections.namedtuple(
    'AdagradSlotVariable', ['accumulator'])
# Return type of `TPUEmbedding.create_variables_and_ops()`: the created
# variables plus functions producing the CPU<->TPU transfer ops.
VariablesAndOps = collections.namedtuple(
    'VariablesAndOps',
    ['embedding_variables_by_table', 'slot_variables_by_table',
     'load_ops', 'retrieve_ops']
)
# TODO(shizhiw): Factor `use_gradient_accumulation` and
# `pipeline_execution_with_tensor_core` out of `_OptimizationParameters`.
class _OptimizationParameters(object):
"""Parameters common to all optimizations."""
def __init__(self, learning_rate, use_gradient_accumulation,
pipeline_execution_with_tensor_core):
self.learning_rate = learning_rate
self.use_gradient_accumulation = use_gradient_accumulation
self.pipeline_execution_with_tensor_core = (
pipeline_execution_with_tensor_core)
class AdagradParameters(_OptimizationParameters):
  """Optimization parameters for the Adagrad optimizer."""

  def __init__(self, learning_rate, initial_accumulator,
               use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    """Creates Adagrad settings for TPU embedding.

    Args:
      learning_rate: used for updating embedding table.
      initial_accumulator: initial accumulator for Adagrad.
      use_gradient_accumulation: setting this to `True` makes embedding
        gradients calculation more accurate but slower. Please see
        `optimization_parameters.proto` for details.
      pipeline_execution_with_tensor_core: setting this to `True` makes
        training faster, but trained model will be different if step N and
        step N+1 involve the same set of embedding ID. Please see
        `tpu_embedding_configuration.proto` for details.
    """
    super(AdagradParameters, self).__init__(
        learning_rate, use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
    # Starting value for every entry of the Adagrad accumulator slot.
    self.initial_accumulator = initial_accumulator
class AdamParameters(_OptimizationParameters):
  """Optimization parameters for the Adam optimizer."""

  def __init__(self, learning_rate,
               beta1=0.9,
               beta2=0.999,
               epsilon=1e-08,
               lazy_adam=True,
               sum_inside_sqrt=True,
               use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    """Creates Adam settings for TPU embedding.

    Args:
      learning_rate: a floating point value. The learning rate.
      beta1: A float value. The exponential decay rate for the 1st moment
        estimates.
      beta2: A float value. The exponential decay rate for the 2nd moment
        estimates.
      epsilon: A small constant for numerical stability.
      lazy_adam: Use lazy Adam instead of Adam. Lazy Adam trains faster.
        Please see `optimization_parameters.proto` for details.
      sum_inside_sqrt: This improves training speed. Please see
        `optimization_parameters.proto` for details.
      use_gradient_accumulation: setting this to `True` makes embedding
        gradients calculation more accurate but slower. Please see
        `optimization_parameters.proto` for details.
      pipeline_execution_with_tensor_core: setting this to `True` makes
        training faster, but trained model will be different if step N and
        step N+1 involve the same set of embedding ID. Please see
        `tpu_embedding_configuration.proto` for details.
    """
    super(AdamParameters, self).__init__(
        learning_rate, use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
    self.beta1 = beta1
    self.beta2 = beta2
    self.epsilon = epsilon
    self.lazy_adam = lazy_adam
    self.sum_inside_sqrt = sum_inside_sqrt
class StochasticGradientDescentParameters(_OptimizationParameters):
  """Optimization parameters for stochastic gradient descent.

  SGD carries no extra state beyond the shared optimizer settings.

  Args:
    learning_rate: a floating point value. The learning rate.
    use_gradient_accumulation: setting this to `True` makes embedding
      gradients calculation more accurate but slower. Please see
      `optimization_parameters.proto` for details.
    pipeline_execution_with_tensor_core: setting this to `True` makes
      training faster, but trained model will be different if step N and
      step N+1 involve the same set of embedding ID. Please see
      `tpu_embedding_configuration.proto` for details.
  """

  def __init__(self, learning_rate, use_gradient_accumulation=False,
               pipeline_execution_with_tensor_core=True):
    super(StochasticGradientDescentParameters, self).__init__(
        learning_rate,
        use_gradient_accumulation,
        pipeline_execution_with_tensor_core)
class TPUEmbedding(object):
  """API for using TPU for embedding.

  Example:
    ```
    table_config_video = tpu_embedding.TableConfig(
        vocabulary_size=8, dimension=2,
        initializer=initializer, combiner='mean')
    table_config_user = tpu_embedding.TableConfig(
        vocabulary_size=4, dimension=2,
        initializer=initializer, combiner='mean')
    table_to_config_dict = {'video': table_config_video,
                            'user': table_config_user}
    feature_to_table_dict = {'watched': 'video',
                             'favorited': 'video',
                             'friends': 'user'}
    batch_size = 4
    num_hosts = 1
    optimization_parameters = tpu_embedding.AdagradParameters(1., 1.)
    mode = tpu_embedding.TRAINING
    embedding = tpu_embedding.TPUEmbedding(
        table_to_config_dict, feature_to_table_dict,
        batch_size, num_hosts, mode, optimization_parameters)

    batch_size_per_core = embedding.batch_size_per_core
    sparse_features_list = []
    for host in hosts:
      with ops.device(host):
        for _ in range(embedding.num_cores_per_host):
          sparse_features = {}
          sparse_features['watched'] = sparse_tensor.SparseTensor(...)
          sparse_features['favorited'] = sparse_tensor.SparseTensor(...)
          sparse_features['friends'] = sparse_tensor.SparseTensor(...)
          sparse_features_list.append(sparse_features)

    enqueue_ops = embedding.generate_enqueue_ops(sparse_features_list)
    embedding_variables_and_ops = embedding.create_variables_and_ops()

    def computation():
      activations = embedding.get_activations()
      loss = compute_loss(activations)

      base_optimizer = gradient_descent.GradientDescentOptimizer(
          learning_rate=1)
      cross_shard_optimizer = tpu_optimizer.CrossShardOptimizer(
          base_optimizer)

      train_op = cross_shard_optimizer.minimize(loss)
      # `train_op` and `send_gradients_op` must happen in order.
      with ops.control_dependencies([train_op]):
        send_gradients_op = embedding.generate_send_gradients_op()
      with ops.control_dependencies([send_gradients_op]):
        loss = array_ops.identity(loss)

    loss = tpu.shard(computation,
                     num_shards=embedding.num_cores)

    with self.test_session() as sess:
      sess.run(tpu.initialize_system(embedding_config=
                                     embedding.config_proto))
      sess.run(variables.global_variables_initializer())
      sess.run(embedding.init_ops)
      sess.run(embedding_variables_and_ops.load_ops())
      sess.run(enqueue_ops)
      loss_val = sess.run(loss)
    ```
  """

  # TODO(shizhiw): Instead of `feature_to_table_dict` which maps to table
  # name, consider `feature_to_config_dict` which maps to `FeatureConfig`.
  # `FeatureConfig` could have fields other than table name. For example, it
  # could have a field to indicate that the feature should not be used to
  # update embedding table (cr/204852758, cr/204940540). Also, this can support
  # different combiners for different features within the same table.
  # TODO(shizhiw, b/118512626): Remove `batch_size` from `__init__` and move it
  # to `FeatureConfig`?

  # TODO(shizhiw): will it be cleaner to make `table_to_config_dict` and
  # `feature_to_table_dict` lists of `TableSpec` and `FeatureSpec` respectively?

  # TODO(shizhiw): Consider adding `input_fn` as an option to remove boilerplate
  # for-loops around construction of inputs.

  # `optimization_parameter` applies to all tables. If the need arises,
  # we can add `optimization_parameters` to `TableConfig` to override this
  # global setting.
  @experimental
  def __init__(self,
               table_to_config_dict,
               feature_to_table_dict,
               batch_size,
               mode,
               master,
               optimization_parameters=None):
    """API for using TPU for embedding lookups.

    Args:
      table_to_config_dict: A dictionary mapping from string of table name to
        `TableConfig`. Table refers to an embedding table, e.g. `params`
        argument to `tf.nn.embedding_lookup_sparse()`.
      feature_to_table_dict: A dictionary mapping from string of feature name
        to string of table name. Feature refers to ids to lookup in embedding
        table, e.g. `sp_ids` argument to `tf.nn.embedding_lookup_sparse()`.
      batch_size: An `int` representing the global batch size.
      mode: `TRAINING` or `INFERENCE`.
      master: A `string` representing the TensorFlow master to use.
      optimization_parameters: `AdagradParameters`, `AdamParameters`,
        `StochasticGradientDescentParameters`. Must be set in training and must
        be `None` in inference.

    Raises:
      ValueError: if any input is invalid.
    """
    _validate_table_to_config_dict(table_to_config_dict)
    # Avoid nondeterminism from `Dict` iteration order by using `OrderedDict`.
    self._table_to_config_dict = _create_ordered_dict(table_to_config_dict)
    self._combiners = _create_combiners(self._table_to_config_dict)

    _validate_feature_to_table_dict(table_to_config_dict, feature_to_table_dict)
    self._feature_to_table_dict = _create_ordered_dict(feature_to_table_dict)
    self._table_to_features_dict = _create_table_to_features_dict(
        self._feature_to_table_dict)

    self._batch_size = batch_size

    self._master = master
    self._tpu_system_metadata = (
        tpu_system_metadata_lib._query_tpu_system_metadata(self._master))  # pylint: disable=protected-access
    if self._tpu_system_metadata.num_cores == 0:
      raise ValueError('TPUEmbedding needs TPUs, but master {} does not have '
                       'TPUs.'.format(self._master))
    self._num_hosts = self._tpu_system_metadata.num_hosts
    self._hosts = [device.name for device in self._tpu_system_metadata.devices
                   if 'device:CPU:' in device.name]
    self._num_cores_per_host = self._tpu_system_metadata.num_of_cores_per_host
    self._num_cores = self._tpu_system_metadata.num_cores

    _validate_batch_size(self._batch_size, self._num_cores)
    self._batch_size_per_core = self._batch_size // self._num_cores

    self._init_ops = []

    # TODO(shizhiw): remove `mode`?
    if mode == TRAINING:
      _validate_optimization_parameters(optimization_parameters)
      self._optimization_parameters = optimization_parameters
    elif mode == INFERENCE:
      if optimization_parameters is not None:
        raise ValueError('`optimization_parameters` should be `None` '
                         'for inference mode.')
      # Gradients are never sent in inference mode; a dummy SGD setting is
      # used only so that a config proto can still be built.
      self._optimization_parameters = (
          StochasticGradientDescentParameters(1.))
    else:
      raise ValueError('`mode` only supports {} and {}; got {}.'
                       .format(TRAINING, INFERENCE, mode))
    self._mode = mode

    # TODO(shizhiw): move `optimization_parameters` into `_optimizer_handler`
    # and create special handler for inference that inherits from
    # StochasticGradientDescentHandler with more user-friendly error message
    # on get_slot().
    self._optimizer_handler = _get_optimization_handler(
        self._optimization_parameters)

    dummy_table_variables_init_op = self._create_dummy_table_variables()
    self._init_ops.append(dummy_table_variables_init_op)

    self._config_proto = self._create_config_proto()

  @property
  def hosts(self):
    """A list of device names for CPU hosts.

    Returns:
      A list of device names for CPU hosts.
    """
    return copy.copy(self._hosts)

  # TODO(shizhiw): change to num_tensor_cores_per_host to be more explicit and
  # to be consistent with `tpu_embedding_configuration.proto`.
  @property
  def num_cores_per_host(self):
    """Number of TPU cores on a CPU host.

    Returns:
      Number of TPU cores on a CPU host.
    """
    return self._num_cores_per_host

  @property
  def num_cores(self):
    """Total number of TPU cores on all hosts.

    Returns:
      Total number of TPU cores on all hosts.
    """
    return self._num_cores

  @property
  def batch_size_per_core(self):
    """Batch size for each TPU core.

    The sparse tensors in `sparse_features_list` to `generate_enqueue_ops`
    must have batch dimension equal to this.

    Returns:
      Batch size for each TPU core.
    """
    return self._batch_size_per_core

  @property
  def config_proto(self):
    """Create embedding config proto for `tpu.initialize_system()`.

    Returns:
      an `TPUEmbeddingConfiguration` proto describing the desired
      configuration of the hardware embedding lookup tables, which
      is passed to `tpu.initialize_system()`.
    """
    return self._config_proto

  @property
  def init_ops(self):
    """Initialization ops for TPU embedding.

    It must be called after all global variables have been initialized,
    i.e. after `global_variables_initializer()`, as it loads embedding
    tables into TPU.

    Returns:
      A list of ops.
    """
    return self._init_ops

  @property
  def table_to_config_dict(self):
    return copy.copy(self._table_to_config_dict)

  @property
  def feature_to_table_dict(self):
    return copy.copy(self._feature_to_table_dict)

  @property
  def optimization_parameters(self):
    return self._optimization_parameters

  def _create_config_proto(self):
    """Create `TPUEmbeddingConfiguration`."""
    config_proto = elc.TPUEmbeddingConfiguration()
    for table in self._table_to_config_dict:
      table_descriptor = config_proto.table_descriptor.add()
      table_descriptor.name = table

      table_config = self._table_to_config_dict[table]
      table_descriptor.vocabulary_size = table_config.vocabulary_size
      table_descriptor.dimension = table_config.dimension

      features_for_table = self._table_to_features_dict[table]
      table_descriptor.num_features = len(features_for_table)

      table_descriptor.optimization_parameters.learning_rate.constant = (
          self._optimization_parameters.learning_rate)
      table_descriptor.optimization_parameters.gradient_accumulation_status = (
          optimization_parameters_pb2.GradientAccumulationStatus.ENABLED
          if self._optimization_parameters.use_gradient_accumulation else
          optimization_parameters_pb2.GradientAccumulationStatus.DISABLED)
      # For compatibility with old TPU workers.
      table_descriptor.optimization_parameters.use_gradient_accumulation = (
          self._optimization_parameters.use_gradient_accumulation)
      self._optimizer_handler.set_optimization_parameters(table_descriptor)

    config_proto.mode = self._mode
    config_proto.batch_size_per_tensor_core = self._batch_size_per_core
    config_proto.num_hosts = self._num_hosts
    config_proto.num_tensor_cores = self._num_cores
    config_proto.sharding_strategy = elc.TPUEmbeddingConfiguration.DIV_DEFAULT
    config_proto.pipeline_execution_with_tensor_core = (
        self._optimization_parameters.pipeline_execution_with_tensor_core)

    return config_proto

  def create_variables_and_ops(self, embedding_variable_name_by_table=None,
                               slot_variable_names_by_table=None):
    """Create embedding and slot variables, with ops to load and retrieve them.

    Args:
      embedding_variable_name_by_table: A dictionary mapping from string of
        table name to string of embedding variable name. If `None`,
        defaults from `get_default_slot_variable_names()` will be used.
      slot_variable_names_by_table: A dictionary mapping from string of table
        name to `AdamSlotVariableNames`, `AdagradSlotVariableNames` etc. If
        `None`, defaults from `get_default_slot_variable_names()` will be used.

    Returns:
      `tpu_embedding.VariablesAndOps` with:
        A dictionary mapping from string of table name to embedding variables,
        A dictionary mapping from string of table name to AdagradSlotVariable,
         AdamSlotVariables etc with slot variables,
        A function which returns a list of ops to load embedding and slot
         variables from CPU to TPU.
        A function which returns a list of ops to retrieve embedding and slot
         variables from TPU to CPU.
    """
    embedding_variables_by_table = {}
    slot_variables_by_table = {}
    load_op_fns = []
    retrieve_op_fns = []
    for table in self._table_to_config_dict:
      if embedding_variable_name_by_table:
        embedding_variable_name = embedding_variable_name_by_table[table]
      else:
        embedding_variable_name = table
      if slot_variable_names_by_table:
        slot_variable_names = slot_variable_names_by_table[table]
      else:
        slot_variable_names = (
            self._optimizer_handler.get_default_slot_variable_names(table))

      device_fn = _create_device_fn(self._hosts)
      with ops.device(device_fn):
        table_variables = _create_partitioned_variables(
            name=embedding_variable_name,
            num_hosts=self._num_hosts,
            vocabulary_size=self._table_to_config_dict[table].vocabulary_size,
            embedding_dimension=self._table_to_config_dict[table].dimension,
            initializer=self._table_to_config_dict[table].initializer,
            collections=[ops.GraphKeys.GLOBAL_VARIABLES])
        embedding_variables_by_table[table] = table_variables

        slot_variables_for_table, load_ops_fn, retrieve_ops_fn = (
            self._optimizer_handler.create_variables_and_ops(
                table, slot_variable_names, self._num_hosts,
                self._table_to_config_dict[table], table_variables)
        )
        slot_variables_by_table[table] = slot_variables_for_table
        load_op_fns.append(load_ops_fn)
        retrieve_op_fns.append(retrieve_ops_fn)

    def load_ops():
      """Calls and returns the load ops for each embedding table.

      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_ops_list = []
      for load_op_fn in load_op_fns:
        load_ops_list.extend(load_op_fn())
      return load_ops_list

    def retrieve_ops():
      """Calls and returns the retrieve ops for each embedding table.

      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_ops_list = []
      for retrieve_op_fn in retrieve_op_fns:
        retrieve_ops_list.extend(retrieve_op_fn())
      return retrieve_ops_list

    return VariablesAndOps(embedding_variables_by_table,
                           slot_variables_by_table,
                           load_ops, retrieve_ops)

  def _create_dummy_table_variables(self):
    """Create dummy embedding table variables.

    The sole purpose of these dummy variables are to trigger gradient
    calculation wrt them so that the gradients wrt activation can be captured
    and later sent to TPU embedding.

    Returns:
      Initializer for these variables.

    Raises:
      RuntimeError: if collection to store gradients already exists and is not
      empty.
    """
    self._dummy_table_variables = []
    # TODO(shizhiw): remove table id.
    for table_id, table in enumerate(self._table_to_features_dict):
      self._dummy_table_variables.append(
          variable_scope.get_variable(
              'tpu_embedding_dummy_table_variable_%s' % table,
              dtype=dtypes.float32,
              shape=[1],
              use_resource=True,
              trainable=True,
              # TODO(shizhiw): Remove these dummy variables as
              # tensorflow optimizer creates slot variable for them which
              # is undesirable.
              # e.g. tpu_embedding_dummy_table_variable_mlp_user/Adam{_1}.
              # Explicitly specifying collections prevents this variable from
              # being added to the GLOBAL_VARIABLES collection, so that Saver()
              # ignores it.
              collections=['tpu_embedding_dummy_table_variables']))

      g = ops.get_default_graph()
      table_gradients = g.get_collection_ref(
          'tpu_embedding_gradients_table_%d' % table_id)
      if table_gradients:
        raise RuntimeError(
            'tpu_embedding_gradients_table_%d is not empty.' % table_id)
      # Placeholders, one per feature of this table; filled in later by the
      # gradient capture machinery.
      table_gradients.extend([None] * len(self._table_to_features_dict[table]))

    return variables.variables_initializer(
        self._dummy_table_variables,
        name='tpu_embedding_dummy_table_variables_init')

  def generate_enqueue_ops(self, sparse_features_list):
    """Generate enqueue ops.

    Args:
      sparse_features_list: a list of dictionary mapping from string
        of feature names to sparse tensor. Each dictionary is for one
        TPU core. Dictionaries for the same core should be contiguous
        on the list.

    Returns:
      Ops to enqueue to TPU for embedding.
    """
    self._validate_generate_enqueue_ops_sparse_features_list(
        sparse_features_list)
    return [
        self._generate_enqueue_op(
            sparse_features, device_ordinal=i % self._num_cores_per_host)
        for i, sparse_features in enumerate(sparse_features_list)
    ]

  def _validate_generate_enqueue_ops_sparse_features_list(
      self, sparse_features_list):
    """Validate `sparse_features_list`."""
    if len(sparse_features_list) != self._num_cores:
      raise ValueError('Length of `sparse_features_list` should match the '
                       'number of cores; '
                       '`len(sparse_features_list)` is {}, '
                       'number of cores is {}.'.format(
                           len(sparse_features_list), self._num_cores))

    feature_set = set(self._feature_to_table_dict.keys())
    contiguous_device = None
    for i, sparse_features in enumerate(sparse_features_list):
      used_feature_set = set(sparse_features.keys())

      # Check features are valid.
      missing_feature_set = feature_set - used_feature_set
      if missing_feature_set:
        raise ValueError('`sparse_features_list[{}]` misses a feature that is '
                         'in `feature_to_config_dict`: {}.'.format(
                             i, missing_feature_set))

      extra_feature_set = used_feature_set - feature_set
      if extra_feature_set:
        raise ValueError('`sparse_features_list[{}]` has a feature that is not '
                         'in `feature_to_config_dict`: {}.'.format(
                             i, extra_feature_set))

      device = None
      device_feature = None
      for feature, tensor in six.iteritems(sparse_features):
        if not isinstance(tensor, sparse_tensor.SparseTensor):
          raise ValueError('`sparse_features_list[{}]` has a feature that is '
                           'not mapped to `SparseTensor`. '
                           '`feature`: {}, type: {}'.format(
                               i, feature, type(tensor)))

        # Check all features are on the same device.
        if device is None:
          device = tensor.op.device
          device_feature = feature
        else:
          if device != tensor.op.device:
            raise ValueError('Devices are different between features in '
                             '`sparse_features_list[{}]`; '
                             'devices: {}, {}; features: {}, {}.'.format(
                                 i, device, tensor.op.device, feature,
                                 device_feature))

      if i % self._num_cores_per_host:
        if device != contiguous_device:
          raise ValueError('We expect the `sparse_features` which are on the '
                           'same host to be contiguous in '
                           '`sparse_features_list`, '
                           '`sparse_features_list[{}]` is on device {}, '
                           'but is expected to be on device {}.'.format(
                               i, device, contiguous_device))
      else:
        contiguous_device = device

  def _generate_enqueue_op(self, sparse_features, device_ordinal):
    with ops.colocate_with(list(sparse_features.values())[0]):
      sample_idcs, embedding_idcs, aggregation_weights = (
          self._format_for_tpu_embedding_sparse_batch(sparse_features))
      return tpu_ops.enqueue_tpu_embedding_sparse_batch(
          sample_idcs,
          embedding_idcs,
          aggregation_weights,
          combiners=self._combiners,
          device_ordinal=device_ordinal)

  def _format_for_tpu_embedding_sparse_batch(self, sparse_features):
    """Format sparse features for `enqueue_tpu_embedding_sparse_batch()`.

    Args:
      sparse_features: a `Dict` of `SparseTensor`s for embedding.

    Returns:
      Arguments for `enqueue_tpu_embedding_sparse_batch()`.
    """
    sample_idcs, embedding_idcs, aggregation_weights = list(), list(), list()
    for table in self._table_to_features_dict:
      sample_t, indices_t, weights_t = list(), list(), list()

      features = self._table_to_features_dict[table]
      for i, feature in enumerate(features):
        tensor = sparse_features[feature]
        sample_indices = tensor.indices[:, 0]
        embedding_indices = tensor.values
        # All entries are weighted equally; the combiner handles reduction.
        weights = array_ops.ones_like(embedding_indices)
        # Features of the same table are stacked along the sample dimension,
        # offset by one `batch_size_per_core` slice per feature.
        sample_t.append(i * self._batch_size_per_core + sample_indices)
        indices_t.append(embedding_indices)
        weights_t.append(weights)

      sample_idcs.append(
          math_ops.cast(array_ops.concat(sample_t, axis=0), dtype=dtypes.int32))
      embedding_idcs.append(
          math_ops.cast(
              array_ops.concat(indices_t, axis=0), dtype=dtypes.int32))
      aggregation_weights.append(
          math_ops.cast(
              array_ops.concat(weights_t, axis=0), dtype=dtypes.float32))
    return sample_idcs, embedding_idcs, aggregation_weights

  def get_activations(self):
    """Get activations for features.

    This should be called within `computation` that is passed to
      `tpu.replicate` and friends.

    Returns:
      A dictionary mapping from `String` of feature name to `Tensor`
        of activation.
    """
    recv_activations = tpu_ops.recv_tpu_embedding_activations(
        num_outputs=len(self._table_to_config_dict),
        config=self._config_proto.SerializeToString())

    activations = collections.OrderedDict()
    for table_id, table in enumerate(self._table_to_features_dict):
      features = self._table_to_features_dict[table]
      for lookup_id, feature in enumerate(features):
        # Slice out this feature's rows; layout must mirror
        # `_format_for_tpu_embedding_sparse_batch()`.
        start_row = lookup_id * self._batch_size_per_core
        end_row = start_row + self._batch_size_per_core
        activations[feature] = gen_tpu_ops.tpu_embedding_activations(
            self._dummy_table_variables[table_id],
            recv_activations[table_id][start_row:end_row, :],
            table_id=table_id,
            lookup_id=lookup_id)
    return activations

  # TODO(shizhiw): Make `gradient_multiplier` per feature. Setting it to 0 would
  # have the effect of `tf.stop_gradients()`.
  # TODO(shizhiw): Consider alternative ways to capture gradients wrt embedding
  # layer outputs to remove `_dummy_table_variables`,
  # `_embedding_activation_grad` and `tpu_embedding_gradients_table_%d'.
  def generate_send_gradients_op(self, gradient_multipliers=None):
    """Retrieve gradients from collections and send them to TPU embedding.

    Args:
      gradient_multipliers: None, or dict mapping table names to gradient
        multiplier Tensors.

    Returns:
      SendTPUEmbeddingGradients Op.

    Raises:
      ValueError: If required gradients have not been defined.
      RuntimeError: If `mode` is not `TRAINING`.
    """
    if self._mode != TRAINING:
      raise RuntimeError('Only in training mode gradients need to '
                         'be sent to TPU embedding; got mode {}.'
                         .format(self._mode))
    g = ops.get_default_graph()
    gradients = list()
    for table_id, table in enumerate(self._table_to_config_dict):
      table_gradients = g.get_collection(
          'tpu_embedding_gradients_table_%d' % table_id)
      if any(gradient is None for gradient in table_gradients):
        raise ValueError(
            'Table {}/{} has undefined gradients: this is probably because the '
            'model asked TPUEmbedding to compute activations that were not '
            'used.'.format(table_id, table))
      concat_table_grads = array_ops.concat(table_gradients, axis=0)
      if gradient_multipliers is not None:
        # `table` is the table-name string itself (dict key), so index the
        # multiplier dict with it directly. The previous `table.name`
        # raised AttributeError since strings have no `.name`.
        concat_table_grads *= gradient_multipliers[table]
      gradients.append(concat_table_grads)

    return tpu_ops.send_tpu_embedding_gradients(
        inputs=gradients, config=self.config_proto.SerializeToString())
def _validate_table_to_config_dict(table_to_config_dict):
  """Checks that every value of `table_to_config_dict` is a `TableConfig`.

  Args:
    table_to_config_dict: dict mapping table name to its configuration.

  Raises:
    ValueError: if any value is not a `TableConfig` instance.
  """
  for name, config in six.iteritems(table_to_config_dict):
    if not isinstance(config, TableConfig):
      raise ValueError('Value of `table_to_config_dict` must be of type '
                       '`TableConfig`, got {} for {}.'.format(
                           type(config), name))
def _validate_feature_to_table_dict(table_to_config_dict,
feature_to_table_dict):
"""Validate `feature_to_table_dict`."""
used_table_set = set(feature_to_table_dict.values())
table_set = set(table_to_config_dict.keys())
unused_table_set = table_set - used_table_set
if unused_table_set:
raise ValueError('`table_to_config_dict` specifies table that is not '
'used in `feature_to_table_dict`: {}.'
.format(unused_table_set))
extra_table_set = used_table_set - table_set
if extra_table_set:
raise ValueError('`feature_to_table_dict` refers to a table that is not '
'specified in `table_to_config_dict`: {}.'
.format(extra_table_set))
def _validate_batch_size(batch_size, num_cores):
if batch_size % num_cores:
raise ValueError('`batch_size` is not a multiple of number of '
'cores. `batch_size`={}, `_num_cores`={}.'.format(
batch_size, num_cores))
def _validate_optimization_parameters(optimization_parameters):
  """Checks `optimization_parameters` inherits from `_OptimizationParameters`.

  Args:
    optimization_parameters: the object to validate.

  Raises:
    ValueError: if it does not inherit from `_OptimizationParameters`.
  """
  if not isinstance(optimization_parameters, _OptimizationParameters):
    # Fixed: the message previously misspelled the class name as
    # '_OptimizationPramaters'.
    raise ValueError('`optimization_parameters` must inherit from '
                     '`_OptimizationParameters`. '
                     '`type(optimization_parameters)`={}'.format(
                         type(optimization_parameters)))
class _OptimizerHandler(object):
"""Interface class for handling optimizer specific logic."""
def __init__(self, optimization_parameters):
self._optimization_parameters = optimization_parameters
def set_optimization_parameters(self, table_descriptor):
raise NotImplementedError()
def get_default_slot_variable_names(self, table):
raise NotImplementedError()
def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
table_config, table_variables):
raise NotImplementedError()
class _AdagradHandler(_OptimizerHandler):
  """Handles Adagrad specific logic."""

  def __init__(self, optimization_parameters):
    super(_AdagradHandler, self).__init__(optimization_parameters)
    # Map from table name to that table's accumulator slot variables.
    self._table_to_accumulator_variables_dict = {}

  def set_optimization_parameters(self, table_descriptor):
    # Adagrad has no proto fields to fill here; SetInParent() marks the
    # `adagrad` submessage as present so the right optimizer is selected.
    table_descriptor.optimization_parameters.adagrad.SetInParent()

  def get_default_slot_variable_names(self, table):
    # Default accumulator variable name, e.g. 'my_table/Adagrad'.
    return AdagradSlotVariableName('{}/{}'.format(table, 'Adagrad'))

  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Creates accumulator slot variables plus load/retrieve op builders.

    Args:
      table: string name of the embedding table.
      slot_variable_names: `AdagradSlotVariableName` with the accumulator
        variable name.
      num_hosts: number of CPU hosts; one variable shard per host.
      table_config: `TableConfig` for this table.
      table_variables: list of per-host embedding variable shards.

    Returns:
      Tuple of (`AdagradSlotVariable`, load_ops_fn, retrieve_ops_fn).
    """
    accumulator_initializer = init_ops.constant_initializer(
        self._optimization_parameters.initial_accumulator)
    accumulator_variables = _create_partitioned_variables(
        name=slot_variable_names.accumulator,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=accumulator_initializer)
    slot_variables = AdagradSlotVariable(accumulator_variables)

    def load_ops_fn():
      """Returns the load ops for AdaGrad embedding tables.

      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      # One load op per host shard, colocated with that shard's variable.
      for host_id, table_variable, accumulator_variable in (zip(
          range(num_hosts), table_variables, accumulator_variables)):
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops.load_tpu_embedding_adagrad_parameters(
                  parameters=table_variable,
                  accumulators=accumulator_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list

    def retrieve_ops_fn():
      """Returns the retrieve ops for AdaGrad embedding tables.

      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable, accumulator_variable in (zip(
          range(num_hosts), table_variables, accumulator_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table, retrieved_accumulator = (
              tpu_ops.retrieve_tpu_embedding_adagrad_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          # Copy the retrieved TPU values back into the CPU variables.
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table),
              state_ops.assign(accumulator_variable, retrieved_accumulator))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list

    return slot_variables, load_ops_fn, retrieve_ops_fn
class _AdamHandler(_OptimizerHandler):
  """Handles Adam specific logic."""

  def __init__(self, optimization_parameters):
    super(_AdamHandler, self).__init__(optimization_parameters)
    # Per-table caches for the first (m) and second (v) moment slot variables.
    self._table_to_m_variables_dict = {}
    self._table_to_v_variables_dict = {}

  def set_optimization_parameters(self, table_descriptor):
    """Copy the Adam hyperparameters into the table descriptor proto."""
    table_descriptor.optimization_parameters.adam.beta1 = (
        self._optimization_parameters.beta1)
    table_descriptor.optimization_parameters.adam.beta2 = (
        self._optimization_parameters.beta2)
    table_descriptor.optimization_parameters.adam.epsilon = (
        self._optimization_parameters.epsilon)
    # Proto flag is "non-lazy", the user-facing option is "lazy" — invert.
    table_descriptor.optimization_parameters.adam.use_non_lazy_adam = (
        not self._optimization_parameters.lazy_adam)
    table_descriptor.optimization_parameters.adam.use_sum_inside_sqrt = (
        self._optimization_parameters.sum_inside_sqrt)

  def get_default_slot_variable_names(self, table):
    """Return default slot names '<table>/Adam/m' and '<table>/Adam/v'."""
    return AdamSlotVariableNames('{}/{}/m'.format(table, 'Adam'),
                                 '{}/{}/v'.format(table, 'Adam'))

  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Create Adam m/v slot variables plus load/retrieve op builders.

    Returns:
      Tuple (slot_variables, load_ops_fn, retrieve_ops_fn).
    """
    m_initializer = init_ops.zeros_initializer()
    m_variables = _create_partitioned_variables(
        name=slot_variable_names.m,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=m_initializer)
    v_initializer = init_ops.zeros_initializer()
    v_variables = _create_partitioned_variables(
        name=slot_variable_names.v,
        num_hosts=num_hosts,
        vocabulary_size=table_config.vocabulary_size,
        embedding_dimension=table_config.dimension,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        initializer=v_initializer)
    slot_variables = AdamSlotVariables(m_variables, v_variables)

    def load_ops_fn():
      """Returns the load ops for Adam embedding tables.

      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      for host_id, table_variable, m_variable, v_variable in (zip(
          range(num_hosts), table_variables,
          m_variables, v_variables)):
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops.load_tpu_embedding_adam_parameters(
                  parameters=table_variable,
                  momenta=m_variable,
                  velocities=v_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list

    def retrieve_ops_fn():
      """Returns the retrieve ops for Adam embedding tables.

      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable, m_variable, v_variable in (zip(
          range(num_hosts), table_variables,
          m_variables, v_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table, retrieved_m, retrieved_v = (
              tpu_ops.retrieve_tpu_embedding_adam_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table),
              state_ops.assign(m_variable, retrieved_m),
              state_ops.assign(v_variable, retrieved_v))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list

    return slot_variables, load_ops_fn, retrieve_ops_fn
class _StochasticGradientDescentHandler(_OptimizerHandler):
  """Handles stochastic gradient descent specific logic."""

  def set_optimization_parameters(self, table_descriptor):
    """Mark the table descriptor as using plain SGD (no hyperparameters)."""
    (table_descriptor.optimization_parameters.stochastic_gradient_descent
     .SetInParent())

  def get_default_slot_variable_names(self, table):
    # SGD keeps no optimizer state, so there are no slot variables to name.
    return None

  def create_variables_and_ops(self, table, slot_variable_names, num_hosts,
                               table_config, table_variables):
    """Create load/retrieve op builders; SGD has no slot variables.

    Returns:
      Tuple (None, load_ops_fn, retrieve_ops_fn).
    """
    del table_config

    def load_ops_fn():
      """Returns the load ops for SGD embedding tables.

      Returns:
        A list of ops to load embedding and slot variables from CPU to TPU.
      """
      load_op_list = []
      for host_id, table_variable in (zip(
          range(num_hosts), table_variables)):
        with ops.colocate_with(table_variable):
          load_parameters_op = (
              tpu_ops
              .load_tpu_embedding_stochastic_gradient_descent_parameters(
                  parameters=table_variable,
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
        load_op_list.append(load_parameters_op)
      return load_op_list

    def retrieve_ops_fn():
      """Returns the retrieve ops for SGD embedding tables.

      Returns:
        A list of ops to retrieve embedding and slot variables from TPU to CPU.
      """
      retrieve_op_list = []
      for host_id, table_variable in (zip(
          range(num_hosts), table_variables)):
        with ops.colocate_with(table_variable):
          retrieved_table = (
              tpu_ops
              .retrieve_tpu_embedding_stochastic_gradient_descent_parameters(
                  table_name=table,
                  num_shards=num_hosts,
                  shard_id=host_id))
          retrieve_parameters_op = control_flow_ops.group(
              state_ops.assign(table_variable, retrieved_table))
        retrieve_op_list.append(retrieve_parameters_op)
      return retrieve_op_list

    return None, load_ops_fn, retrieve_ops_fn
def _get_optimization_handler(optimization_parameters):
  """Return the optimizer handler for `optimization_parameters`.

  Args:
    optimization_parameters: an AdagradParameters, AdamParameters or
      StochasticGradientDescentParameters instance.

  Returns:
    The matching _OptimizerHandler subclass instance.

  Raises:
    NotImplementedError: if the parameter type has no registered handler.
  """
  if isinstance(optimization_parameters, AdagradParameters):
    return _AdagradHandler(optimization_parameters)
  elif isinstance(optimization_parameters, AdamParameters):
    return _AdamHandler(optimization_parameters)
  elif isinstance(optimization_parameters, StochasticGradientDescentParameters):
    return _StochasticGradientDescentHandler(optimization_parameters)
  else:
    # BUG FIX: the original `return`ed the exception instance instead of
    # raising it, so unsupported parameter types were silently handed back
    # to the caller as an object rather than failing.
    raise NotImplementedError(
        'Unsupported optimization parameters: {}'.format(
            type(optimization_parameters)))
def _create_ordered_dict(d):
"""Create an OrderedDict from Dict."""
return collections.OrderedDict((k, d[k]) for k in sorted(d))
def _create_combiners(table_to_config_dict):
return [table_to_config_dict[t].combiner for t in table_to_config_dict]
def _create_table_to_features_dict(feature_to_table_dict):
"""Create mapping from table to a list of its features."""
table_to_features_dict_tmp = {}
for feature, table in six.iteritems(feature_to_table_dict):
if table in table_to_features_dict_tmp:
table_to_features_dict_tmp[table].append(feature)
else:
table_to_features_dict_tmp[table] = [feature]
table_to_features_dict = collections.OrderedDict()
for table in sorted(table_to_features_dict_tmp):
table_to_features_dict[table] = sorted(table_to_features_dict_tmp[table])
return table_to_features_dict
def _create_device_fn(hosts):
"""Create device_fn() to use with _create_partitioned_variables()."""
def device_fn(op):
"""Returns the `device` for `op`."""
part_match = re.match(r'.*/part_(\d+)(/|$)', op.name)
if part_match:
idx = int(part_match.group(1))
else:
raise RuntimeError('Internal Error: '
'Expected %s to contain /part_*.' % op.name)
device = hosts[idx]
return device
return device_fn
def _create_partitioned_variables(name,
num_hosts,
vocabulary_size,
embedding_dimension,
initializer,
collections=None): # pylint: disable=redefined-outer-name
"""Creates ParitionedVariables based on `num_hosts` for `table`."""
# TODO(shizhiw): automatically place embedding lookup elsewhere?
if vocabulary_size < num_hosts:
raise ValueError('`vocabulary_size`({}) is smaller than `num_hosts`({}). '
'As TPU embedding is not optimized for small tables, '
'please consider other ways for this embedding lookup.')
return list(variable_scope.get_variable(
name,
shape=(vocabulary_size, embedding_dimension),
partitioner=partitioned_variables.fixed_size_partitioner(num_hosts),
dtype=dtypes.float32,
initializer=initializer,
collections=collections,
trainable=False))
|
|
import sys
import numpy as np
from scipy import stats
#from scikits.statsmodels.iolib.table import SimpleTable
from scikits.statsmodels.iolib.table import SimpleTable
def _kurtosis(a):
'''wrapper for scipy.stats.kurtosis that returns nan instead of raising Error
missing options
'''
try:
res = stats.kurtosis(a)
except ValueError:
res = np.nan
return res
def _skew(a):
'''wrapper for scipy.stats.skew that returns nan instead of raising Error
missing options
'''
try:
res = stats.skew(a)
except ValueError:
res = np.nan
return res
class Describe(object):
    '''
    Calculates descriptive statistics for data.

    Defaults to a basic set of statistics, "all" can be specified, or a list can
    be given.

    dataset : can be either a structured or ndarray (Larry?), observations in
    rows, variables in columns.
    '''
    def __init__(self, dataset):
        self.dataset = dataset

        #better if this is initially a list to define order, or use an ordered dict
        # First position is the function
        # Second position is the tuple/list of column names/numbers
        # third is are the results in order of the columns
        self.univariate = dict(
            obs = [len, None, None],
            mean = [np.mean, None, None],
            std = [np.std, None, None],
            min = [np.min, None, None],
            max = [np.max, None, None],
            ptp = [np.ptp, None, None],
            var = [np.var, None, None],
            mode_val = [self._mode_val, None, None],
            mode_bin = [self._mode_bin, None, None],
            median = [np.median, None, None],
            skew = [stats.skew, None, None],
            uss = [stats.ss, None, None],
            kurtosis = [stats.kurtosis, None, None],
            percentiles = [self._percentiles, None, None], #BUG: not single value
            #sign_test_M = [self.sign_test_m, None, None],
            #sign_test_P = [self.sign_test_p, None, None]
            )

        #TODO: Basic stats for strings
        #self.strings = dict(
            #unique = [np.unique, None, None],
            #number_uniq = [len(
            #most = [
            #least = [

        #TODO: Multivariate
        #self.multivariate = dict(
            #corrcoef(x[, y, rowvar, bias]),
            #cov(m[, y, rowvar, bias]),
            #histogram2d(x, y[, bins, range, normed, weights])
            #)
        # Lazily determined by _array_typer(): 'homog' or 'sctruct'.
        self._arraytype = None
        self._columns_list = None

    def _percentiles(self, x):
        # Returns a list, not a scalar — see the BUG note in __init__.
        p = [stats.scoreatpercentile(x, per) for per in
             (1, 5, 10, 25, 50, 75, 90, 95, 99)]
        return p

    def _mode_val(self, x):
        # Most frequent value.
        return stats.mode(x)[0][0]

    def _mode_bin(self, x):
        # Count of the most frequent value.
        return stats.mode(x)[1][0]

    def _array_typer(self):
        """if not a sctructured array"""
        if not (self.dataset.dtype.names):
            """homogeneous dtype array"""
            self._arraytype = 'homog'
        elif self.dataset.dtype.names:
            """structured or rec array"""
            self._arraytype = 'sctruct'
        else:
            assert self._arraytype == 'sctruct' or self._arraytype == 'homog'

    def _is_dtype_like(self, col):
        """
        Check whether self.dataset.[col][0] behaves like a string, numbern unknown.
        `numpy.lib._iotools._is_string_like`
        """
        def string_like():
            # TODO: not sure what the result is if the first item is some
            # type of missing value
            try:
                self.dataset[col][0] + ''
            except (TypeError, ValueError):
                return False
            return True

        def number_like():
            try:
                self.dataset[col][0] + 1.0
            except (TypeError, ValueError):
                return False
            return True

        if number_like() == True and string_like() == False:
            return 'number'
        elif number_like() == False and string_like() == True:
            return 'string'
        else:
            assert (number_like() == True or string_like() == True), '\
Not sure of dtype' + str(self.dataset[col][0])

    #@property
    def summary(self, stats='basic', columns='all', orientation='auto'):
        """
        prints a table of summary statistics and stores the stats.

        stats: The desired statistics, A list[] or 'basic' or 'all' are options
            'basic' = ('obs', 'mean', 'std', 'min', 'max')
            'all' = ('obs', 'mean', 'std', 'min', 'max', 'ptp', 'var', 'mode',
                     'meadian', 'skew', 'uss', 'kurtosis', 'percentiles')

        Columns: The columns/variables to report the statistics, default is 'all'
            structured array: specify the column names
                summary(stats='basic', columns=['alpha', 'beta'])
            standard array: Specifiy column numbers (NEED TO TEST)

        percentiles currently broken
        mode requires mode_val and mode_bin separately
        """
        # NOTE(review): the `stats` parameter shadows the module-level
        # scipy.stats import; the local `import scipy.stats` below works
        # around that.
        if self._arraytype == None:
            self._array_typer()

        if stats == 'basic':
            stats = ('obs', 'mean', 'std', 'min', 'max')
        elif stats == 'all':
            #stats = self.univariate.keys()
            #dict doesn't keep an order, use full list instead
            stats = ['obs', 'mean', 'std', 'min', 'max', 'ptp', 'var', 'mode_val', 'mode_bin',
                     'median', 'uss', 'skew', 'kurtosis', 'percentiles']
        else:
            for astat in stats:
                pass
                #assert astat in self.univariate

        #hack around percentiles multiple output
        #bad naming
        import scipy.stats
        #BUG: the following has all per the same per=99
        ## perdict = dict(('perc_%2d'%per, [lambda x: scipy.stats.scoreatpercentile(x, per),
        ##                                  None, None])
        ##                for per in (1,5,10,25,50,75,90,95,99))
        # _fun binds `per` at definition time, avoiding the late-binding
        # closure bug shown above.
        def _fun(per):
            return lambda x: scipy.stats.scoreatpercentile(x, per)

        perdict = dict(('perc_%02d' % per, [_fun(per), None, None])
                       for per in (1, 5, 10, 25, 50, 75, 90, 95, 99))

        if 'percentiles' in stats:
            self.univariate.update(perdict)
            idx = stats.index('percentiles')
            # Splice the individual per-percentile stats in place of the
            # single 'percentiles' entry.
            stats[idx:idx + 1] = sorted(perdict.keys())

        #JP: this doesn't allow a change in sequence, sequence in stats is ignored
        #this is just an if condition
        if any([aitem[1] for aitem in self.univariate.items() if aitem[0] in stats]):
            if columns == 'all':
                self._columns_list = []
                if self._arraytype == 'sctruct':
                    self._columns_list = self.dataset.dtype.names
                    #self._columns_list = [col for col in self.dataset.dtype.names if
                    #(self._is_dtype_like(col)=='number')]
                else:
                    self._columns_list = range(self.dataset.shape[1])
            else:
                self._columns_list = columns
                if self._arraytype == 'sctruct':
                    for col in self._columns_list:
                        assert (col in self.dataset.dtype.names)
                else:
                    assert self._is_dtype_like(self.dataset) == 'number'
                    columstypes = self.dataset.dtype
                    #TODO: do we need to make sure they dtype is float64 ?

            for astat in stats:
                calc = self.univariate[astat]
                if self._arraytype == 'sctruct':
                    calc[1] = self._columns_list
                    # Only numeric columns are evaluated; string columns are
                    # silently skipped.
                    calc[2] = [calc[0](self.dataset[col]) for col in
                               self._columns_list if (self._is_dtype_like(col) ==
                               'number')]
                    #calc[2].append([len(np.unique(self.dataset[col])) for col
                    #in self._columns_list if
                    #self._is_dtype_like(col)=='string']
                else:
                    calc[1] = ['Col ' + str(col) for col in self._columns_list]
                    calc[2] = [calc[0](self.dataset[:, col]) for col in self._columns_list]
            return self.print_summary(stats, orientation=orientation)
        else:
            return self.print_summary(stats, orientation=orientation)

    def print_summary(self, stats, orientation='auto'):
        """Build and return a SimpleTable of the computed statistics."""
        #TODO: need to specify a table formating for the numbers, using defualt
        title = 'Summary Statistics'
        header = stats
        stubs = self.univariate['obs'][1]
        # Rows = columns of the dataset, cells = stat values.
        data = [[self.univariate[astat][2][col] for astat in stats] for col in
                range(len(self.univariate['obs'][2]))]

        if (orientation == 'varcols') or \
           (orientation == 'auto' and len(stubs) < len(header)):
            #swap rows and columns
            # NOTE(review): map() returns an iterator on PY3; SimpleTable is
            # presumably tolerant of that — confirm.
            data = map(lambda *row: list(row), *data)
            header, stubs = stubs, header

        part_fmt = dict(data_fmts = ["%#8.4g"] * (len(header) - 1))
        table = SimpleTable(data,
                            header,
                            stubs,
                            title=title,
                            txt_fmt = part_fmt)

        return table
def sign_test(samp, mu0=0):
    '''
    Signs test with mu0=0 by default (though
    the median is often used in practice)

    Parameters
    ----------
    samp : array_like
        1d array of observations.
    mu0 : float
        Hypothesized location; observations equal to mu0 are discarded.

    Returns
    -------
    M, p-value

    where

    M=(N(+) - N(-))/2, N(+) is the number of values above Mu0,
    N(-) is the number of values below.  Values equal to Mu0
    are discarded.

    The p-value for M is calculated using the binomial distrubution
    and can be intrepreted the same as for a t-test.

    See Also
    ---------
    scipy.stats.wilcoxon
    '''
    samp = np.asarray(samp)  # accept plain sequences, not only ndarrays
    pos = np.sum(samp > mu0)
    neg = np.sum(samp < mu0)
    M = (pos - neg) / 2.
    # scipy.stats.binom_test was deprecated and removed in SciPy >= 1.12;
    # use the binomtest replacement when available, fall back otherwise.
    try:
        p = stats.binomtest(int(min(pos, neg)), int(pos + neg), .5).pvalue
    except AttributeError:
        p = stats.binom_test(min(pos, neg), pos + neg, .5)
    return M, p
#TODO: There must be a better way but formating the stats of a fuction that
# returns 2 values is a problem.
#def sign_test_m(samp,mu0=0):
#return self.sign_test(samp,mu0)[0]
#def sign_test_p(samp,mu0=0):
#return self.sign_test(samp,mu0)[1]
########################################
########################################
import unittest

# Fixture data sets for the tests below.
# data1: structured array mixing numeric and string columns.
data1 = np.array([(1, 2, 'a', 'aa'),
                  (2, 3, 'b', 'bb'),
                  (2, 4, 'b', 'cc')],
                 dtype = [('alpha', float), ('beta', int),
                          ('gamma', '|S1'), ('delta', '|S2')])
# data2: structured array with only float columns.
data2 = np.array([(1, 2),
                  (2, 3),
                  (2, 4)],
                 dtype = [('alpha', float), ('beta', float)])
# data3: plain homogeneous float array (3 observations x 4 variables).
data3 = np.array([[1, 2, 4, 4],
                  [2, 3, 3, 3],
                  [2, 4, 4, 3]], dtype=float)
# data4: plain integer array (3 observations x 6 variables).
data4 = np.array([[1, 2, 3, 4, 5, 6],
                  [6, 5, 4, 3, 2, 1],
                  [9, 9, 9, 9, 9, 9]])
class TestSimpleTable(unittest.TestCase):
    """Smoke tests: run Describe.summary() on each fixture and print it."""
    #from scikits.statsmodels.iolib.table import SimpleTable, default_txt_fmt

    def test_basic_1(self):
        print('test_basic_1')
        t1 = Describe(data1)
        print(t1.summary())

    def test_basic_2(self):
        print('test_basic_2')
        t2 = Describe(data2)
        print(t2.summary())

    def test_basic_3(self):
        print('test_basic_3')
        t1 = Describe(data3)
        print(t1.summary())

    def test_basic_4(self):
        print('test_basic_4')
        t1 = Describe(data4)
        print(t1.summary())

    def test_basic_1a(self):
        print('test_basic_1a')
        t1 = Describe(data1)
        print(t1.summary(stats='basic', columns=['alpha']))

    def test_basic_1b(self):
        print('test_basic_1b')
        t1 = Describe(data1)
        print(t1.summary(stats='basic', columns='all'))

    def test_basic_2a(self):
        print('test_basic_2a')
        t2 = Describe(data2)
        print(t2.summary(stats='all'))

    def test_basic_3a(self):
        # BUG FIX: this was defined as `test_basic_3(aself)`, which both
        # misnamed `self` and silently shadowed the earlier test_basic_3,
        # so one of the two tests never ran.
        print('test_basic_3a')
        t1 = Describe(data3)
        print(t1.summary(stats='all'))

    def test_basic_4a(self):
        print('test_basic_4a')
        t1 = Describe(data4)
        print(t1.summary(stats='all'))
if __name__ == "__main__":
    #unittest.main()
    # Ad-hoc manual exercise of Describe on data4 instead of the test suite.
    t1 = Describe(data4)
    #print(t1.summary(stats='all'))
    # Statistics known to work; excludes the problematic ones noted below.
    noperc = ['obs', 'mean', 'std', 'min', 'max', 'ptp', #'mode', #'var',
              'median', 'skew', 'uss', 'kurtosis']
    #TODO: mode var raise exception,
    #TODO: percentile writes list in cell (?), huge wide format
    print(t1.summary(stats=noperc))
    print(t1.summary())
    print(t1.summary( orientation='varcols'))
    print(t1.summary(stats=['mean', 'median', 'min', 'max'], orientation=('varcols')))
    print(t1.summary(stats='all'))
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from textwrap import dedent
import unittest as ut
import hoep as h
class _IgnoreParagraph(h.Hoep):
    """Base renderer that suppresses the default paragraph wrapping.

    Returning the text unchanged keeps span-level expectations in the
    tests free of surrounding markup.
    """
    def paragraph(self, text):
        return text
class BlockRenderer(h.Hoep):
    """Renderer that tags every block-level callback with a marker string."""

    def block_code(self, text, language):
        return '[BLOCK_CODE language=%s] %s' % (language, text)

    def block_html(self, text):
        return '[BLOCK_HTML] %s' % text

    def block_quote(self, text):
        return '[BLOCK_QUOTE] %s' % text

    def header(self, text, level):
        return '[HEADER level=%s] %s' % (level, text)

    def hrule(self):
        return '[HRULE]'

    def list(self, text, ordered):
        return '[LIST ordered=%s]\n%s' % (ordered, text)

    def list_item(self, text, ordered):
        return '[LIST_ITEM ordered=%s] %s' % (ordered, text)

    def table(self, header, body):
        return '[TABLE]\n[HEADER]%s\n[BODY]%s' % (header, body)

    def table_row(self, text):
        return '\n[TABLE_ROW]\n%s' % text

    def table_cell(self, text, flags):
        return '[TABLE_CELL text=%s]' % text
class DocRenderer(_IgnoreParagraph):
    """Renderer that wraps the document in fixed header/footer text."""
    def doc_header(self):
        return 'One.\n'
    def doc_footer(self):
        return '\nFive.'
class FootnoteRenderer(h.Hoep):
    """Renderer that tags footnote callbacks with marker strings."""

    def footnotes(self, text):
        return '[FOOTNOTES]\n%s' % text

    def footnote_def(self, text, number):
        return '[FOOTNOTE_DEF number=%s] %s' % (number, text)

    def footnote_ref(self, number):
        return '[FOOTNOTE_REF number=%s]' % number
class LowRenderer(_IgnoreParagraph):
    """Renderer that tags low-level callbacks (entities, plain text)."""

    def entity(self, entity):
        return '[ENTITY] ' + entity

    def normal_text(self, text):
        return '[NORMAL_TEXT] ' + text
class ParagraphRenderer(h.Hoep):
    """Renderer that tags paragraph callbacks."""

    def paragraph(self, text):
        return '[PARAGRAPH] ' + text
class PreRenderer(_IgnoreParagraph):
    """Renderer that prefixes the raw markdown before parsing."""

    def preprocess(self, markdown):
        return 'Nop! %s' % markdown
class PostRenderer(_IgnoreParagraph):
    """Renderer that appends the reversed output after rendering."""

    def postprocess(self, html):
        mirrored = html[::-1]
        return html + ' ' + mirrored
class SpanRenderer(_IgnoreParagraph):
    """Renderer that tags every span-level callback with a marker string."""

    def autolink(self, link, email):
        return '[AUTOLINK email=%s] %s' % (email, link)

    def codespan(self, text):
        return '[CODESPAN] %s' % text

    def double_emphasis(self, text):
        return '[DOUBLE_EMPHASIS] %s' % text

    def emphasis(self, text):
        return '[EMPHASIS] %s' % text

    def highlight(self, text):
        return '[HIGHLIGHT] %s' % text

    def image(self, link, title, alt):
        return '[IMAGE link=%s title=%s alt=%s]' % (link, title, alt)

    def line_break(self):
        return '[LINE_BREAK]'

    def link(self, link, title, content):
        return '[LINK link=%s title=%s] %s' % (link, title, content)

    def quote(self, text):
        return '[QUOTE] %s' % text

    def raw_html_tag(self, tag):
        return '[RAW_HTML_TAG] %s' % tag

    def strikethrough(self, text):
        return '[STRIKETHROUGH] %s' % text

    def superscript(self, text):
        return '[SUPERSCRIPT] %s' % text

    def triple_emphasis(self, text):
        return '[TRIPLE_EMPHASIS] %s' % text
class UnderlineRenderer(_IgnoreParagraph):
    """Renderer that tags underline callbacks."""

    def underline(self, text):
        return '[UNDERLINE] %s' % text
class CustomRendererTestCase(ut.TestCase):
    """End-to-end checks that each custom renderer callback fires.

    BUG FIXES in this class:
      * test_strikethrough, test_triple_emphasis and test_underline had
        syntactically invalid `expected` literals (unescaped apostrophes
        inside single-quoted strings); rewritten with double quotes.
      * test_image had lost its markdown input literal; reconstructed from
        the expected output (link=spacer.gif, title=spacer, alt=spacer).
      * test_line_break needs two trailing spaces for a markdown hard
        break; the second space had been collapsed.
    """
    def setUp(self):
        self.block = BlockRenderer(h.EXT_FENCED_CODE | h.EXT_TABLES)
        self.doc = DocRenderer()
        self.footnote = FootnoteRenderer(h.EXT_FOOTNOTES)
        self.low = LowRenderer()
        self.paragraph = ParagraphRenderer()
        self.post = PostRenderer()
        self.pre = PreRenderer()
        self.span = SpanRenderer(h.EXT_AUTOLINK | h.EXT_HIGHLIGHT | h.EXT_QUOTE | h.EXT_STRIKETHROUGH | h.EXT_SUPERSCRIPT)
        self.underline = UnderlineRenderer(h.EXT_UNDERLINE)

    def md(self, md, renderer):
        return getattr(self, renderer).render(md)

    # Document level
    def test_preprocess(self):
        supplied = 'First!'
        expected = 'Nop! First!'

        self.assertEqual(self.md(supplied, 'pre'), expected)

    def test_doc_header_and_footer(self):
        supplied = 'Two.'
        expected = 'One.\nTwo.\nFive.'

        self.assertEqual(self.md(supplied, 'doc'), expected)

    def test_postprocess(self):
        supplied = 'echo'
        expected = 'echo ohce'

        self.assertEqual(self.md(supplied, 'post'), expected)

    # Block level
    def test_block_code(self):
        supplied = '```bash\n$ :(){ :|:& };:\n```'
        expected = '[BLOCK_CODE language=bash] $ :(){ :|:& };:\n'

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_block_html(self):
        supplied = '<p>Hi.</p>'
        expected = '[BLOCK_HTML] <p>Hi.</p>\n'

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_block_quote(self):
        supplied = '> Echo.'
        expected = '[BLOCK_QUOTE] <p>Echo.</p>\n'

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_footnotes(self):
        supplied = 'What you looking at? [^1]\n\n[^1]: Yeah, I\'m talking to you pal.'
        expected = dedent('''\
            <p>What you looking at? [FOOTNOTE_REF number=1]</p>
            [FOOTNOTES]
            [FOOTNOTE_DEF number=1] <p>Yeah, I'm talking to you pal.</p>
            ''')

        self.assertEqual(self.md(supplied, 'footnote'), expected)

    def test_header(self):
        supplied = '## One more to go.'
        expected = '[HEADER level=2] One more to go.'

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_hrule(self):
        supplied = '---'
        expected = '[HRULE]'

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_list_ordered(self):
        supplied = '1. Ehh\n2. Bee\n3. Eee'
        expected = dedent('''\
            [LIST ordered=True]
            [LIST_ITEM ordered=True] Ehh
            [LIST_ITEM ordered=True] Bee
            [LIST_ITEM ordered=True] Eee
            ''')

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_list_unordered(self):
        supplied = '+ One\n+ Two\n+ Five'
        expected = dedent('''\
            [LIST ordered=False]
            [LIST_ITEM ordered=False] One
            [LIST_ITEM ordered=False] Two
            [LIST_ITEM ordered=False] Five
            ''')

        self.assertEqual(self.md(supplied, 'block'), expected)

    def test_paragraph(self):
        supplied = 'One might say this is soul sucking...'
        expected = '[PARAGRAPH] One might say this is soul sucking...'

        self.assertEqual(self.md(supplied, 'paragraph'), expected)

    def test_table(self):
        supplied = dedent(u'''\
            | 1   | 2   | 3   |
            | --- | --- | --- |
            | X   | X   | O   |
            | O   | O   | X   |
            | X   | O   | X   |
            ''')
        expected = dedent('''\
            [TABLE]
            [HEADER]
            [TABLE_ROW]
            [TABLE_CELL text=1][TABLE_CELL text=2][TABLE_CELL text=3]
            [BODY]
            [TABLE_ROW]
            [TABLE_CELL text=X][TABLE_CELL text=X][TABLE_CELL text=O]
            [TABLE_ROW]
            [TABLE_CELL text=O][TABLE_CELL text=O][TABLE_CELL text=X]
            [TABLE_ROW]
            [TABLE_CELL text=X][TABLE_CELL text=O][TABLE_CELL text=X]''')

        self.assertEqual(self.md(supplied, 'block'), expected)

    # Span level
    def test_autolink(self):
        supplied = 'https://github.com/'
        expected = '[AUTOLINK email=False] https://github.com/'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_codespan(self):
        supplied = '`$ rm -Rf tests/`'
        expected = '[CODESPAN] $ rm -Rf tests/'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_double_emphasis(self):
        supplied = '__strong__'
        expected = '[DOUBLE_EMPHASIS] strong'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_emphasis(self):
        supplied = '_wat_'
        expected = '[EMPHASIS] wat'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_highlight(self):
        supplied = '==blink=='
        expected = '[HIGHLIGHT] blink'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_image(self):
        supplied = '![spacer](spacer.gif "spacer")'
        expected = '[IMAGE link=spacer.gif title=spacer alt=spacer]'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_image_ref(self):
        supplied = '![spacer][spacer]\n\n[spacer]: spacer.gif'
        expected = '[IMAGE link=spacer.gif title=None alt=spacer]'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_line_break(self):
        supplied = 'So.  \nTired.'
        expected = 'So.[LINE_BREAK]Tired.'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_link(self):
        supplied = '[GitHub](https://github.com/)'
        expected = '[LINK link=https://github.com/ title=None] GitHub'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_link_ref(self):
        supplied = '[GitHub][github]\n\n[github]: https://github.com/ "GitHub"'
        expected = '[LINK link=https://github.com/ title=GitHub] GitHub'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_quote(self):
        supplied = '"Air quotes are obnoxious."'
        expected = '[QUOTE] Air quotes are obnoxious.'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_raw_html_tag(self):
        supplied = '<halp/>'
        expected = '[RAW_HTML_TAG] <halp/>'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_strikethrough(self):
        supplied = 'I\'m ~~running~~ out of ideas.'
        expected = "I'm [STRIKETHROUGH] running out of ideas."

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_superscript(self):
        supplied = '^bro'
        expected = '[SUPERSCRIPT] bro'

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_triple_emphasis(self):
        supplied = 'Triple emphasis? That\'s ___absurd___.'
        expected = "Triple emphasis? That's [TRIPLE_EMPHASIS] absurd."

        self.assertEqual(self.md(supplied, 'span'), expected)

    def test_underline(self):
        supplied = 'That\'s _it_?'
        expected = "That's [UNDERLINE] it?"

        self.assertEqual(self.md(supplied, 'underline'), expected)

    # Low-level
    def test_low_level(self):
        supplied = '☃'
        expected = '[NORMAL_TEXT] [ENTITY] ☃'

        self.assertEqual(self.md(supplied, 'low'), expected)
|
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for parser.py."""
import unittest2 as unittest
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.optparser import ArgumentParser
from webkitpy.style.optparser import ArgumentPrinter
from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions
from webkitpy.style.optparser import DefaultCommandOptionValues
class ArgumentPrinterTest(unittest.TestCase):

    """Tests the ArgumentPrinter class."""

    _printer = ArgumentPrinter()

    def _create_options(self,
                        output_format='emacs',
                        min_confidence=3,
                        filter_rules=None,
                        git_commit=None):
        """Return a ProcessorOptions instance for testing.

        BUG FIX: filter_rules previously used a mutable default argument
        (`[]`), which is shared across all calls; it now defaults to None
        and a fresh list is created per call.
        """
        if filter_rules is None:
            filter_rules = []
        return ProcessorOptions(filter_rules=filter_rules,
                                git_commit=git_commit,
                                min_confidence=min_confidence,
                                output_format=output_format)

    def test_to_flag_string(self):
        options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git')
        self.assertEqual('--filter=+foo,-bar --git-commit=git '
                         '--min-confidence=5 --output=vs7',
                         self._printer.to_flag_string(options))

        # This is to check that --filter and --git-commit do not
        # show up when not user-specified.
        options = self._create_options()
        self.assertEqual('--min-confidence=3 --output=emacs',
                         self._printer.to_flag_string(options))
class ArgumentParserTest(LoggingTestCase):

    """Test the ArgumentParser class."""

    class _MockStdErr(object):
        # Swallows parser error output so nothing prints during tests.

        def write(self, message):
            # We do not want the usage string or style categories
            # to print during unit tests, so print nothing.
            return

    def _parse(self, args):
        """Call a test parser.parse()."""
        parser = self._create_parser()
        return parser.parse(args)

    def _create_defaults(self):
        """Return a DefaultCommandOptionValues instance for testing."""
        # NOTE(review): base_filter_rules is assigned but never used here;
        # _create_parser passes [] instead — looks like leftover code,
        # confirm intent before removing.
        base_filter_rules = ["-", "+whitespace"]
        return DefaultCommandOptionValues(min_confidence=3,
                                          output_format="vs7")

    def _create_parser(self):
        """Return an ArgumentParser instance for testing."""
        default_options = self._create_defaults()

        all_categories = ["build" ,"whitespace"]

        mock_stderr = self._MockStdErr()

        return ArgumentParser(all_categories=all_categories,
                              base_filter_rules=[],
                              default_options=default_options,
                              mock_stderr=mock_stderr,
                              usage="test usage")

    def test_parse_documentation(self):
        parse = self._parse

        # FIXME: Test both the printing of the usage string and the
        #        filter categories help.

        # Request the usage string.
        self.assertRaises(SystemExit, parse, ['--help'])
        # Request default filter rules and available style categories.
        self.assertRaises(SystemExit, parse, ['--filter='])

    def test_parse_bad_values(self):
        parse = self._parse

        # Pass an unsupported argument.
        self.assertRaises(SystemExit, parse, ['--bad'])
        self.assertLog(['ERROR: no such option: --bad\n'])

        self.assertRaises(SystemExit, parse, ['--min-confidence=bad'])
        self.assertLog(['ERROR: option --min-confidence: '
                        "invalid integer value: 'bad'\n"])
        self.assertRaises(SystemExit, parse, ['--min-confidence=0'])
        self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: '
                        'value must be between 1 and 5\n'])
        self.assertRaises(SystemExit, parse, ['--min-confidence=6'])
        self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: '
                        'value must be between 1 and 5\n'])
        parse(['--min-confidence=1'])  # works
        parse(['--min-confidence=5'])  # works

        self.assertRaises(SystemExit, parse, ['--output=bad'])
        self.assertLog(['ERROR: option --output-format: invalid choice: '
                        "'bad' (choose from 'emacs', 'vs7')\n"])
        parse(['--output=vs7'])  # works

        # Pass a filter rule not beginning with + or -.
        self.assertRaises(SystemExit, parse, ['--filter=build'])
        self.assertLog(['ERROR: Invalid filter rule "build": '
                        'every rule must start with + or -.\n'])
        parse(['--filter=+build'])  # works

    def test_parse_default_arguments(self):
        parse = self._parse

        (files, options) = parse([])

        self.assertEqual(files, [])

        self.assertEqual(options.filter_rules, [])
        self.assertIsNone(options.git_commit)
        self.assertFalse(options.diff_files)
        self.assertFalse(options.is_verbose)
        self.assertEqual(options.min_confidence, 3)
        self.assertEqual(options.output_format, 'vs7')

    def test_parse_explicit_arguments(self):
        parse = self._parse

        # Pass non-default explicit values.
        (files, options) = parse(['--min-confidence=4'])
        self.assertEqual(options.min_confidence, 4)
        (files, options) = parse(['--output=emacs'])
        self.assertEqual(options.output_format, 'emacs')
        (files, options) = parse(['-g', 'commit'])
        self.assertEqual(options.git_commit, 'commit')
        (files, options) = parse(['--git-commit=commit'])
        self.assertEqual(options.git_commit, 'commit')
        (files, options) = parse(['--git-diff=commit'])
        self.assertEqual(options.git_commit, 'commit')
        (files, options) = parse(['--verbose'])
        self.assertTrue(options.is_verbose)
        (files, options) = parse(['--diff-files', 'file.txt'])
        self.assertTrue(options.diff_files)

        # Pass user_rules.
        (files, options) = parse(['--filter=+build,-whitespace'])
        self.assertEqual(options.filter_rules,
                         ["+build", "-whitespace"])

        # Pass spurious white space in user rules.
        (files, options) = parse(['--filter=+build, -whitespace'])
        self.assertEqual(options.filter_rules,
                         ["+build", "-whitespace"])

    def test_parse_files(self):
        parse = self._parse

        (files, options) = parse(['foo.cpp'])
        self.assertEqual(files, ['foo.cpp'])

        # Pass multiple files.
        (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
        self.assertEqual(files, ['foo.cpp', 'bar.cpp'])
class CommandOptionValuesTest(unittest.TestCase):
    """Tests the CommandOptionValues (ProcessorOptions) class."""

    def test_init(self):
        """Test the __init__() constructor."""
        # Defaults.
        defaults = ProcessorOptions()
        self.assertEqual(defaults.filter_rules, [])
        self.assertIsNone(defaults.git_commit)
        self.assertFalse(defaults.is_verbose)
        self.assertEqual(defaults.min_confidence, 1)
        self.assertEqual(defaults.output_format, "emacs")
        # Argument validation: bad values raise, boundary values pass.
        self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
        ProcessorOptions(output_format="emacs")  # No ValueError: works
        ProcessorOptions(output_format="vs7")  # works
        self.assertRaises(ValueError, ProcessorOptions, min_confidence=0)
        self.assertRaises(ValueError, ProcessorOptions, min_confidence=6)
        ProcessorOptions(min_confidence=1)  # works
        ProcessorOptions(min_confidence=5)  # works
        # Attribute assignment.
        options = ProcessorOptions(filter_rules=["+"],
                                   git_commit="commit",
                                   is_verbose=True,
                                   min_confidence=3,
                                   output_format="vs7")
        self.assertEqual(options.filter_rules, ["+"])
        self.assertEqual(options.git_commit, "commit")
        self.assertTrue(options.is_verbose)
        self.assertEqual(options.min_confidence, 3)
        self.assertEqual(options.output_format, "vs7")

    def test_eq(self):
        """Test the __eq__() equality method."""
        self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions()))
        # Build a baseline with every default spelled out explicitly, so
        # each inequality check below differs in exactly one argument and
        # we are sure we assumed the right default values.
        baseline = ProcessorOptions(filter_rules=[],
                                    git_commit=None,
                                    is_verbose=False,
                                    min_confidence=1,
                                    output_format="emacs")
        # Verify the baseline really equals a default instance.
        self.assertTrue(baseline.__eq__(ProcessorOptions()))
        # A difference in any single argument must break equality.
        for kwargs in ({"filter_rules": ["+"]},
                       {"git_commit": "commit"},
                       {"is_verbose": True},
                       {"min_confidence": 2},
                       {"output_format": "vs7"}):
            self.assertFalse(baseline.__eq__(ProcessorOptions(**kwargs)))

    def test_ne(self):
        """Test the __ne__() inequality method."""
        # By default, __ne__ always returns true on different objects.
        # Checking the distinguishing case is enough to verify that the
        # code defines __ne__.
        self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions()))
|
|
from configuration.cache import get_configuration
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from unittest.mock import patch
from accounts.tests.test_models import create_new_user
from configuration.configuration import QuestionnaireConfiguration
from qcat.tests import TestCase
from search.views import (
admin,
delete_all,
index,
update,
)
# URL route names of the search app, resolved with reverse() in the tests.
route_search_delete_all = 'search:delete_all'
route_search_admin = 'search:admin'
route_search_index = 'search:index'
route_search_search = 'search:search'
route_search_update = 'search:update'
class AdminTest(TestCase):
    """Tests for the search admin view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.url = reverse(route_search_admin)

    def test_login_required(self):
        # Anonymous clients end up on the login page.
        response = self.client.get(self.url, follow=True)
        self.assertTemplateUsed(response, 'login.html')

    def test_requires_superuser_permissions(self):
        request = self.factory.get(self.url)
        request.user = create_new_user()
        request.session = {}
        with self.assertRaises(PermissionDenied):
            admin(request)

    def test_renders_correct_template(self):
        request = self.factory.get(self.url)
        superuser = create_new_user()
        superuser.is_superuser = True
        request.user = superuser
        request.session = {}
        response = admin(request)
        self.assertEqual(response.status_code, 200)
@patch('search.views.messages')
class IndexTest(TestCase):
    """Tests for the search index view (create/update an index for a
    configuration code and edition).

    ``search.views.messages`` is patched for the whole class, so every test
    method receives a trailing ``mock_messages`` argument (patch decorators
    are applied bottom-up, so it is always the last mock parameter).
    """
    fixtures = [
        'sample_global_key_values',
        'sample',
    ]
    def setUp(self):
        # Superuser GET request to the index view for sample/2015,
        # reused by most tests below.
        self.factory = RequestFactory()
        self.url = reverse(
            route_search_index,
            kwargs={'configuration': 'sample', 'edition': '2015'})
        user = create_new_user()
        user.is_superuser = True
        self.request = self.factory.get(self.url)
        self.request.user = user
        self.request.session = {}
    def test_login_required(self, mock_messages):
        # Anonymous clients are redirected to the login page.
        res = self.client.get(self.url, follow=True)
        self.assertTemplateUsed(res, 'login.html')
    def test_requires_superuser_permissions(self, mock_messages):
        # A regular (non-superuser) user is rejected.
        request = self.factory.get(self.url)
        request.user = create_new_user(id=99, email='foo@bar.com')
        request.session = {}
        with self.assertRaises(PermissionDenied):
            index(request, 'foo', 'bar')
    @patch('search.views.get_configuration')
    def test_calls_QuestionnaireConfiguration(self, mock_conf, mock_messages):
        # The view looks up the configuration by code and edition.
        index(self.request, 'sample', '2015')
        mock_conf.assert_called_once_with(code='sample', edition='2015')
    @patch('search.views.get_configuration')
    def test_returns_bad_request_if_errors_in_configuration(
            self, mock_conf, mock_messages):
        # NOTE(review): this sets an attribute on the patched function mock
        # itself, not on its return value — presumably the view still sees a
        # truthy configuration error because the return-value mock's
        # get_configuration_errors() yields a truthy MagicMock; confirm.
        mock_conf.configuration_error = 'error'
        res = index(self.request, 'sample', '2015')
        self.assertEqual(res.status_code, 400)
    @patch('search.views.get_mappings')
    def test_calls_get_mappings(self, mock_get_mappings, mock_messages):
        index(self.request, 'sample', '2015')
        mock_get_mappings.assert_called_once_with()
    @patch('search.views.get_configuration')
    @patch('search.views.get_mappings')
    @patch('search.views.create_or_update_index')
    def test_calls_create_or_update_index(
            self, mock_create_index, mock_get_mappings, mock_get_configuration,
            mock_messages):
        # No configuration errors; create_or_update_index must receive the
        # resolved configuration and mappings.
        mock_get_configuration.return_value.get_configuration_errors.return_value = None
        mock_create_index.return_value = None, None, ''
        index(self.request, 'sample', '2015')
        mock_create_index.assert_called_once_with(
            configuration=mock_get_configuration.return_value,
            mappings=mock_get_mappings.return_value)
    @patch('search.views.get_mappings')
    @patch('search.views.create_or_update_index')
    @patch.object(QuestionnaireConfiguration, '__init__')
    @patch.object(QuestionnaireConfiguration, 'get_configuration_errors')
    def test_adds_message_and_redirects_if_not_successful(
            self, mock_get_conf_errors, mock_Conf, mock_create_index,
            mock_get_mappings, mock_messages):
        # create_or_update_index reports an error message -> error flash
        # message and a redirect (302).
        mock_Conf.return_value = None
        mock_get_conf_errors.return_value = None
        mock_create_index.return_value = None, None, 'error_msg'
        res = index(self.request, 'sample', '2015')
        mock_messages.error.assert_called_once_with(
            self.request, 'The following error(s) occured: error_msg')
        self.assertEqual(res.status_code, 302)
    @patch('search.views.get_mappings')
    @patch('search.views.create_or_update_index')
    @patch.object(QuestionnaireConfiguration, '__init__')
    @patch.object(QuestionnaireConfiguration, 'get_configuration_errors')
    def test_adds_message_and_redirects_if_successful(
            self, mock_get_conf_errors, mock_Conf, mock_create_index,
            mock_get_mappings, mock_messages):
        # Success -> success flash message and a redirect (302).
        mock_Conf.return_value = None
        mock_get_conf_errors.return_value = None
        mock_create_index.return_value = True, None, ''
        res = index(self.request, 'sample', '2015')
        mock_messages.success.assert_called_once_with(
            self.request, 'Index "sample" was created or updated.')
        self.assertEqual(res.status_code, 302)
@patch('search.views.messages')
class UpdateTest(TestCase):
    """Tests for the search update view (re-index the questionnaires of a
    configuration).

    ``search.views.messages`` is patched for the whole class, so every test
    method receives a trailing ``mock_messages`` argument.
    """
    fixtures = [
        'sample_global_key_values',
        'sample',
    ]
    def setUp(self):
        # Superuser GET request to the update view for sample/2015.
        self.factory = RequestFactory()
        self.url = reverse(
            route_search_update,
            kwargs={'configuration': 'sample', 'edition': '2015'})
        user = create_new_user()
        user.is_superuser = True
        self.request = self.factory.get(self.url)
        self.request.user = user
        self.request.session = {}
    def test_login_required(self, mock_messages):
        # Anonymous clients are redirected to the login page.
        res = self.client.get(self.url, follow=True)
        self.assertTemplateUsed(res, 'login.html')
    def test_requires_superuser_permissions(self, mock_messages):
        # A regular (non-superuser) user is rejected.
        request = self.factory.get(self.url)
        request.user = create_new_user(id=99, email='foo@bar.com')
        request.session = {}
        with self.assertRaises(PermissionDenied):
            update(request, 'sample', '2015')
    @patch('search.views.put_questionnaire_data')
    @patch.object(QuestionnaireConfiguration, '__init__')
    @patch.object(QuestionnaireConfiguration, 'get_configuration_errors')
    def test_calls_put_questionnaire_data(
            self, mock_get_conf_errors, mock_Conf, mock_put_questionnaire_data,
            mock_messages):
        # The view delegates the indexing to put_questionnaire_data.
        mock_Conf.return_value = None
        mock_get_conf_errors.return_value = None
        mock_put_questionnaire_data.return_value = None, []
        update(self.request, 'sample', '2015')
        self.assertEqual(mock_put_questionnaire_data.call_count, 1)
    @patch('search.views.put_questionnaire_data')
    @patch.object(QuestionnaireConfiguration, '__init__')
    @patch.object(QuestionnaireConfiguration, 'get_configuration_errors')
    def test_adds_message_and_redirects_if_not_successful(
            self, mock_get_conf_errors, mock_Conf, mock_put_questionnaire_data,
            mock_messages):
        # put_questionnaire_data reports errors -> error flash message and a
        # redirect (302).
        mock_Conf.return_value = None
        mock_get_conf_errors.return_value = None
        mock_put_questionnaire_data.return_value = None, ['error_msg']
        res = update(self.request, 'sample', '2015')
        mock_messages.error.assert_called_once_with(
            self.request, 'The following error(s) occured: error_msg')
        self.assertEqual(res.status_code, 302)
    @patch('search.views.put_questionnaire_data')
    @patch.object(QuestionnaireConfiguration, '__init__')
    @patch.object(QuestionnaireConfiguration, 'get_configuration_errors')
    def test_adds_message_and_redirects_if_successful(
            self, mock_get_conf_errors, mock_Conf, mock_put_questionnaire_data,
            mock_messages):
        # Success (0 questionnaires indexed, no errors) -> success flash
        # message and a redirect (302).
        mock_Conf.return_value = None
        mock_get_conf_errors.return_value = None
        mock_put_questionnaire_data.return_value = 0, []
        res = update(self.request, 'sample', '2015')
        mock_messages.success.assert_called_once_with(
            self.request,
            '0 Questionnaires of configuration "sample" successfully indexed.')
        self.assertEqual(res.status_code, 302)
@patch('search.views.messages')
class DeleteTest(TestCase):
    """Tests for the search delete_all view (drop every search index).

    ``search.views.messages`` is patched for the whole class, so every test
    method receives a trailing ``mock_messages`` argument.
    """
    def setUp(self):
        # Superuser GET request to the delete_all view.
        self.factory = RequestFactory()
        self.url = reverse(route_search_delete_all)
        user = create_new_user()
        user.is_superuser = True
        self.request = self.factory.get(self.url)
        self.request.user = user
        self.request.session = {}
    def test_login_required(self, mock_messages):
        # Anonymous clients are redirected to the login page.
        res = self.client.get(self.url, follow=True)
        self.assertTemplateUsed(res, 'login.html')
    def test_requires_superuser_permissions(self, mock_messages):
        # A regular (non-superuser) user is rejected.
        request = self.factory.get(self.url)
        request.user = create_new_user(id=99, email='foo@bar.com')
        request.session = {}
        with self.assertRaises(PermissionDenied):
            delete_all(request)
    @patch('search.views.delete_all_indices')
    def test_calls_delete_all_indices(
            self, mock_delete_all_indices, mock_messages):
        # The view delegates the deletion to delete_all_indices.
        mock_delete_all_indices.return_value = True, ''
        delete_all(self.request)
        mock_delete_all_indices.assert_called_once_with()
    @patch('search.views.delete_all_indices')
    def test_adds_message_and_redirects_if_not_successful(
            self, mock_delete_all_indices, mock_messages):
        # Failure -> error flash message and a redirect (302).
        mock_delete_all_indices.return_value = False, 'foo'
        res = delete_all(self.request)
        mock_messages.error.assert_called_once_with(
            self.request, 'The following error(s) occured: foo')
        self.assertEqual(res.status_code, 302)
    @patch('search.views.delete_all_indices')
    def test_adds_message_and_redirects_if_successful(
            self, mock_delete_all_indices, mock_messages):
        # Success -> success flash message and a redirect (302).
        mock_delete_all_indices.return_value = True, ''
        res = delete_all(self.request)
        mock_messages.success.assert_called_once_with(
            self.request, 'All indices successfully deleted.')
        self.assertEqual(res.status_code, 302)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GroupResolution.status'
db.add_column(
'sentry_groupresolution',
'status',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(default=0),
keep_default=False
)
    def backwards(self, orm):
        """Drop the 'status' column added by forwards()."""
        # Deleting field 'GroupResolution.status'
        db.delete_column('sentry_groupresolution', 'status')
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2015, 12, 2, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']",
'null': 'True'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'storage_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.group': {
'Meta': {
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'),)"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
client.py
---------
TxTrader Client module - Expose class API as user interface.
Copyright (c) 2015 Reliance Systems Inc. <mkrueger@rstms.net>
Licensed under the MIT license. See LICENSE for details.
"""
import os
import sys
import requests
from types import *
import re
from txtrader_client.version import VERSION
import txtrader_client.defaults
class API():
    """HTTP client exposing the TxTrader server API as Python methods.

    Configuration is resolved in three layers, lowest to highest priority:
      1. defaults from the ``txtrader_client.defaults`` module
      2. ``TXTRADER_*`` environment variables
      3. the ``config`` dict passed to the constructor
    """

    def __init__(self, mode='rtx', config=None):
        """Resolve configuration and build the server URL and credentials.

        :param mode: backend mode label (accepted for compatibility; not read here)
        :param config: optional dict of TXTRADER_* overrides (highest priority)
        """
        # NOTE: config defaults to None rather than {} to avoid the shared
        # mutable-default-argument pitfall.
        # 1st: configuration default values from module defaults
        defaults = {
            k: getattr(txtrader_client.defaults, k)
            for k in dir(txtrader_client.defaults) if not k.startswith('__')
        }
        # 2nd: any variables set in environment override defaults
        self.config = {k: os.environ.get(k, defaults[k]) for k in defaults}
        # 3rd: parameters passed in config take highest priority
        self.config.update(config or {})
        protocol = self._config('PROTOCOL')
        hostname = self._config('HOST')
        port = self._config('HTTP_PORT')
        self.url = f'{protocol}://{hostname}:{port}'
        self.username = self._config('USERNAME')
        self.password = self._config('PASSWORD')
        self.account = self._config('API_ACCOUNT')
        self.route = self._config('ROUTE')

    def _config(self, key):
        """Return the configured value for TXTRADER_<key>.

        Raises KeyError with a descriptive message when the key is absent.
        """
        name = f'TXTRADER_{key}'
        try:
            return self.config[name]
        except KeyError as ex:
            # BUGFIX: the original did `raise ex(f'...')`, which *calls* the
            # caught KeyError instance (not callable) and crashed with an
            # unrelated TypeError; raise a real KeyError with the message.
            raise KeyError(f'missing config value {name}') from ex

    def _call_txtrader_api(self, function_name, args):
        """Invoke a server endpoint and return its decoded JSON response.

        Uses POST with a JSON body when args is non-empty, GET otherwise.
        Raises requests.HTTPError on a 4xx/5xx response.
        """
        headers = {'Content-type': 'application/json', 'Connection': 'close'}
        parameters = dict(headers=headers, auth=(self.username, self.password))
        if args:
            method = requests.post
            parameters['json'] = args
        else:
            method = requests.get
        with method(f"{self.url}/{function_name}", **parameters) as r:
            # raise_for_status() is a no-op for success codes, so the previous
            # status_code pre-check was redundant.
            r.raise_for_status()
            return r.json()

    def help(self):
        """Return dict containing brief documentation for each server API call"""
        return self._call_txtrader_api('help', {})

    def status(self):
        """return string describing current API connection status"""
        return self._call_txtrader_api('status', {})

    def version(self):
        """Return string containing release version of current server instance"""
        return self._call_txtrader_api('version', {})

    def shutdown(self, message: str):
        """Request server shutdown; post message to logs"""
        return self._call_txtrader_api('shutdown', {'message': message})

    def uptime(self):
        """Return string showing start time and elapsed time for current server instance"""
        return self._call_txtrader_api('uptime', {})

    def time(self):
        """Return formatted timestamp string (YYYY-MM-DD HH:MM:SS) matching latest datafeed time update"""
        return self._call_txtrader_api('time', {})

    def query_symbol_bars(self, symbol: str):
        """Return array of current live bar data for given symbol"""
        return self._call_txtrader_api('query_symbol_bars', {'symbol': symbol})

    def query_bars(self, symbol, period, start, end):
        """Return array of bar data for symbol=<str> period=<minutes_as_integer|hour|day|month> start,end='YYYY-MM-DD HH:MM[:00]'"""
        if not isinstance(symbol, str):
            raise TypeError('symbol: %s' % repr(symbol))
        if not isinstance(period, (int, str)):
            raise TypeError('period: %s' % repr(period))
        if isinstance(period, str):
            if re.match(r'^\d+$', period):
                # all-digit strings are a minute count
                period = int(period)
            else:
                # BUGFIX: an empty period string previously crashed in int('')
                # with a confusing message; report it as an invalid period.
                period = period[0].upper() if period else ''
                if period not in ('D', 'W', 'M'):
                    raise ValueError('period: %s must match ^([Dd]|[Mm]|[Ww]).* (DAY,WEEK,MONTH)' % repr(period))
        for label, value in (('start', start), ('end', end)):
            if isinstance(value, str):
                if value == '.':
                    # '.' is passed through verbatim (server-side shorthand,
                    # presumably "now" -- TODO confirm against server docs)
                    pass
                elif re.match(r'^-\d*$', value):
                    # '-N' is passed through verbatim (server-side relative
                    # offset -- TODO confirm against server docs)
                    pass
                elif not re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}(:\d{2})*$', value):
                    raise ValueError('%s: %s must match pattern YYYY-MM-DD HH:MM[:00]' % (label, repr(value)))
            elif not isinstance(value, int):
                raise TypeError('%s: %s' % (label, repr(value)))
        args = {'symbol': symbol, 'period': period, 'start': start, 'end': end}
        return self._call_txtrader_api('query_bars', args)

    def add_symbol(self, symbol: str):
        """Request subscription to a symbol for price updates, bardata and order entry"""
        return self._call_txtrader_api('add_symbol', {'symbol': symbol})

    def del_symbol(self, symbol: str):
        """Delete subscription to a symbol for price updates and order entry"""
        return self._call_txtrader_api('del_symbol', {'symbol': symbol})

    def query_symbols(self):
        """Return the list of active symbols"""
        return self._call_txtrader_api('query_symbols', {'data': False})

    def query_all_symbols(self):
        """Return dict keyed by symbol containing current data for all active symbols"""
        return self._call_txtrader_api('query_symbols', {'data': True})

    def query_symbol(self, symbol: str):
        """Return dict containing current data for given symbol"""
        return self._call_txtrader_api('query_symbol', {'symbol': symbol})

    def query_symbol_data(self, symbol: str):
        """Return dict containing rawdata for given symbol"""
        return self._call_txtrader_api('query_symbol_data', {'symbol': symbol})

    def query_accounts(self):
        """Return array of account names"""
        return self._call_txtrader_api('query_accounts', {})

    def query_account(self, account: str, fields: str = None):
        """Query account data for account. [fields] is list of fields to select; None=all fields"""
        if fields and not isinstance(fields, str):
            raise TypeError('fields: %s' % repr(fields))
        args = {'account': account}
        if fields:
            args['fields'] = fields
        return self._call_txtrader_api('query_account', args)

    def set_account(self, account: str):
        """Select current active trading account"""
        ret = self._call_txtrader_api('set_account', {'account': account})
        # remember the active account locally only if the server accepted it
        if ret:
            self.account = account
        return ret

    def query_positions(self):
        """Return dict keyed by account containing dicts of position data fields"""
        return self._call_txtrader_api('query_positions', {})

    def query_orders(self):
        """Return dict keyed by order id containing dicts of order data fields"""
        return self._call_txtrader_api('query_orders', {})

    def query_tickets(self):
        """Return dict keyed by order id containing dicts of staged order ticket data fields"""
        return self._call_txtrader_api('query_tickets', {})

    def query_order(self, order_id: str):
        """Return dict containing order/ticket status fields for given order id"""
        return self._call_txtrader_api('query_order', {'id': order_id})

    def cancel_order(self, order_id: str):
        """Request cancellation of a pending order"""
        return self._call_txtrader_api('cancel_order', {'id': order_id})

    def query_order_executions(self, order_id: str):
        """Return dict keyed by execution id containing dicts of execution report data fields for given order_id"""
        return self._call_txtrader_api('query_order_executions', {'id': order_id})

    def query_execution(self, execution_id: str):
        """Return dict containing execution report data fields for given execution id"""
        return self._call_txtrader_api('query_execution', {'id': execution_id})

    def query_executions(self):
        """Return dict keyed by execution id containing dicts of execution report data fields"""
        return self._call_txtrader_api('query_executions', {})

    def set_order_route(self, route: str):
        """Set order route data given route {'route_name': {parameter: value, ...} (JSON string will be parsed into a route dict)}"""
        return self._call_txtrader_api('set_order_route', {'route': route})

    def get_order_route(self):
        """Return current order route as a dict"""
        return self._call_txtrader_api('get_order_route', {})

    # The order-entry methods below coerce prices to float and quantities to
    # int before transmission, matching the original stoplimit_order behavior
    # and keeping the JSON payloads consistent across all order types.

    def market_order(self, account: str, route: str, symbol: str, quantity: int):
        """Submit a market order, returning dict containing new order fields"""
        return self._call_txtrader_api(
            'market_order', {
                'account': account,
                'route': route,
                'symbol': symbol,
                'quantity': int(quantity)
            }
        )

    def stage_market_order(self, tag: str, account: str, route: str, symbol: str, quantity: int):
        """Submit a staged market order (displays as staged in GUI, requiring manual aproval), returning dict containing new order fields"""
        return self._call_txtrader_api(
            'stage_market_order', {
                'tag': tag,
                'account': account,
                'route': route,
                'symbol': symbol,
                'quantity': int(quantity)
            }
        )

    def limit_order(self, account: str, route: str, symbol: str, limit_price: float, quantity: int):
        """Submit a limit order, returning dict containing new order fields"""
        return self._call_txtrader_api(
            'limit_order', {
                'account': account,
                'route': route,
                'symbol': symbol,
                'limit_price': float(limit_price),
                'quantity': int(quantity)
            }
        )

    def stop_order(self, account: str, route: str, symbol: str, stop_price: float, quantity: int):
        """Submit a stop order, returning dict containing new order fields"""
        return self._call_txtrader_api(
            'stop_order', {
                'account': account,
                'route': route,
                'symbol': symbol,
                'stop_price': float(stop_price),
                'quantity': int(quantity)
            }
        )

    def stoplimit_order(
        self, account: str, route: str, symbol: str, stop_price: float, limit_price: float, quantity: int
    ):
        """Submit a stop-limit order, returning dict containing new order fields"""
        return self._call_txtrader_api(
            'stoplimit_order', {
                'account': account,
                'route': route,
                'symbol': symbol,
                'stop_price': float(stop_price),
                'limit_price': float(limit_price),
                'quantity': int(quantity)
            }
        )

    def global_cancel(self):
        """Request cancellation of all pending orders"""
        return self._call_txtrader_api('global_cancel', {})
|
|
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
    """Return a SQL template casting a union branch to the field's column type.

    PostgreSQL resolves a UNION over 'unknown'-typed inputs as 'text'
    (https://www.postgresql.org/docs/current/static/typeconv-union-case.html),
    and the field types listed below cannot be implicitly cast back under the
    default configuration, so an explicit CAST is emitted for them.
    """
    needs_cast = output_field.get_internal_type() in (
        "GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField",
    )
    if not needs_cast:
        return '%s'
    # Drop any parenthesized size component: varchar(255) -> varchar.
    column_type = output_field.db_type(self.connection).partition('(')[0]
    return 'CAST(%%s AS %s)' % column_type
def date_extract_sql(self, lookup_type, field_name):
    """Return SQL extracting `lookup_type` from a date column via EXTRACT.

    https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
    """
    if lookup_type != 'week_day':
        return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    # PostgreSQL 'dow' is 0 (Sunday) .. 6 (Saturday); shift by one to the
    # cross-backend convention Sunday=1 .. Saturday=7.
    return "EXTRACT('dow' FROM %s) + 1" % field_name
def date_trunc_sql(self, lookup_type, field_name):
    """Return SQL truncating `field_name` to `lookup_type` precision (e.g. 'year')."""
    # https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
    return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def _convert_field_to_tz(self, field_name, tzname):
    """Wrap `field_name` with an AT TIME ZONE conversion when USE_TZ is enabled."""
    if not settings.USE_TZ:
        return field_name
    return "%s AT TIME ZONE '%s'" % (field_name, tzname)
def datetime_cast_date_sql(self, field_name, tzname):
    """Return SQL casting a (possibly tz-converted) datetime column to a date."""
    return '(%s)::date' % self._convert_field_to_tz(field_name, tzname)
def datetime_cast_time_sql(self, field_name, tzname):
    """Return SQL casting a (possibly tz-converted) datetime column to a time."""
    return '(%s)::time' % self._convert_field_to_tz(field_name, tzname)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
    """Extract `lookup_type` from a datetime column, honoring the time zone."""
    return self.date_extract_sql(
        lookup_type, self._convert_field_to_tz(field_name, tzname)
    )
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
    """Truncate a datetime column to `lookup_type`, honoring the time zone.

    https://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
    """
    tz_field = self._convert_field_to_tz(field_name, tzname)
    return "DATE_TRUNC('%s', %s)" % (lookup_type, tz_field)
def time_trunc_sql(self, lookup_type, field_name):
    """Truncate a time column to `lookup_type` and cast the result back to time."""
    return "DATE_TRUNC('{}', {})::time".format(lookup_type, field_name)
def deferrable_sql(self):
    """Return the SQL suffix making a constraint deferrable until transaction end."""
    # Leading space is intentional: the suffix is appended directly to
    # constraint SQL.
    return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_ids(self, cursor):
    """Return the IDs produced by an INSERT...RETURNING statement.

    The cursor has just executed an INSERT...RETURNING into a table with an
    auto-incrementing ID; each fetched row carries one new ID in its first
    column.
    """
    return [row[0] for row in cursor.fetchall()]
def lookup_cast(self, lookup_type, internal_type=None):
    """Return the SQL wrapper applied to the left-hand side of a lookup."""
    text_lookups = (
        'iexact', 'contains', 'icontains', 'startswith', 'istartswith',
        'endswith', 'iendswith', 'regex', 'iregex',
    )
    if lookup_type in text_lookups:
        # Text-style lookups need a text operand so that things like
        # filter(x__contains=4) work on non-text columns.
        if internal_type in ('IPAddressField', 'GenericIPAddressField'):
            cast = 'HOST(%s)'
        elif internal_type in ('CharField', 'TextField'):
            cast = '%s'
        else:
            cast = '%s::text'
    else:
        cast = '%s'
    # Use UPPER(x) for the case-insensitive variants; it's faster.
    if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
        cast = 'UPPER(%s)' % cast
    return cast
def last_insert_id(self, cursor, table_name, pk_name):
    """Return the most recent value of the serial sequence behind table.pk.

    pg_get_serial_sequence (available since PostgreSQL 8) maps the table and
    column names to the underlying sequence name.
    """
    currval_sql = "SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
        self.quote_name(table_name),
        pk_name,
    )
    cursor.execute(currval_sql)
    return cursor.fetchone()[0]
def no_limit_value(self):
    """Return None: no sentinel is needed, the LIMIT clause is simply omitted."""
    return None
def prepare_sql_script(self, sql):
    """Return the script unsplit, as a one-element list (no statement splitting needed)."""
    return [sql]
def quote_name(self, name):
    """Double-quote `name` as a SQL identifier; quoting once is enough."""
    already_quoted = name.startswith('"') and name.endswith('"')
    return name if already_quoted else '"%s"' % name
def set_time_zone_sql(self):
    """Return parameterized SQL for setting the session time zone."""
    return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
    """Return the statements that empty `tables` and reset their `sequences`.

    A single multi-table 'TRUNCATE x, y, z...;' is used so that tables
    referenced by a foreign key in any other listed table can all be emptied
    together.
    """
    if not tables:
        return []
    table_list = ', '.join(
        style.SQL_FIELD(self.quote_name(table)) for table in tables
    )
    parts = [style.SQL_KEYWORD('TRUNCATE'), table_list]
    if allow_cascade:
        parts.append(style.SQL_KEYWORD('CASCADE'))
    statements = ['%s;' % ' '.join(parts)]
    statements.extend(self.sequence_reset_by_name_sql(style, sequences))
    return statements
def sequence_reset_by_name_sql(self, style, sequences):
    """Return 'SELECT setval(..., 1, false);' statements restarting each sequence at 1."""
    statements = []
    for sequence_info in sequences:
        # An empty/missing column name occurs for m2m tables autogenerated
        # as intermediates (see BaseDatabaseIntrospection.sequence_list);
        # their primary key column is 'id'.
        column_name = sequence_info['column'] or 'id'
        statements.append(
            "%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % (
                style.SQL_KEYWORD('SELECT'),
                style.SQL_TABLE(self.quote_name(sequence_info['table'])),
                style.SQL_FIELD(column_name),
            )
        )
    return statements
def tablespace_sql(self, tablespace, inline=False):
    """Return the clause placing an object (or, when inline, its index) in `tablespace`."""
    template = "USING INDEX TABLESPACE %s" if inline else "TABLESPACE %s"
    return template % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
    """Return setval() statements re-syncing each model's serial sequence.

    Each statement uses `coalesce` to set the sequence to max(pk) when rows
    exist, or 1 when the table is empty, and sets setval's third argument
    (is_called) to true only when rows exist — so the next nextval() yields
    max(pk)+1 or 1 respectively. pg_get_serial_sequence (PostgreSQL 8+)
    resolves the sequence name from the table and column names.
    """
    from django.db import models
    qn = self.quote_name
    # Shared statement template for both regular and m2m tables.
    reset_template = (
        "%s setval(pg_get_serial_sequence('%s','%s'), "
        "coalesce(max(%s), 1), max(%s) %s null) %s %s;"
    )
    output = []
    for model in model_list:
        for f in model._meta.local_fields:
            if isinstance(f, models.AutoField):
                output.append(
                    reset_template % (
                        style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(qn(model._meta.db_table)),
                        style.SQL_FIELD(f.column),
                        style.SQL_FIELD(qn(f.column)),
                        style.SQL_FIELD(qn(f.column)),
                        style.SQL_KEYWORD('IS NOT'),
                        style.SQL_KEYWORD('FROM'),
                        style.SQL_TABLE(qn(model._meta.db_table)),
                    )
                )
                # Only one AutoField is allowed per model; stop looking.
                break
        for f in model._meta.many_to_many:
            if not f.remote_field.through:
                # Auto-created m2m intermediate tables use an 'id' column.
                output.append(
                    reset_template % (
                        style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(qn(f.m2m_db_table())),
                        style.SQL_FIELD('id'),
                        style.SQL_FIELD(qn('id')),
                        style.SQL_FIELD(qn('id')),
                        style.SQL_KEYWORD('IS NOT'),
                        style.SQL_KEYWORD('FROM'),
                        style.SQL_TABLE(qn(f.m2m_db_table())),
                    )
                )
    return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Return the maximum length of an identifier.
The maximum length of an identifier is 63 by default, but can be
changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h.
This implementation returns 63, but can be overridden by a custom
database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode()
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super().subtract_temporals(internal_type, lhs, rhs)
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface to StackStorm.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import json
import time
import argparse
import calendar
import logging
import traceback
import six
import requests
from st2client import __version__
from st2client import models
from st2client.client import Client
from st2client.commands import auth
from st2client.commands import action
from st2client.commands import action_alias
from st2client.commands import keyvalue
from st2client.commands import policy
from st2client.commands import resource
from st2client.commands import sensor
from st2client.commands import trace
from st2client.commands import trigger
from st2client.commands import triggerinstance
from st2client.commands import webhook
from st2client.commands import rule
from st2client.config_parser import CLIConfigParser
from st2client.config_parser import ST2_CONFIG_DIRECTORY
from st2client.config_parser import ST2_CONFIG_PATH
from st2client.exceptions.operations import OperationFailureException
from st2client.utils.date import parse as parse_isotime
from st2client.utils.misc import merge_dicts
from st2client.utils.logging import LogLevelFilter
__all__ = [
    'Shell'
]
LOG = logging.getLogger(__name__)
CLI_DESCRIPTION = 'CLI for StackStorm event-driven automation platform. http://stackstorm.com'
# On-disk location where a retrieved auth token is cached.
CACHED_TOKEN_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, 'token'))
# How many seconds before the token actual expiration date we should consider the token as
# expired. This is used to prevent the operation from failing during the API request because the
# token was just about to expire.
TOKEN_EXPIRATION_GRACE_PERIOD_SECONDS = 15
# Maps Client constructor keyword arguments to [section, option] locations
# in the CLI config file.
CONFIG_OPTION_TO_CLIENT_KWARGS_MAP = {
    'base_url': ['general', 'base_url'],
    'auth_url': ['auth', 'url'],
    'api_url': ['api', 'url'],
    'api_version': ['general', 'api_version'],
    'cacert': ['general', 'cacert'],
    'debug': ['cli', 'debug']
}
class Shell(object):
    """
    Top-level object for the st2 CLI.

    Owns the argument parser, registers every command branch, builds an
    (optionally authenticated) API client from CLI args / environment
    variables / the CLI config file, and dispatches to the selected command.
    """

    def __init__(self):
        # Set up of endpoints is delayed until program is run.
        self.client = None
        # Set up the main parser.
        self.parser = argparse.ArgumentParser(description=CLI_DESCRIPTION)
        # Set up general program options.
        self.parser.add_argument(
            '--version',
            action='version',
            version='%(prog)s {version}'.format(version=__version__))
        self.parser.add_argument(
            '--url',
            action='store',
            dest='base_url',
            default=None,
            help='Base URL for the API servers. Assumes all servers use the '
                 'same base URL and default ports are used. Get ST2_BASE_URL '
                 'from the environment variables by default.'
        )
        self.parser.add_argument(
            '--auth-url',
            action='store',
            dest='auth_url',
            default=None,
            help='URL for the authentication service. Get ST2_AUTH_URL '
                 'from the environment variables by default.'
        )
        self.parser.add_argument(
            '--api-url',
            action='store',
            dest='api_url',
            default=None,
            help='URL for the API server. Get ST2_API_URL '
                 'from the environment variables by default.'
        )
        self.parser.add_argument(
            '--api-version',
            action='store',
            dest='api_version',
            default=None,
            help='API version to use. Get ST2_API_VERSION '
                 'from the environment variables by default.'
        )
        self.parser.add_argument(
            '--cacert',
            action='store',
            dest='cacert',
            default=None,
            help='Path to the CA cert bundle for the SSL endpoints. '
                 'Get ST2_CACERT from the environment variables by default. '
                 'If this is not provided, then SSL cert will not be verified.'
        )
        self.parser.add_argument(
            '--config-file',
            action='store',
            dest='config_file',
            default=None,
            help='Path to the CLI config file'
        )
        self.parser.add_argument(
            '--print-config',
            action='store_true',
            dest='print_config',
            default=False,
            help='Parse the config file and print the values'
        )
        self.parser.add_argument(
            '--skip-config',
            action='store_true',
            dest='skip_config',
            default=False,
            help='Don\'t parse and use the CLI config file'
        )
        self.parser.add_argument(
            '--debug',
            action='store_true',
            dest='debug',
            default=False,
            help='Enable debug mode'
        )
        # Set up list of commands and subcommands.
        self.subparsers = self.parser.add_subparsers()
        self.commands = dict()
        self.commands['action'] = action.ActionBranch(
            'An activity that happens as a response to the external event.',
            self, self.subparsers)
        self.commands['action-alias'] = action_alias.ActionAliasBranch(
            'Action aliases.',
            self, self.subparsers)
        self.commands['auth'] = auth.TokenCreateCommand(
            models.Token, self, self.subparsers, name='auth')
        self.commands['execution'] = action.ActionExecutionBranch(
            'An invocation of an action.',
            self, self.subparsers)
        self.commands['key'] = keyvalue.KeyValuePairBranch(
            'Key value pair is used to store commonly used configuration '
            'for reuse in sensors, actions, and rules.',
            self, self.subparsers)
        self.commands['policy'] = policy.PolicyBranch(
            'Policy that is enforced on a resource.',
            self, self.subparsers)
        self.commands['policy-type'] = policy.PolicyTypeBranch(
            'Type of policy that can be applied to resources.',
            self, self.subparsers)
        self.commands['rule'] = rule.RuleBranch(
            'A specification to invoke an "action" on a "trigger" selectively '
            'based on some criteria.',
            self, self.subparsers)
        self.commands['run'] = action.ActionRunCommand(
            models.Action, self, self.subparsers, name='run', add_help=False)
        self.commands['runner'] = resource.ResourceBranch(
            models.RunnerType,
            'Runner is a type of handler for a specific class of actions.',
            self, self.subparsers, read_only=True)
        self.commands['sensor'] = sensor.SensorBranch(
            'An adapter which allows you to integrate StackStorm with external system ',
            self, self.subparsers)
        self.commands['trace'] = trace.TraceBranch(
            'A group of executions, rules and triggerinstances that are related.',
            self, self.subparsers)
        self.commands['trigger'] = trigger.TriggerTypeBranch(
            'An external event that is mapped to a st2 input. It is the '
            'st2 invocation point.',
            self, self.subparsers)
        self.commands['trigger-instance'] = triggerinstance.TriggerInstanceBranch(
            'Actual instances of triggers received by st2.',
            self, self.subparsers)
        self.commands['webhook'] = webhook.WebhookBranch(
            'Webhooks.',
            self, self.subparsers)

    def get_client(self, args, debug=False):
        """
        Build a Client from CLI args, environment variables and the CLI
        config file, authenticating with config-file credentials when
        present.

        :rtype: :class:`Client`
        """
        ST2_CLI_SKIP_CONFIG = os.environ.get('ST2_CLI_SKIP_CONFIG', 0)
        ST2_CLI_SKIP_CONFIG = int(ST2_CLI_SKIP_CONFIG)
        skip_config = args.skip_config
        skip_config = skip_config or ST2_CLI_SKIP_CONFIG
        # Note: Options provided as the CLI argument have the highest precedence
        # Precedence order: cli arguments > environment variables > rc file variables
        cli_options = ['base_url', 'auth_url', 'api_url', 'api_version', 'cacert']
        cli_options = {opt: getattr(args, opt) for opt in cli_options}
        config_file_options = self._get_config_file_options(args=args)
        kwargs = {}
        if not skip_config:
            # Layer the config file values underneath the CLI-provided ones.
            kwargs = merge_dicts(kwargs, config_file_options)
        kwargs = merge_dicts(kwargs, cli_options)
        kwargs['debug'] = debug
        client = Client(**kwargs)
        if ST2_CLI_SKIP_CONFIG:
            # Config parsing is skipped
            LOG.info('Skipping parsing CLI config')
            return client
        # If credentials are provided in the CLI config use them and try to authenticate
        # NOTE(review): --skip-config only affects the kwargs above; credentials are
        # still read from the config file here - confirm this is intentional.
        rc_config = self._parse_config_file(args=args)
        credentials = rc_config.get('credentials', {})
        username = credentials.get('username', None)
        password = credentials.get('password', None)
        cache_token = rc_config.get('cli', {}).get('cache_token', False)
        if username and password:
            # Credentials are provided, try to authenticate against the API
            try:
                token = self._get_auth_token(client=client, username=username, password=password,
                                             cache_token=cache_token)
            except requests.exceptions.ConnectionError as e:
                LOG.warning('Auth API server is not available, skipping authentication.')
                LOG.exception(e)
                return client
            except Exception as e:
                print('Failed to authenticate with credentials provided in the config.')
                raise e
            client.token = token
            # TODO: Hack, refactor when splitting out the client
            os.environ['ST2_AUTH_TOKEN'] = token
        return client

    def run(self, argv):
        """
        Parse *argv*, build the client and dispatch to the selected command.

        :return: Process exit code - 0 on success, 2 on operation failure,
                 3 after --print-config, or an exception-provided code.
        :rtype: ``int``
        """
        debug = False
        if '--print-config' in argv:
            # Hack because --print-config requires no command to be specified
            argv = argv + ['action', 'list']
        # Parse command line arguments.
        args = self.parser.parse_args(args=argv)
        print_config = args.print_config
        if print_config:
            self._print_config(args=args)
            return 3
        try:
            debug = getattr(args, 'debug', False)
            # Set up client.
            self.client = self.get_client(args=args, debug=debug)
            # Execute command.
            args.func(args)
            return 0
        except OperationFailureException:
            if debug:
                self._print_debug_info(args=args)
            return 2
        except Exception as e:
            # We allow exception to define custom exit codes
            exit_code = getattr(e, 'exit_code', 1)
            print('ERROR: %s\n' % e)
            if debug:
                self._print_debug_info(args=args)
            return exit_code

    def _print_config(self, args):
        """Print the parsed CLI config file as INI-style sections."""
        config = self._parse_config_file(args=args)
        for section, options in six.iteritems(config):
            print('[%s]' % (section))
            for name, value in six.iteritems(options):
                print('%s = %s' % (name, value))

    def _print_debug_info(self, args):
        """Dump client settings and the current exception traceback."""
        # Print client settings
        self._print_client_settings(args=args)
        # Print exception traceback
        traceback.print_exc()

    def _print_client_settings(self, args):
        """Print the effective client endpoints, token and proxy settings."""
        client = self.client
        if not client:
            return
        config_file_path = self._get_config_file_path(args=args)
        print('CLI settings:')
        print('----------------')
        print('Config file path: %s' % (config_file_path))
        print('Client settings:')
        print('----------------')
        print('ST2_BASE_URL: %s' % (client.endpoints['base']))
        print('ST2_AUTH_URL: %s' % (client.endpoints['auth']))
        print('ST2_API_URL: %s' % (client.endpoints['api']))
        print('ST2_AUTH_TOKEN: %s' % (os.environ.get('ST2_AUTH_TOKEN')))
        print('')
        print('Proxy settings:')
        print('---------------')
        print('HTTP_PROXY: %s' % (os.environ.get('HTTP_PROXY', '')))
        print('HTTPS_PROXY: %s' % (os.environ.get('HTTPS_PROXY', '')))
        print('')

    def _get_auth_token(self, client, username, password, cache_token):
        """
        Retrieve a valid auth token.

        If caching is enabled, we will first try to retrieve cached token from a
        file system. If cached token is expired or not available, we will try to
        authenticate using the provided credentials and retrieve a new auth
        token.

        :rtype: ``str``
        """
        if cache_token:
            token = self._get_cached_auth_token(client=client, username=username,
                                                password=password)
        else:
            token = None
        if not token:
            # Token is either expired or not available
            token_obj = self._authenticate_and_retrieve_auth_token(client=client,
                                                                   username=username,
                                                                   password=password)
            self._cache_auth_token(token_obj=token_obj)
            token = token_obj.token
        return token

    def _get_cached_auth_token(self, client, username, password):
        """
        Retrieve cached auth token from the file in the config directory.

        Returns None when no usable token exists (missing file, unreadable
        file, corrupt content raises ValueError, or the token has expired).

        :rtype: ``str``
        """
        if not os.path.isdir(ST2_CONFIG_DIRECTORY):
            os.makedirs(ST2_CONFIG_DIRECTORY)
        if not os.path.isfile(CACHED_TOKEN_PATH):
            return None
        if not os.access(ST2_CONFIG_DIRECTORY, os.R_OK):
            # We don't have read access to the file with a cached token
            message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
                       'access to the parent directory). Subsequent requests won\'t use a '
                       'cached token meaning they may be slower.' % (CACHED_TOKEN_PATH,
                                                                     os.getlogin()))
            LOG.warning(message)
            return None
        if not os.access(CACHED_TOKEN_PATH, os.R_OK):
            # We don't have read access to the file with a cached token
            message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
                       'access to this file). Subsequent requests won\'t use a cached token '
                       'meaning they may be slower.' % (CACHED_TOKEN_PATH, os.getlogin()))
            LOG.warning(message)
            return None
        with open(CACHED_TOKEN_PATH) as fp:
            data = fp.read()
        try:
            data = json.loads(data)
            token = data['token']
            expire_timestamp = data['expire_timestamp']
        except Exception as e:
            msg = 'File with cached token is corrupted: %s' % (str(e))
            raise ValueError(msg)
        now = int(time.time())
        # Treat tokens about to expire as already expired so an in-flight
        # request does not fail mid-way (see grace period constant).
        if (expire_timestamp + TOKEN_EXPIRATION_GRACE_PERIOD_SECONDS) < now:
            # Token has expired
            return None
        return token

    def _cache_auth_token(self, token_obj):
        """
        Cache auth token in the config directory.

        :param token_obj: Token object.
        :type token_obj: ``object``
        """
        if not os.path.isdir(ST2_CONFIG_DIRECTORY):
            os.makedirs(ST2_CONFIG_DIRECTORY)
        if not os.access(ST2_CONFIG_DIRECTORY, os.W_OK):
            # We don't have write access to the file with a cached token
            message = ('Unable to write token to "%s" (user %s doesn\'t have write '
                       'access to the parent directory). Subsequent requests won\'t use a '
                       'cached token meaning they may be slower.' % (CACHED_TOKEN_PATH,
                                                                     os.getlogin()))
            LOG.warning(message)
            return None
        if os.path.isfile(CACHED_TOKEN_PATH) and not os.access(CACHED_TOKEN_PATH, os.W_OK):
            # We don't have write access to the file with a cached token
            message = ('Unable to write token to "%s" (user %s doesn\'t have write '
                       'access to this file). Subsequent requests won\'t use a '
                       'cached token meaning they may be slower.' % (CACHED_TOKEN_PATH,
                                                                     os.getlogin()))
            LOG.warning(message)
            return None
        token = token_obj.token
        expire_timestamp = parse_isotime(token_obj.expiry)
        expire_timestamp = calendar.timegm(expire_timestamp.timetuple())
        data = {}
        data['token'] = token
        data['expire_timestamp'] = expire_timestamp
        data = json.dumps(data)
        # Note: We explicitly use fdopen instead of open + chmod to avoid a security issue.
        # open + chmod are two operations which means that during a short time frame (between
        # open and chmod) when file can potentially be read by other users if the default
        # permissions used during create allow that.
        # 0o600: owner read/write only - the token must not be world-readable.
        fd = os.open(CACHED_TOKEN_PATH, os.O_WRONLY | os.O_CREAT, 0o600)
        with os.fdopen(fd, 'w') as fp:
            fp.write(data)
        return True

    def _authenticate_and_retrieve_auth_token(self, client, username, password):
        """Authenticate against the auth endpoint and return the new Token object."""
        manager = models.ResourceManager(models.Token, client.endpoints['auth'],
                                         cacert=client.cacert, debug=client.debug)
        instance = models.Token()
        instance = manager.create(instance, auth=(username, password))
        return instance

    def _get_config_file_path(self, args):
        """
        Retrieve path to the CLI configuration file.

        :rtype: ``str``
        """
        path = os.environ.get('ST2_CONFIG_FILE', ST2_CONFIG_PATH)
        if args.config_file:
            path = args.config_file
        path = os.path.abspath(path)
        # The default path may legitimately not exist yet; an explicit one must.
        if path != ST2_CONFIG_PATH and not os.path.isfile(path):
            raise ValueError('Config "%s" not found' % (path))
        return path

    def _parse_config_file(self, args):
        """Parse the CLI config file and return the resulting dict."""
        config_file_path = self._get_config_file_path(args=args)
        parser = CLIConfigParser(config_file_path=config_file_path, validate_config_exists=False)
        result = parser.parse()
        return result

    def _get_config_file_options(self, args):
        """
        Parse the config and return kwargs which can be passed to the Client
        constructor.

        :rtype: ``dict``
        """
        rc_options = self._parse_config_file(args=args)
        result = {}
        for kwarg_name, (section, option) in six.iteritems(CONFIG_OPTION_TO_CLIENT_KWARGS_MAP):
            result[kwarg_name] = rc_options.get(section, {}).get(option, None)
        return result
def setup_logging(argv):
    """
    Attach a WARNING-level stderr handler to the CLI logger.

    Without --debug, a LogLevelFilter restricted to ERROR is installed -
    presumably so normal runs only surface errors; verify against
    LogLevelFilter's semantics.
    """
    wants_debug = '--debug' in argv
    LOG.setLevel(logging.WARNING)
    handler = logging.StreamHandler(sys.stderr)
    handler.setLevel(logging.WARNING)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
    if not wants_debug:
        handler.addFilter(LogLevelFilter(log_levels=[logging.ERROR]))
    LOG.addHandler(handler)
def main(argv=None):
    """
    CLI entry point.

    :param argv: Command line arguments; defaults to sys.argv[1:].
    :return: Process exit code.
    :rtype: ``int``
    """
    # Read sys.argv at call time: a default of ``sys.argv[1:]`` would be
    # captured once at import time and ignore later changes to sys.argv.
    if argv is None:
        argv = sys.argv[1:]
    setup_logging(argv)
    return Shell().run(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
|
|
import logging
import claripy
import archinfo
from .sim_type import SimTypeChar
from .sim_type import SimTypePointer
from .sim_type import SimTypeFixedSizeArray
from .sim_type import SimTypeArray
from .sim_type import SimTypeString
from .sim_type import SimTypeFunction
from .sim_type import SimTypeFloat
from .sim_type import SimTypeDouble
from .sim_type import SimStruct
from .state_plugins.sim_action_object import SimActionObject
l = logging.getLogger("angr.calling_conventions")
class PointerWrapper(object):
    """Thin wrapper that tags *value* for special (by-pointer) handling by consumers."""

    def __init__(self, value):
        # The wrapped value itself; no transformation is applied here.
        self.value = value
class AllocHelper(object):
    """
    Tracks a pointer while values are stored consecutively into a state's
    memory, either growing downward like a stack or upward.
    """

    def __init__(self, ptr, grow_like_stack, reverse_result):
        self.ptr = ptr                          # next address to allocate at
        self.grow_like_stack = grow_like_stack  # allocate downward when True
        self.reverse_result = reverse_result    # return byte-reversed pointers when True

    def dump(self, val, state, endness='Iend_BE'):
        """Store *val* at the current pointer and return the address it was stored at."""
        # val.length is in bits; use floor division so the pointer stays an
        # integer/BV - true division would yield a float under Python 3.
        nbytes = val.length // 8
        if self.grow_like_stack:
            self.ptr -= nbytes
            state.memory.store(self.ptr, val, endness=endness)
            return self.ptr.reversed if self.reverse_result else self.ptr
        out = self.ptr
        state.memory.store(out, val, endness=endness)
        self.ptr += nbytes
        return out.reversed if self.reverse_result else out
class SimFunctionArgument(object):
    """Abstract storage location (register or stack slot) for one function argument."""

    def __init__(self, size):
        self.size = size  # slot size in bytes; may be None

    def __ne__(self, other):
        return not self == other

    def check_value(self, value):
        """Raise TypeError when *value* cannot be stored in this location."""
        is_ast = isinstance(value, claripy.ast.Base)
        if self.size is None and not is_ast:
            raise TypeError("Only claripy objects may be stored through SimFunctionArgument when size is not provided")
        if self.size is not None and is_ast and self.size*8 < value.length:
            raise TypeError("%s doesn't fit in an argument of size %d" % (value, self.size))

    def set_value(self, state, value, **kwargs):
        raise NotImplementedError

    def get_value(self, state, **kwargs):
        raise NotImplementedError
class SimRegArg(SimFunctionArgument):
    """An argument that lives in a named register."""

    def __init__(self, reg_name, size, alt_offsets=None):
        SimFunctionArgument.__init__(self, size)
        self.reg_name = reg_name
        # Maps value sizes to alternative offsets within the register.
        self.alt_offsets = alt_offsets if alt_offsets is not None else {}

    def __repr__(self):
        return "<%s>" % self.reg_name

    def __eq__(self, other):
        return type(other) is SimRegArg and self.reg_name == other.reg_name

    def _fix_offset(self, state, size):
        """
        This is a hack to deal with small values being stored at offsets into large registers unpredictably
        """
        if size is None:
            size = self.size
        base = state.arch.registers[self.reg_name][0]
        if size in self.alt_offsets:
            return base + self.alt_offsets[size], size
        if size < self.size and state.arch.register_endness == 'Iend_BE':
            # Big-endian: small values occupy the high-addressed end of the register.
            return base + (self.size - size), size
        return base, size

    def set_value(self, state, value, endness=None, size=None, **kwargs):  # pylint: disable=unused-argument
        self.check_value(value)
        if endness is None:
            endness = state.arch.register_endness
        offset, size = self._fix_offset(state, size)
        state.registers.store(offset, value, endness=endness, size=size)

    def get_value(self, state, endness=None, size=None, **kwargs):  # pylint: disable=unused-argument
        if endness is None:
            endness = state.arch.register_endness
        offset, size = self._fix_offset(state, size)
        return state.registers.load(offset, endness=endness, size=size)
class SimStackArg(SimFunctionArgument):
    """An argument stored on the stack at a fixed offset from the stack base."""

    def __init__(self, stack_offset, size):
        SimFunctionArgument.__init__(self, size)
        self.stack_offset = stack_offset  # byte offset from the stack base

    def __repr__(self):
        return "[%#x]" % self.stack_offset

    def __eq__(self, other):
        return type(other) is SimStackArg and self.stack_offset == other.stack_offset

    def set_value(self, state, value, endness=None, stack_base=None):  # pylint: disable=arguments-differ
        self.check_value(value)
        if endness is None:
            endness = state.arch.memory_endness
        if stack_base is None:
            stack_base = state.regs.sp
        addr = stack_base + self.stack_offset
        state.memory.store(addr, value, endness=endness, size=self.size)

    def get_value(self, state, endness=None, stack_base=None, size=None):  # pylint: disable=arguments-differ
        if endness is None:
            endness = state.arch.memory_endness
        if stack_base is None:
            stack_base = state.regs.sp
        addr = stack_base + self.stack_offset
        return state.memory.load(addr, endness=endness, size=size or self.size)
class SimComboArg(SimFunctionArgument):
    """An argument split across several locations (e.g. multiple stack slots)."""

    def __init__(self, locations):
        super(SimComboArg, self).__init__(sum(x.size for x in locations))
        self.locations = locations  # constituent locations, in concatenation order

    def __repr__(self):
        return 'SimComboArg(%s)' % repr(self.locations)

    def __eq__(self, other):
        return type(other) is SimComboArg and all(a == b for a, b in zip(self.locations, other.locations))

    def set_value(self, state, value, endness=None, **kwargs):
        """Convert *value* to a bitvector if needed and scatter it across the locations."""
        self.check_value(value)
        if endness is None:
            endness = state.arch.memory_endness
        # Python 3 has no `long`; the conditional expression is lazy, so the
        # name is only looked up on Python 2 (where str is bytes).
        integer_types = (int, long) if str is bytes else (int,)  # noqa: F821
        if isinstance(value, integer_types):
            value = claripy.BVV(value, self.size*8)
        elif isinstance(value, float):
            if self.size not in (4, 8):
                raise ValueError("What do I do with a float %d bytes long" % self.size)
            value = claripy.FPV(value, claripy.FSORT_FLOAT if self.size == 4 else claripy.FSORT_DOUBLE)
        cur = 0
        # Walk locations from the least-significant end, slicing matching bit ranges.
        for loc in reversed(self.locations):
            loc.set_value(state, value[cur*8 + loc.size*8 - 1:cur*8], endness, **kwargs)
            cur += loc.size

    def get_value(self, state, endness=None, **kwargs):
        """Read every constituent location and concatenate the results."""
        if endness is None:
            endness = state.arch.memory_endness
        vals = [loc.get_value(state, endness, **kwargs) for loc in self.locations]
        return claripy.Concat(*vals)
class ArgSession(object):
    """
    A class to keep track of the state accumulated in laying parameters out into memory
    """

    def __init__(self, cc):
        self.cc = cc
        self.real_args = None
        self.fp_iter = None
        self.int_iter = None
        self.both_iter = None
        if cc.args is None:
            # No explicit layout: draw from the cc's fp/int/generic slot pools.
            self.fp_iter = cc.fp_args
            self.int_iter = cc.int_args
            self.both_iter = cc.both_args
        else:
            # Explicit layout: hand out the customized positions in order.
            self.real_args = iter(cc.args)

    # TODO: use safer errors than TypeError and ValueError
    def next_arg(self, is_fp, size=None):
        """Return the next argument location of the requested kind, upsizing if needed."""
        if self.real_args is not None:
            arg = self._next_explicit_arg(is_fp)
        else:
            arg = self._next_auto_arg(is_fp)
        if size is not None and size > arg.size:
            arg = self.upsize_arg(arg, is_fp, size)
        return arg

    def _next_explicit_arg(self, is_fp):
        # One position from the user-specified argument list, kind-checked.
        try:
            arg = next(self.real_args)
        except StopIteration:
            raise TypeError("Accessed too many arguments - concrete number are specified")
        if is_fp and self.cc.is_fp_arg(arg) is False:
            raise TypeError("Can't put a float here - concrete arg positions are specified")
        if not is_fp and self.cc.is_fp_arg(arg) is True:
            raise TypeError("Can't put an int here - concrete arg positions are specified")
        return arg

    def _next_auto_arg(self, is_fp):
        # Prefer the kind-specific pool, then fall back to the generic slots.
        primary = self.fp_iter if is_fp else self.int_iter
        try:
            return next(primary)
        except StopIteration:
            try:
                return next(self.both_iter)
            except StopIteration:
                raise TypeError("Accessed too many arguments - exhausted all positions?")

    def upsize_arg(self, arg, is_fp, size):
        """Combine consecutive locations until at least *size* bytes fit."""
        if not is_fp:
            raise ValueError("You can't fit a integral value of size %d into an argument of size %d!" % (size, arg.size))
        if not isinstance(arg, SimStackArg):
            raise ValueError("I don't know how to handle this? please report to @rhelmot")
        accumulated = arg.size
        locations = [arg]
        while accumulated < size:
            extra = self.next_arg(is_fp, None)
            accumulated += extra.size
            locations.append(extra)
        return SimComboArg(locations)
class SimCC(object):
"""
A calling convention allows you to extract from a state the data passed from function to
function by calls and returns. Most of the methods provided by SimCC that operate on a state
assume that the program is just after a call but just before stack frame allocation, though
this may be overridden with the `stack_base` parameter to each individual method.
This is the base class for all calling conventions.
An instance of this class allows it to be tweaked to the way a specific function should be called.
"""
def __init__(self, arch, args=None, ret_val=None, sp_delta=None, func_ty=None):
"""
:param arch: The Archinfo arch for this CC
:param args: A list of SimFunctionArguments describing where the arguments go
:param ret_val: A SimFunctionArgument describing where the return value goes
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:parmm func_ty: A SimType for the function itself
"""
if func_ty is not None:
if not isinstance(func_ty, SimTypeFunction):
raise TypeError("Function prototype must be a function!")
self.arch = arch
self.args = args
self.ret_val = ret_val
self.sp_delta = sp_delta
self.func_ty = func_ty if func_ty is None else func_ty.with_arch(arch)
@classmethod
def from_arg_kinds(cls, arch, fp_args, ret_fp=False, sizes=None, sp_delta=None, func_ty=None):
"""
Get an instance of the class that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:parmm func_ty: A SimType for the function itself
"""
basic = cls(arch, sp_delta=sp_delta, func_ty=func_ty)
basic.args = basic.arg_locs(fp_args, sizes)
basic.ret_val = basic.fp_return_val if ret_fp else basic.return_val
return basic
#
# Here are all the things a subclass needs to specify!
#
ARG_REGS = None # A list of all the registers used for integral args, in order (names or offsets)
FP_ARG_REGS = None # A list of all the registers used for floating point args, in order
STACKARG_SP_BUFF = 0 # The amount of stack space reserved between the saved return address
# (if applicable) and the arguments. Probably zero.
STACKARG_SP_DIFF = 0 # The amount of stack space reserved for the return address
RETURN_ADDR = None # The location where the return address is stored, as a SimFunctionArgument
RETURN_VAL = None # The location where the return value is stored, as a SimFunctionArgument
FP_RETURN_VAL = None # The location where floating-point argument return values are stored
ARCH = None # The archinfo.Arch class that this CC must be used for, if relevant
CALLEE_CLEANUP = False # Whether the callee has to deallocate the stack space for the arguments
#
# Here are several things you MAY want to override to change your cc's convention
#
@property
def int_args(self):
"""
Iterate through all the possible arg positions that can only be used to store integer or pointer values
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
if self.ARG_REGS is None:
raise NotImplementedError()
for reg in self.ARG_REGS: # pylint: disable=not-an-iterable
yield SimRegArg(reg, self.arch.bytes)
@property
def both_args(self):
"""
Iterate through all the possible arg positions that can be used to store any kind of argument
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
turtle = self.STACKARG_SP_BUFF + self.STACKARG_SP_DIFF
while True:
yield SimStackArg(turtle, self.arch.bytes)
turtle += self.arch.bytes
@property
def fp_args(self):
"""
Iterate through all the possible arg positions that can only be used to store floating point values
Does not take into account customizations.
Returns an iterator of SimFunctionArguments
"""
if self.FP_ARG_REGS is None:
raise NotImplementedError()
for reg in self.FP_ARG_REGS: # pylint: disable=not-an-iterable
yield SimRegArg(reg, self.arch.registers[reg][1])
def is_fp_arg(self, arg):
"""
This should take a SimFunctionArgument instance and return whether or not that argument is a floating-point
argument.
Returns True for MUST be a floating point arg,
False for MUST NOT be a floating point arg,
None for when it can be either.
"""
if arg in self.int_args:
return False
if arg in self.fp_args or arg == self.FP_RETURN_VAL:
return True
return None
ArgSession = ArgSession # import this from global scope so SimCC subclasses can subclass it if they like
@property
def arg_session(self):
"""
Return an arg session.
A session provides the control interface necessary to describe how integral and floating-point arguments are
laid out into memory. The default behavior is that there are a finite list of int-only and fp-only argument
slots, and an infinite number of generic slots, and when an argument of a given type is requested, the most
slot available is used. If you need different behavior, subclass ArgSession.
"""
return self.ArgSession(self)
def stack_space(self, args):
"""
:param args: A list of SimFunctionArguments
:returns: The number of bytes that should be allocated on the stack to store all these args,
NOT INCLUDING the return address.
"""
out = self.STACKARG_SP_DIFF
for arg in args:
if isinstance(arg, SimStackArg):
out = max(out, arg.stack_offset + self.arch.bytes)
out += self.STACKARG_SP_BUFF
return out
@property
def return_val(self):
"""
The location the return value is stored.
"""
# pylint: disable=unsubscriptable-object
return self.RETURN_VAL if self.ret_val is None else self.ret_val
@property
def fp_return_val(self):
return self.FP_RETURN_VAL if self.ret_val is None else self.ret_val
@property
def return_addr(self):
"""
The location the return address is stored.
"""
return self.RETURN_ADDR
#
# Useful functions!
#
@staticmethod
def is_fp_value(val):
return isinstance(val, (float, claripy.ast.FP)) or \
(isinstance(val, claripy.ast.Base) and val.op.startswith('fp')) or \
(isinstance(val, claripy.ast.Base) and val.op == 'Reverse' and val.args[0].op.startswith('fp'))
def arg_locs(self, is_fp, sizes=None):
"""
Pass this a list of whether each parameter is floating-point or not, and get back a list of
SimFunctionArguments. Optionally, pass a list of argument sizes (in bytes) as well.
If you've customized this CC, this will sanity-check the provided locations with the given list.
"""
session = self.arg_session
if sizes is None: sizes = [self.arch.bytes]*len(is_fp)
return [session.next_arg(ifp, size=sz) for ifp, sz in zip(is_fp, sizes)]
def arg(self, state, index, stack_base=None):
"""
Returns a bitvector expression representing the nth argument of a function.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
WARNING: this assumes that none of the arguments are floating-point and they're all single-word-sized, unless
you've customized this CC.
"""
session = self.arg_session
if self.args is None:
arg_loc = [session.next_arg(False) for _ in xrange(index + 1)][-1]
else:
arg_loc = self.args[index]
return arg_loc.get_value(state, stack_base=stack_base)
def get_args(self, state, is_fp=None, sizes=None, stack_base=None):
"""
`is_fp` should be a list of booleans specifying whether each corresponding argument is floating-point -
True for fp and False for int. For a shorthand to assume that all the parameters are int, pass the number of
parameters as an int.
If you've customized this CC, you may omit this parameter entirely. If it is provided, it is used for
sanity-checking.
`sizes` is an optional list of argument sizes, in bytes. Be careful about using this if you've made explicit
the arg locations, since it might decide to combine two locations into one if an arg is too big.
`stack_base` is an optional pointer to the top of the stack at the function start. If it is not
specified, use the current stack pointer.
Returns a list of bitvector expressions representing the arguments of a function.
"""
if sizes is None and self.func_ty is not None:
sizes = [arg.size for arg in self.func_ty.args]
if is_fp is None:
if self.args is None:
if self.func_ty is None:
raise ValueError("You must either customize this CC or pass a value to is_fp!")
else:
arg_locs = self.arg_locs()
else:
arg_locs = self.args
elif type(is_fp) is int:
if self.args is not None and len(self.args) != is_fp:
raise ValueError("Bad number of args requested: got %d, expected %d" % (is_fp, len(self.args)))
arg_locs = self.arg_locs([False]*is_fp, sizes)
else:
arg_locs = self.arg_locs(is_fp, sizes)
return [loc.get_value(state, stack_base=stack_base) for loc in arg_locs]
    def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
        """
        This function performs the actions of the caller getting ready to jump into a function.

        :param state:           The SimState to operate on
        :param ret_addr:        The address to return to when the called function finishes
        :param args:            The list of arguments that that the called function will see
        :param stack_base:      An optional pointer to use as the top of the stack, circa the function entry point
        :param alloc_base:      An optional pointer to use as the place to put excess argument data
        :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses

        The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
        binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
        same type and size, while tuples (representing structs) can be elements of any type and size.
        If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
        that can't fit in a register will be automatically put in a PointerWrapper.

        If stack_base is not provided, the current stack pointer will be used, and it will be updated.
        If alloc_base is not provided, the current stack pointer will be used, and it will be updated.
        You might not like the results if you provide stack_base but not alloc_base.

        grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
        in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
        set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequencial
        allocations happen at increasing addresses.
        """
        # The allocator tracks where "excess" data (strings, arrays, structs) gets dumped.
        allocator = AllocHelper(alloc_base if alloc_base is not None else state.regs.sp,
                                grow_like_stack,
                                self.arch.memory_endness == 'Iend_LE')
        # Serialize every python-level argument into a claripy bitvector, type-checked
        # against the prototype when one is available.
        if self.func_ty is not None:
            vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)]
        else:
            vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args]
        arg_session = self.arg_session
        arg_locs = [None]*len(args)
        for i, (arg, val) in enumerate(zip(args, vals)):
            # Floating-point args get fp slots and skip the word-size normalization below.
            if self.is_fp_value(arg) or \
                    (self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
                arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length/8)
                continue
            # Oversized values (or untyped aggregates/strings) are spilled to memory and
            # replaced by a pointer; undersized values are zero-extended to a full word.
            if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (str, unicode, list, tuple))):
                vals[i] = allocator.dump(val, state)
            elif val.length < state.arch.bits:
                if self.arch.memory_endness == 'Iend_LE':
                    vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
                else:
                    vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
            arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length/8)
        if alloc_base is None:
            # We allocated out of the stack, so commit the new stack pointer.
            state.regs.sp = allocator.ptr
        if stack_base is None:
            # NOTE(review): it is unclear why SP_DIFF is added here - stack_space already
            # starts from STACKARG_SP_DIFF, so this may double-count it. TODO: try
            # disabling it and see if anything breaks.
            state.regs.sp -= self.stack_space(arg_locs) + self.STACKARG_SP_DIFF
        # Store each serialized value into its assigned location, then plant the return address.
        for loc, val in zip(arg_locs, vals):
            loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)
        self.return_addr.set_value(state, ret_addr, stack_base=stack_base)
    def teardown_callsite(self, state, return_val=None, arg_types=None, force_callee_cleanup=False):
        """
        This function performs the actions of the callee as it's getting ready to return.
        It returns the address to return to.

        :param state:                The state to mutate
        :param return_val:           The value to return
        :param arg_types:            The fp-ness of each of the args. Used to calculate sizes to clean up
        :param force_callee_cleanup: If we should clean up the stack allocation for the arguments even if it's
                                     not the callee's job to do so

        TODO: support the stack_base parameter from setup_callsite...? Does that make sense in this context?
        Maybe it could make sense by saying that you pass it in as something like the "saved base pointer" value?
        """
        if return_val is not None:
            self.set_return_val(state, return_val)
        ret_addr = self.return_addr.get_value(state)
        # Only touch the stack pointer on architectures that have one.
        if state.arch.sp_offset is not None:
            if force_callee_cleanup or self.CALLEE_CLEANUP:
                if arg_types is not None:
                    # Replay an argument session to reconstruct how much stack the args used.
                    session = self.arg_session
                    state.regs.sp += self.stack_space([session.next_arg(x) for x in arg_types])
                elif self.args is not None:
                    state.regs.sp += self.stack_space(self.args)
                else:
                    l.warning("Can't perform callee cleanup when I have no idea how many arguments there are! Assuming 0")
                    state.regs.sp += self.STACKARG_SP_DIFF
            else:
                # Caller-cleanup convention: just pop the return-address region.
                state.regs.sp += self.STACKARG_SP_DIFF
        return ret_addr
# pylint: disable=unused-argument
def get_return_val(self, state, is_fp=None, size=None, stack_base=None):
"""
Get the return value out of the given state
"""
ty = self.func_ty.returnty if self.func_ty is not None else None
if self.ret_val is not None:
loc = self.ret_val
elif is_fp is not None:
loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL
elif ty is not None:
loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL
else:
loc = self.RETURN_VAL
if loc is None:
raise NotImplementedError("This SimCC doesn't know how to get this value - should be implemented")
val = loc.get_value(state, stack_base=stack_base, size=None if ty is None else ty.size/8)
if self.is_fp_arg(loc) or self.is_fp_value(val) or isinstance(ty, SimTypeFloat):
val = val.raw_to_fp()
return val
def set_return_val(self, state, val, is_fp=None, size=None, stack_base=None):
"""
Set the return value into the given state
"""
ty = self.func_ty.returnty if self.func_ty is not None else None
try:
betterval = self._standardize_value(val, ty, state, None)
except AttributeError:
raise ValueError("Can't fit value %s into a return value" % repr(val))
if self.ret_val is not None:
loc = self.ret_val
elif is_fp is not None:
loc = self.FP_RETURN_VAL if is_fp else self.RETURN_VAL
elif ty is not None:
loc = self.FP_RETURN_VAL if isinstance(ty, SimTypeFloat) else self.RETURN_VAL
else:
loc = self.FP_RETURN_VAL if self.is_fp_value(val) else self.RETURN_VAL
if loc is None:
raise NotImplementedError("This SimCC doesn't know how to store this value - should be implemented")
loc.set_value(state, betterval, endness='Iend_BE', stack_base=stack_base)
#
# Helper functions
#
@staticmethod
def _standardize_value(arg, ty, state, alloc):
check = ty is not None
if isinstance(arg, SimActionObject):
return SimCC._standardize_value(arg.ast, ty, state, alloc)
elif isinstance(arg, PointerWrapper):
if check and not isinstance(ty, SimTypePointer):
raise TypeError("Type mismatch: expected %s, got pointer-wrapper" % ty.name)
real_value = SimCC._standardize_value(arg.value, ty.pts_to if check else None, state, alloc)
return alloc(real_value, state)
elif isinstance(arg, str):
# TODO: when we switch to py3, distinguish between str and bytes
# by null-terminating str but not bytes :/
arg += '\0'
ref = False
if check:
if isinstance(ty, SimTypePointer) and \
isinstance(ty.pts_to, SimTypeChar):
ref = True
elif isinstance(ty, SimTypeFixedSizeArray) and \
isinstance(ty.elem_type, SimTypeChar):
ref = False
if len(arg) > ty.length:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length, '\0')
elif isinstance(ty, SimTypeArray) and \
isinstance(ty.elem_type, SimTypeChar):
ref = True
if ty.length is not None:
if len(arg) > ty.length:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length, '\0')
elif isinstance(ty, SimTypeString):
ref = False
if len(arg) > ty.length + 1:
raise TypeError("String %s is too long for %s" % (repr(arg), ty.name))
arg = arg.ljust(ty.length + 1, '\0')
else:
raise TypeError("Type mismatch: Expected %s, got char*" % ty.name)
val = SimCC._standardize_value(map(ord, arg), SimTypeFixedSizeArray(SimTypeChar(), len(arg)), state, alloc)
if ref:
val = alloc(val, state)
return val
elif isinstance(arg, list):
ref = False
subty = None
if check:
if isinstance(ty, SimTypePointer):
ref = True
subty = ty.pts_to
elif isinstance(ty, SimTypeFixedSizeArray):
ref = False
subty = ty.elem_type
if len(arg) != ty.length:
raise TypeError("Array %s is the wrong length for %s" % (repr(arg), ty.name))
elif isinstance(ty, SimTypeArray):
ref = True
subty = ty.elem_type
if ty.length is not None:
if len(arg) != ty.length:
raise TypeError("Array %s is the wrong length for %s" % (repr(arg), ty.name))
else:
raise TypeError("Type mismatch: Expected %s, got char*" % ty.name)
else:
types = map(type, arg)
if types[1:] != types[:-1]:
raise TypeError("All elements of list must be of same type")
val = claripy.Concat(*[SimCC._standardize_value(sarg, subty, state, alloc) for sarg in arg])
if ref:
val = alloc(val, state)
return val
elif isinstance(arg, tuple):
if check:
if not isinstance(ty, SimStruct):
raise TypeError("Type mismatch: Expected %s, got tuple (i.e. struct)" % ty.name)
if len(arg) != len(ty.fields):
raise TypeError("Wrong number of fields in struct, expected %d got %d" % (len(ty.fields), len(arg)))
return claripy.Concat(*[SimCC._standardize_value(sarg, sty, state, alloc)
for sarg, sty
in zip(arg, ty.fields.values())])
else:
return claripy.Concat(*[SimCC._standardize_value(sarg, None, state, alloc) for sarg in arg])
elif isinstance(arg, (int, long)):
if check and isinstance(ty, SimTypeFloat):
return SimCC._standardize_value(float(arg), ty, state, alloc)
val = state.se.BVV(arg, ty.size if check else state.arch.bits)
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed
return val
elif isinstance(arg, float):
sort = claripy.FSORT_FLOAT
if check:
if isinstance(ty, SimTypeDouble):
sort = claripy.FSORT_DOUBLE
elif isinstance(ty, SimTypeFloat):
pass
else:
raise TypeError("Type mismatch: expectd %s, got float" % ty.name)
else:
sort = claripy.FSORT_DOUBLE if state.arch.bits == 64 else claripy.FSORT_FLOAT
val = claripy.fpToIEEEBV(claripy.FPV(arg, sort))
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed # pylint: disable=no-member
return val
elif isinstance(arg, claripy.ast.FP):
val = claripy.fpToIEEEBV(arg)
if state.arch.memory_endness == 'Iend_LE':
val = val.reversed # pylint: disable=no-member
return val
elif isinstance(arg, claripy.ast.Base):
# yikes
if state.arch.memory_endness == 'Iend_LE' and arg.length == state.arch.bits:
arg = arg.reversed
return arg
else:
raise TypeError("I don't know how to serialize %s." % repr(arg))
def __repr__(self):
return "<" + self.__class__.__name__ + '>'
@classmethod
def _match(cls, arch, args, sp_delta):
if cls.ARCH is not None and not isinstance(arch, cls.ARCH):
return False
if sp_delta != cls.STACKARG_SP_DIFF:
return False
sample_inst = cls(arch)
all_fp_args = list(sample_inst.fp_args)
all_int_args = list(sample_inst.int_args)
both_iter = sample_inst.both_args
some_both_args = [next(both_iter) for _ in xrange(len(args))]
for arg in args:
if arg not in all_fp_args and arg not in all_int_args and arg not in some_both_args:
return False
return True
class SimLyingRegArg(SimRegArg):
    """
    A register that LIES about the types it holds: storage is always 8 bytes,
    but 4-byte accesses are transparently converted to/from single precision.
    Used for x87-style return registers (e.g. st0).
    """
    def __init__(self, name):
        # Fixed 8-byte backing storage regardless of the requested access size.
        super(SimLyingRegArg, self).__init__(name, 8)
    def get_value(self, state, size=None, endness=None, **kwargs):
        val = getattr(state.regs, self.reg_name)
        # BUGFIX: this previously read `state.args.register_endness`; states expose
        # the architecture as `.arch`, so the old code raised AttributeError.
        if endness and endness != state.arch.register_endness:
            val = val.reversed
        if size == 4:
            # Narrow the stored double down to single precision for 4-byte reads.
            val = claripy.fpToFP(claripy.fp.RM_RNE, val.raw_to_fp(), claripy.FSORT_FLOAT)
        return val
    def set_value(self, state, val, size=None, endness=None, **kwargs):
        if size == 4:
            # BUGFIX: endness tags are spelled 'Iend_LE'/'Iend_BE' everywhere else in this
            # file; the old 'IEnd_*' comparison could never be true.
            if state.arch.register_endness == 'Iend_LE' and endness == 'Iend_BE':
                # pylint: disable=no-member
                val = claripy.fpToFP(claripy.fp.RM_RNE, val.reversed.raw_to_fp(), claripy.FSORT_DOUBLE).reversed
            else:
                val = claripy.fpToFP(claripy.fp.RM_RNE, val.raw_to_fp(), claripy.FSORT_DOUBLE)
        # BUGFIX: `state.args` -> `state.arch` (same typo as in get_value).
        if endness and endness != state.arch.register_endness:
            val = val.reversed
        setattr(state.regs, self.reg_name, val)
class SimCCCdecl(SimCC):
    """x86 cdecl-style convention: all args on the stack, integer return in eax."""
    ARG_REGS = [] # All arguments are passed in stack
    FP_ARG_REGS = []
    STACKARG_SP_DIFF = 4 # Return address is pushed on to stack by call
    RETURN_VAL = SimRegArg('eax', 4)
    FP_RETURN_VAL = SimLyingRegArg('st0')  # x87 register needs size-lying access
    RETURN_ADDR = SimStackArg(0, 4)
    ARCH = archinfo.ArchX86
class SimCCStdcall(SimCCCdecl):
    """Like cdecl, but the callee pops its own stack arguments."""
    CALLEE_CLEANUP = True
class SimCCX86LinuxSyscall(SimCC):
    """Linux x86 syscall convention: args in ebx..ebp, syscall number and return in eax."""
    ARG_REGS = ['ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp']
    FP_ARG_REGS = []
    RETURN_VAL = SimRegArg('eax', 4)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
    ARCH = archinfo.ArchX86
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.eax
class SimCCX86WindowsSyscall(SimCC):
    """Windows x86 syscall convention. TODO: Make sure the information is correct."""
    ARG_REGS = [ ]
    FP_ARG_REGS = [ ]
    RETURN_VAL = SimRegArg('eax', 4)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
    ARCH = archinfo.ArchX86
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.eax
class SimCCSystemVAMD64(SimCC):
    """System V AMD64 calling convention: int args in rdi..r9, fp args in xmm0-7."""
    ARG_REGS = ['rdi', 'rsi', 'rdx', 'rcx', 'r8', 'r9']
    FP_ARG_REGS = ['xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5', 'xmm6', 'xmm7']
    STACKARG_SP_DIFF = 8 # Return address is pushed on to stack by call
    RETURN_ADDR = SimStackArg(0, 8)
    RETURN_VAL = SimRegArg('rax', 8)
    # NOTE(review): 32 bytes looks larger than a 16-byte xmm register - confirm intended.
    FP_RETURN_VAL = SimRegArg('xmm0', 32)
    ARCH = archinfo.ArchAMD64
    def __init__(self, arch, args=None, ret_val=None, sp_delta=None, func_ty=None):
        super(SimCCSystemVAMD64, self).__init__(arch, args, ret_val, sp_delta, func_ty)
        # Remove the ret address on stack
        if self.args is not None:
            self.args = [ i for i in self.args if not (isinstance(i, SimStackArg) and i.stack_offset == 0x0) ]
    @classmethod
    def _match(cls, arch, args, sp_delta):
        """Match heuristic: every observed arg must be a known int/fp/generic slot (return address ignored)."""
        if cls.ARCH is not None and not isinstance(arch, cls.ARCH):
            return False
        #if sp_delta != cls.STACKARG_SP_DIFF:
        #    return False
        sample_inst = cls(arch)
        all_fp_args = list(sample_inst.fp_args)
        all_int_args = list(sample_inst.int_args)
        both_iter = sample_inst.both_args
        some_both_args = [next(both_iter) for _ in xrange(len(args))]
        for arg in args:
            if arg not in all_fp_args and arg not in all_int_args and arg not in some_both_args:
                if isinstance(arg, SimStackArg) and arg.stack_offset == 0:
                    continue # ignore return address?
                return False
        return True
class SimCCAMD64LinuxSyscall(SimCC):
    """Linux x86-64 syscall convention: args in rdi..r9 (r10 instead of rcx), number and return in rax."""
    ARG_REGS = ['rdi', 'rsi', 'rdx', 'r10', 'r8', 'r9']
    RETURN_VAL = SimRegArg('rax', 8)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
    ARCH = archinfo.ArchAMD64
    @staticmethod
    def _match(arch, args, sp_delta): # pylint: disable=unused-argument
        # doesn't appear anywhere but syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.rax
class SimCCAMD64WindowsSyscall(SimCC):
    """Windows x86-64 syscall convention. TODO: Make sure the information is correct."""
    ARG_REGS = [ ]
    FP_ARG_REGS = [ ]
    RETURN_VAL = SimRegArg('rax', 8)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
    ARCH = archinfo.ArchAMD64
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.rax
class SimCCARM(SimCC):
    """ARM AAPCS-style convention: first four args in r0-r3, return in r0, return address in lr."""
    ARG_REGS = [ 'r0', 'r1', 'r2', 'r3' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_ADDR = SimRegArg('lr', 4)
    RETURN_VAL = SimRegArg('r0', 4)
    ARCH = archinfo.ArchARM
class SimCCARMLinuxSyscall(SimCC):
    """Linux ARM syscall convention; syscall number in r7. TODO: Make sure all the information is correct."""
    ARG_REGS = [ 'r0', 'r1', 'r2', 'r3' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
    RETURN_VAL = SimRegArg('r0', 4)
    ARCH = archinfo.ArchARM
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.r7
class SimCCAArch64(SimCC):
    """AArch64 convention: first eight args in x0-x7, return in x0, return address in lr."""
    ARG_REGS = [ 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_ADDR = SimRegArg('lr', 8)
    RETURN_VAL = SimRegArg('x0', 8)
    ARCH = archinfo.ArchAArch64
class SimCCAArch64LinuxSyscall(SimCC):
    """Linux AArch64 syscall convention; syscall number in x8. TODO: Make sure all the information is correct."""
    ARG_REGS = [ 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_VAL = SimRegArg('x0', 8)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
    ARCH = archinfo.ArchAArch64
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.x8
class SimCCO32(SimCC):
    """MIPS32 o32 convention: args in a0-a3 with a 16-byte stack buffer, return in v0."""
    ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
    FP_ARG_REGS = []    # TODO: ???
    STACKARG_SP_BUFF = 16
    RETURN_ADDR = SimRegArg('lr', 4)
    RETURN_VAL = SimRegArg('v0', 4)
    ARCH = archinfo.ArchMIPS32
class SimCCO32LinuxSyscall(SimCC):
    """Linux MIPS32 syscall convention; syscall number in v0. TODO: Make sure all the information is correct."""
    ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_VAL = SimRegArg('v0', 4)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
    ARCH = archinfo.ArchMIPS32
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.v0
class SimCCO64(SimCC): # TODO: add n32 and n64
    """MIPS64 convention: args in a0-a3 with a 32-byte stack buffer, return in v0."""
    ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
    FP_ARG_REGS = []    # TODO: ???
    STACKARG_SP_BUFF = 32
    RETURN_ADDR = SimRegArg('lr', 8)
    RETURN_VAL = SimRegArg('v0', 8)
    ARCH = archinfo.ArchMIPS64
class SimCCO64LinuxSyscall(SimCC):
    """Linux MIPS64 syscall convention; syscall number in v0. TODO: Make sure all the information is correct."""
    ARG_REGS = [ 'a0', 'a1', 'a2', 'a3' ]
    FP_ARG_REGS = []    # TODO: ???
    RETURN_VAL = SimRegArg('v0', 8)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
    ARCH = archinfo.ArchMIPS64
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.v0
class SimCCPowerPC(SimCC):
    """PPC32 convention: args in r3-r10 with an 8-byte stack buffer, return in r3."""
    ARG_REGS = [ 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10' ]
    FP_ARG_REGS = []    # TODO: ???
    STACKARG_SP_BUFF = 8
    RETURN_ADDR = SimRegArg('lr', 4)
    RETURN_VAL = SimRegArg('r3', 4)
    ARCH = archinfo.ArchPPC32
class SimCCPowerPCLinuxSyscall(SimCC):
    """Linux PPC32 syscall convention; syscall number in r0. TODO: Make sure all the information is correct."""
    ARG_REGS = ['r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10']
    FP_ARG_REGS = [ ]
    RETURN_VAL = SimRegArg('r3', 4)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 4)
    ARCH = archinfo.ArchPPC32
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.r0
class SimCCPowerPC64(SimCC):
    """PPC64 convention: args in r3-r10 with a 0x70-byte stack buffer, return in r3."""
    ARG_REGS = [ 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10' ]
    FP_ARG_REGS = []    # TODO: ???
    STACKARG_SP_BUFF = 0x70
    RETURN_ADDR = SimRegArg('lr', 8)
    RETURN_VAL = SimRegArg('r3', 8)
    ARCH = archinfo.ArchPPC64
class SimCCPowerPC64LinuxSyscall(SimCC):
    """Linux PPC64 syscall convention; syscall number in r0. TODO: Make sure all the information is correct."""
    ARG_REGS = [ ]
    FP_ARG_REGS = [ ]
    RETURN_VAL = SimRegArg('r3', 8)
    RETURN_ADDR = SimRegArg('ip_at_syscall', 8)
    ARCH = archinfo.ArchPPC64
    @classmethod
    def _match(cls, arch, args, sp_delta): # pylint: disable=unused-argument
        # never appears anywhere except syscalls
        return False
    @staticmethod
    def syscall_num(state):
        # The syscall number register.
        return state.regs.r0
class SimCCUnknown(SimCC):
    """
    Represent an unknown calling convention. Matches anything, so it acts as
    the fallback when no concrete CC matches.
    """
    @staticmethod
    def _match(arch, args, sp_delta): # pylint: disable=unused-argument
        # It always returns True
        return True
    def __repr__(self):
        return "<SimCCUnknown - %s %s sp_delta=%d>" % (self.arch.name, self.args, self.sp_delta)
# Concrete calling conventions tried (in order) when matching observed arg locations.
CC = [ SimCCCdecl, SimCCSystemVAMD64, SimCCARM, SimCCO32, SimCCO64, SimCCPowerPC, SimCCPowerPC64, SimCCAArch64 ]
# Default calling convention per architecture name.
DEFAULT_CC = {
    'AMD64': SimCCSystemVAMD64,
    'X86': SimCCCdecl,
    'ARMEL': SimCCARM,
    'ARMHF': SimCCARM,
    'MIPS32': SimCCO32,
    'MIPS64': SimCCO64,
    'PPC32': SimCCPowerPC,
    'PPC64': SimCCPowerPC64,
    'AARCH64': SimCCAArch64,
    'AVR': SimCCUnknown,
    'MSP': SimCCUnknown
}
def register_default_cc(arch, cc):
    """Register *cc* as the default calling convention for architecture name *arch*."""
    DEFAULT_CC.update({arch: cc})
# Syscall calling convention per architecture name, keyed again by OS name
# (with a 'default' entry used when the OS is unknown).
SYSCALL_CC = {
    'X86': {
        'default': SimCCX86LinuxSyscall,
        'Linux': SimCCX86LinuxSyscall,
        'Windows': SimCCX86WindowsSyscall,
        'CGC': SimCCX86LinuxSyscall,
    },
    'AMD64': {
        'default': SimCCAMD64LinuxSyscall,
        'Linux': SimCCAMD64LinuxSyscall,
        'Windows': SimCCAMD64WindowsSyscall,
    },
    'ARMEL': {
        'default': SimCCARMLinuxSyscall,
        'Linux': SimCCARMLinuxSyscall,
    },
    'ARMHF': {
        'default': SimCCARMLinuxSyscall,
        'Linux': SimCCARMLinuxSyscall,
    },
    'AARCH64': {
        'default': SimCCAArch64LinuxSyscall,
        'Linux': SimCCAArch64LinuxSyscall,
    },
    'MIPS32': {
        'default': SimCCO32LinuxSyscall,
        'Linux': SimCCO32LinuxSyscall,
    },
    'MIPS64': {
        'default': SimCCO64LinuxSyscall,
        'Linux': SimCCO64LinuxSyscall,
    },
    'PPC32': {
        'default': SimCCPowerPCLinuxSyscall,
        'Linux': SimCCPowerPCLinuxSyscall,
    },
    'PPC64': {
        'default': SimCCPowerPC64LinuxSyscall,
        'Linux': SimCCPowerPC64LinuxSyscall,
    },
}
def register_syscall_cc(arch, os, cc):
    """Register *cc* as the syscall calling convention for (*arch*, *os*)."""
    # BUGFIX: dict.has_key is deprecated and removed in Python 3;
    # the `in` membership test is equivalent and works on both.
    if arch not in SYSCALL_CC:
        SYSCALL_CC[arch] = {}
    SYSCALL_CC[arch][os] = cc
# Backwards-compatible aliases for the old camel-case names of these tables.
SyscallCC = SYSCALL_CC
DefaultCC = DEFAULT_CC
|
|
"""The tests for the group cover platform."""
from datetime import timedelta
import pytest
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_CURRENT_TILT_POSITION,
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
)
from homeassistant.components.group.cover import DEFAULT_NAME
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_UNIQUE_ID,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
SERVICE_STOP_COVER_TILT,
SERVICE_TOGGLE,
SERVICE_TOGGLE_COVER_TILT,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
# Entity id of the cover group created by the "group" platform configs below.
COVER_GROUP = "cover.cover_group"
# Entity ids provided by the demo cover platform.
DEMO_COVER = "cover.kitchen_window"
DEMO_COVER_POS = "cover.hall_window"
DEMO_COVER_TILT = "cover.living_room_window"
DEMO_TILT = "cover.tilt_demo"
# Group containing all four demo covers.
CONFIG_ALL = {
    DOMAIN: [
        {"platform": "demo"},
        {
            "platform": "group",
            CONF_ENTITIES: [DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
        },
    ]
}
# Group containing only the position-capable covers.
CONFIG_POS = {
    DOMAIN: [
        {"platform": "demo"},
        {
            "platform": "group",
            CONF_ENTITIES: [DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
        },
    ]
}
# Group containing only the tilt-capable covers.
CONFIG_TILT_ONLY = {
    DOMAIN: [
        {"platform": "demo"},
        {
            "platform": "group",
            CONF_ENTITIES: [DEMO_COVER_TILT, DEMO_TILT],
        },
    ]
}
# Group-only config (no demo platform) with a unique_id, used by the attribute tests.
CONFIG_ATTRIBUTES = {
    DOMAIN: {
        "platform": "group",
        CONF_ENTITIES: [DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT],
        CONF_UNIQUE_ID: "unique_identifier",
    }
}
@pytest.fixture
async def setup_comp(hass, config_count):
    """Set up the group cover platform from a (config, expected_count) pair."""
    conf, expected_count = config_count
    with assert_setup_component(expected_count, DOMAIN):
        await async_setup_component(hass, DOMAIN, conf)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
@pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)])
async def test_attributes(hass, setup_comp):
    """Test handling of state attributes.

    Incrementally sets member-cover states and checks that the group's
    supported features, positions, and assumed-state flag aggregate correctly.
    """
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_CLOSED
    assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
    assert state.attributes[ATTR_ENTITY_ID] == [
        DEMO_COVER,
        DEMO_COVER_POS,
        DEMO_COVER_TILT,
        DEMO_TILT,
    ]
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
    assert ATTR_CURRENT_POSITION not in state.attributes
    assert ATTR_CURRENT_TILT_POSITION not in state.attributes
    # Add Entity that supports open / close / stop
    hass.states.async_set(DEMO_COVER, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 11
    assert ATTR_CURRENT_POSITION not in state.attributes
    assert ATTR_CURRENT_TILT_POSITION not in state.attributes
    # Add Entity that supports set_cover_position
    hass.states.async_set(
        DEMO_COVER_POS,
        STATE_OPEN,
        {ATTR_SUPPORTED_FEATURES: 4, ATTR_CURRENT_POSITION: 70},
    )
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 15
    assert state.attributes[ATTR_CURRENT_POSITION] == 70
    assert ATTR_CURRENT_TILT_POSITION not in state.attributes
    # Add Entity that supports open tilt / close tilt / stop tilt
    hass.states.async_set(DEMO_TILT, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 112})
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 127
    assert state.attributes[ATTR_CURRENT_POSITION] == 70
    assert ATTR_CURRENT_TILT_POSITION not in state.attributes
    # Add Entity that supports set_tilt_position
    hass.states.async_set(
        DEMO_COVER_TILT,
        STATE_OPEN,
        {ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 60},
    )
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 255
    assert state.attributes[ATTR_CURRENT_POSITION] == 70
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
    # ### Test assumed state ###
    # ##########################
    # For covers
    hass.states.async_set(
        DEMO_COVER, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 4, ATTR_CURRENT_POSITION: 100}
    )
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_ASSUMED_STATE] is True
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 244
    assert state.attributes[ATTR_CURRENT_POSITION] == 85 # (70 + 100) / 2
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
    hass.states.async_remove(DEMO_COVER)
    hass.states.async_remove(DEMO_COVER_POS)
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 240
    assert ATTR_CURRENT_POSITION not in state.attributes
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
    # For tilts
    hass.states.async_set(
        DEMO_TILT,
        STATE_OPEN,
        {ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 100},
    )
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_ASSUMED_STATE] is True
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 128
    assert ATTR_CURRENT_POSITION not in state.attributes
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 80 # (60 + 100) / 2
    hass.states.async_remove(DEMO_COVER_TILT)
    hass.states.async_set(DEMO_TILT, STATE_CLOSED)
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_CLOSED
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0
    assert ATTR_CURRENT_POSITION not in state.attributes
    assert ATTR_CURRENT_TILT_POSITION not in state.attributes
    # A member marked assumed-state propagates to the group.
    hass.states.async_set(DEMO_TILT, STATE_CLOSED, {ATTR_ASSUMED_STATE: True})
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.attributes[ATTR_ASSUMED_STATE] is True
    # The configured unique_id is registered in the entity registry.
    entity_registry = er.async_get(hass)
    entry = entity_registry.async_get(COVER_GROUP)
    assert entry
    assert entry.unique_id == "unique_identifier"
@pytest.mark.parametrize("config_count", [(CONFIG_TILT_ONLY, 2)])
async def test_cover_that_only_supports_tilt_removed(hass, setup_comp):
    """Test removing a cover that support tilt.

    NOTE(review): the group state is read right after the async_set calls below
    without an intervening async_block_till_done - presumably the group listener
    runs synchronously enough here; confirm if this test ever flakes.
    """
    hass.states.async_set(
        DEMO_COVER_TILT,
        STATE_OPEN,
        {ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 60},
    )
    hass.states.async_set(
        DEMO_TILT,
        STATE_OPEN,
        {ATTR_SUPPORTED_FEATURES: 128, ATTR_CURRENT_TILT_POSITION: 60},
    )
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME
    assert state.attributes[ATTR_ENTITY_ID] == [
        DEMO_COVER_TILT,
        DEMO_TILT,
    ]
    assert ATTR_ASSUMED_STATE not in state.attributes
    assert ATTR_CURRENT_TILT_POSITION in state.attributes
    # Removing the tilt-capable member must not crash the group update.
    hass.states.async_remove(DEMO_COVER_TILT)
    hass.states.async_set(DEMO_TILT, STATE_CLOSED)
    await hass.async_block_till_done()
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_open_covers(hass, setup_comp):
    """Test open cover function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Advance time so the demo covers finish travelling to fully open.
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_POSITION] == 100
    # Every member should have been driven fully open as well.
    assert hass.states.get(DEMO_COVER).state == STATE_OPEN
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 100
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 100
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_close_covers(hass, setup_comp):
    """Test close cover function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Advance simulated time so the demo covers finish travelling.
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_CLOSED
    assert state.attributes[ATTR_CURRENT_POSITION] == 0
    # Every member should be fully closed as well.
    assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 0
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 0
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_toggle_covers(hass, setup_comp):
    """Test toggle cover function."""
    # Start covers in open state
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    # Toggle will close covers
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_CLOSED
    assert state.attributes[ATTR_CURRENT_POSITION] == 0
    assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 0
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 0
    # Toggle again will open covers
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_POSITION] == 100
    assert hass.states.get(DEMO_COVER).state == STATE_OPEN
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 100
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 100
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_stop_covers(hass, setup_comp):
    """Test stop cover function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Only one tick: covers are stopped while still travelling.
    future = dt_util.utcnow() + timedelta(seconds=1)
    async_fire_time_changed(hass, future)
    await hass.async_block_till_done()
    await hass.services.async_call(
        DOMAIN, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    future = dt_util.utcnow() + timedelta(seconds=1)
    async_fire_time_changed(hass, future)
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPENING
    # Group position is the mean of its members' positions.
    assert state.attributes[ATTR_CURRENT_POSITION] == 50  # (20 + 80) / 2
    assert hass.states.get(DEMO_COVER).state == STATE_OPEN
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 20
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 80
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_set_cover_position(hass, setup_comp):
    """Test set cover position function."""
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_POSITION,
        {ATTR_ENTITY_ID: COVER_GROUP, ATTR_POSITION: 50},
        blocking=True,
    )
    # Advance simulated time until the members reach the target position.
    for _ in range(4):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_POSITION] == 50
    # DEMO_COVER has no position support, so it only reports open/closed.
    assert hass.states.get(DEMO_COVER).state == STATE_CLOSED
    assert hass.states.get(DEMO_COVER_POS).attributes[ATTR_CURRENT_POSITION] == 50
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_POSITION] == 50
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_open_tilts(hass, setup_comp):
    """Test open tilt function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Advance simulated time so the tilt finishes moving.
    for _ in range(5):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
    assert (
        hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
    )
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_close_tilts(hass, setup_comp):
    """Test close tilt function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Advance simulated time so the tilt finishes moving.
    for _ in range(5):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 0
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_toggle_tilts(hass, setup_comp):
    """Test toggle tilt function."""
    # Start tilted open
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
    assert (
        hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
    )
    # Toggle will tilt closed
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 0
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 0
    # Toggle again will tilt open
    await hass.services.async_call(
        DOMAIN, SERVICE_TOGGLE_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 100
    assert (
        hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 100
    )
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_stop_tilts(hass, setup_comp):
    """Test stop tilts function."""
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Only one tick: the tilt is stopped while still moving.
    future = dt_util.utcnow() + timedelta(seconds=1)
    async_fire_time_changed(hass, future)
    await hass.async_block_till_done()
    await hass.services.async_call(
        DOMAIN, SERVICE_STOP_COVER_TILT, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    future = dt_util.utcnow() + timedelta(seconds=1)
    async_fire_time_changed(hass, future)
    await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 60
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 60
@pytest.mark.parametrize("config_count", [(CONFIG_ALL, 2)])
async def test_set_tilt_positions(hass, setup_comp):
    """Test set tilt position function."""
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_COVER_TILT_POSITION,
        {ATTR_ENTITY_ID: COVER_GROUP, ATTR_TILT_POSITION: 80},
        blocking=True,
    )
    # Advance simulated time until the tilt reaches the target position.
    for _ in range(3):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    state = hass.states.get(COVER_GROUP)
    assert state.state == STATE_OPEN
    assert state.attributes[ATTR_CURRENT_TILT_POSITION] == 80
    assert hass.states.get(DEMO_COVER_TILT).attributes[ATTR_CURRENT_TILT_POSITION] == 80
@pytest.mark.parametrize("config_count", [(CONFIG_POS, 2)])
async def test_is_opening_closing(hass, setup_comp):
    """Test is_opening property."""
    await hass.services.async_call(
        DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    await hass.async_block_till_done()
    # Both covers opening -> opening
    assert hass.states.get(DEMO_COVER_POS).state == STATE_OPENING
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_OPENING
    assert hass.states.get(COVER_GROUP).state == STATE_OPENING
    for _ in range(10):
        future = dt_util.utcnow() + timedelta(seconds=1)
        async_fire_time_changed(hass, future)
        await hass.async_block_till_done()
    await hass.services.async_call(
        DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: COVER_GROUP}, blocking=True
    )
    # Both covers closing -> closing
    assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSING
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
    assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
    hass.states.async_set(DEMO_COVER_POS, STATE_OPENING, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    # Closing + Opening -> Opening
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_OPENING
    assert hass.states.get(COVER_GROUP).state == STATE_OPENING
    hass.states.async_set(DEMO_COVER_POS, STATE_CLOSING, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    # Both covers closing -> closing
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSING
    assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
    # Closed + Closing -> Closing
    hass.states.async_set(DEMO_COVER_POS, STATE_CLOSED, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSED
    assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
    # Open + Closing -> Closing
    hass.states.async_set(DEMO_COVER_POS, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_CLOSING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_OPEN
    assert hass.states.get(COVER_GROUP).state == STATE_CLOSING
    # Closed + Opening -> Opening
    hass.states.async_set(DEMO_COVER_TILT, STATE_OPENING, {ATTR_SUPPORTED_FEATURES: 11})
    hass.states.async_set(DEMO_COVER_POS, STATE_CLOSED, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_OPENING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_CLOSED
    assert hass.states.get(COVER_GROUP).state == STATE_OPENING
    # Open + Opening -> Opening
    hass.states.async_set(DEMO_COVER_POS, STATE_OPEN, {ATTR_SUPPORTED_FEATURES: 11})
    await hass.async_block_till_done()
    assert hass.states.get(DEMO_COVER_TILT).state == STATE_OPENING
    assert hass.states.get(DEMO_COVER_POS).state == STATE_OPEN
    assert hass.states.get(COVER_GROUP).state == STATE_OPENING
|
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
# pylint: disable=too-many-return-statements
import json
from glob import glob
from os.path import isdir, join
import click
import semantic_version
from platformio import app, exception, util
from platformio.compat import glob_escape
from platformio.managers.package import BasePkgManager
from platformio.managers.platform import PlatformFactory, PlatformManager
from platformio.package.exception import ManifestException
from platformio.package.manifest.parser import ManifestParserFactory
from platformio.project.config import ProjectConfig
class LibraryManager(BasePkgManager):
    """Package manager specialized for PlatformIO library packages.

    Extends :class:`BasePkgManager` with registry-specific behavior:
    resolving search filters to library IDs, choosing the best matching
    published version, and recursively installing manifest dependencies.
    """

    # How long cached registry API responses stay valid.
    FILE_CACHE_VALID = "30d"  # 1 month

    def __init__(self, package_dir=None):
        # Default to the global library storage directory from the
        # project configuration when no explicit directory is given.
        self.config = ProjectConfig.get_instance()
        super(LibraryManager, self).__init__(
            package_dir or self.config.get_optional_dir("globallib")
        )

    @property
    def manifest_names(self):
        # Candidate manifest file names, probed in this order.
        return [".library.json", "library.json", "library.properties", "module.json"]

    def get_manifest_path(self, pkg_dir):
        """Return the manifest path for *pkg_dir*.

        Libraries shipped without any manifest are still usable; in that
        case the first C/C++/assembly source file found is returned as a
        stand-in, or ``None`` when the package has no sources at all.
        """
        path = BasePkgManager.get_manifest_path(self, pkg_dir)
        if path:
            return path
        # if library without manifest, returns first source file
        src_dir = join(glob_escape(pkg_dir))
        if isdir(join(pkg_dir, "src")):
            src_dir = join(src_dir, "src")
        chs_files = glob(join(src_dir, "*.[chS]"))
        if chs_files:
            return chs_files[0]
        cpp_files = glob(join(src_dir, "*.cpp"))
        if cpp_files:
            return cpp_files[0]
        return None

    def max_satisfying_repo_version(self, versions, requirements=None):
        """Pick the best entry from *versions* that satisfies *requirements*.

        Selection strategy:
        1. *requirements* parses as a semver spec -> highest satisfying version.
        2. Otherwise only an exact version-name match is accepted.
        3. No requirements at all -> the most recently released version wins.

        Returns the chosen version dict, or an empty dict when nothing matches.
        """

        def _cmp_dates(datestr1, datestr2):
            # Three-way comparison (-1/0/1) of release-date strings.
            date1 = util.parse_date(datestr1)
            date2 = util.parse_date(datestr2)
            if date1 == date2:
                return 0
            return -1 if date1 < date2 else 1

        semver_spec = None
        try:
            semver_spec = (
                semantic_version.SimpleSpec(requirements) if requirements else None
            )
        except ValueError:
            # Not a valid semver spec; fall back to exact-name matching below.
            pass
        item = {}
        for v in versions:
            semver_new = self.parse_semver_version(v["name"])
            if semver_spec:
                # pylint: disable=unsupported-membership-test
                if not semver_new or semver_new not in semver_spec:
                    continue
                if not item or self.parse_semver_version(item["name"]) < semver_new:
                    item = v
            elif requirements:
                if requirements == v["name"]:
                    return v
            else:
                # No requirements: keep the most recently released version.
                if not item or _cmp_dates(item["released"], v["released"]) == -1:
                    item = v
        return item

    def get_latest_repo_version(self, name, requirements, silent=False):
        """Return the newest registry version name matching *requirements*.

        ``None`` is returned when no published version satisfies them.
        """
        item = self.max_satisfying_repo_version(
            util.get_api_result(
                "/lib/info/%d"
                % self.search_lib_id(
                    {"name": name, "requirements": requirements}, silent=silent
                ),
                cache_valid="1h",
            )["versions"],
            requirements,
        )
        return item["name"] if item else None

    def _install_from_piorepo(self, name, requirements):
        """Download and install a library from the PlatformIO registry.

        *name* must already be in ``id=<registry id>`` form.
        """
        assert name.startswith("id="), name
        version = self.get_latest_repo_version(name, requirements)
        if not version:
            raise exception.UndefinedPackageVersion(
                requirements or "latest", util.get_systype()
            )
        dl_data = util.get_api_result(
            "/lib/download/" + str(name[3:]), dict(version=version), cache_valid="30d"
        )
        assert dl_data
        # Upgrade plain-HTTP download URLs when strict SSL is enabled.
        return self._install_from_url(
            name,
            dl_data["url"].replace("http://", "https://")
            if app.get_setting("strict_ssl")
            else dl_data["url"],
            requirements,
        )

    def search_lib_id(  # pylint: disable=too-many-branches
        self, filters, silent=False, interactive=False
    ):
        """Resolve *filters* (name/authors/frameworks/platforms) to a library ID.

        Installed packages are consulted first; otherwise the PIO Library
        Registry is queried.  With several matches the first hit is chosen
        automatically unless *interactive* is set, in which case the user
        is prompted.  Raises :exc:`exception.LibNotFound` on no match.
        """
        assert isinstance(filters, dict)
        assert "name" in filters
        # try to find ID within installed packages
        lib_id = self._get_lib_id_from_installed(filters)
        if lib_id:
            return lib_id
        # looking in PIO Library Registry
        if not silent:
            click.echo(
                "Looking for %s library in registry"
                % click.style(filters["name"], fg="cyan")
            )
        query = []
        for key in filters:
            if key not in ("name", "authors", "frameworks", "platforms"):
                continue
            values = filters[key]
            if not isinstance(values, list):
                values = [v.strip() for v in values.split(",") if v]
            for value in values:
                # Singularize plural filter keys for the registry query syntax.
                query.append(
                    '%s:"%s"' % (key[:-1] if key.endswith("s") else key, value)
                )
        lib_info = None
        result = util.get_api_result(
            "/v2/lib/search", dict(query=" ".join(query)), cache_valid="1h"
        )
        if result["total"] == 1:
            lib_info = result["items"][0]
        elif result["total"] > 1:
            if silent and not interactive:
                lib_info = result["items"][0]
            else:
                click.secho(
                    "Conflict: More than one library has been found "
                    "by request %s:" % json.dumps(filters),
                    fg="yellow",
                    err=True,
                )
                # pylint: disable=import-outside-toplevel
                from platformio.commands.lib import print_lib_item

                for item in result["items"]:
                    print_lib_item(item)
                if not interactive:
                    click.secho(
                        "Automatically chose the first available library "
                        "(use `--interactive` option to make a choice)",
                        fg="yellow",
                        err=True,
                    )
                    lib_info = result["items"][0]
                else:
                    deplib_id = click.prompt(
                        "Please choose library ID",
                        type=click.Choice([str(i["id"]) for i in result["items"]]),
                    )
                    for item in result["items"]:
                        if item["id"] == int(deplib_id):
                            lib_info = item
                            break
        if not lib_info:
            if list(filters) == ["name"]:
                raise exception.LibNotFound(filters["name"])
            raise exception.LibNotFound(str(filters))
        if not silent:
            click.echo(
                "Found: %s"
                % click.style(
                    "https://platformio.org/lib/show/{id}/{name}".format(**lib_info),
                    fg="blue",
                )
            )
        return int(lib_info["id"])

    def _get_lib_id_from_installed(self, filters):
        """Return the registry ID of an installed package matching *filters*.

        ``None`` means no installed package satisfies every filter, so the
        caller should fall back to a registry search.
        """
        if filters["name"].startswith("id="):
            return int(filters["name"][3:])
        package_dir = self.get_package_dir(
            filters["name"], filters.get("requirements", filters.get("version"))
        )
        if not package_dir:
            return None
        manifest = self.load_manifest(package_dir)
        if "id" not in manifest:
            return None
        # The manifest must cover every requested framework/platform.
        for key in ("frameworks", "platforms"):
            if key not in filters:
                continue
            if key not in manifest:
                return None
            if not util.items_in_list(
                util.items_to_list(filters[key]), util.items_to_list(manifest[key])
            ):
                return None
        if "authors" in filters:
            if "authors" not in manifest:
                return None
            manifest_authors = manifest["authors"]
            if not isinstance(manifest_authors, list):
                manifest_authors = [manifest_authors]
            manifest_authors = [
                a["name"]
                for a in manifest_authors
                if isinstance(a, dict) and "name" in a
            ]
            filter_authors = filters["authors"]
            if not isinstance(filter_authors, list):
                filter_authors = [filter_authors]
            # Every requested author must appear among the manifest authors.
            if not set(filter_authors) <= set(manifest_authors):
                return None
        return int(manifest["id"])

    def install(  # pylint: disable=arguments-differ
        self,
        name,
        requirements=None,
        silent=False,
        after_update=False,
        interactive=False,
        force=False,
    ):
        """Install a library and, recursively, its declared dependencies.

        Returns the package directory, or ``None`` when nothing was installed.
        """
        _name, _requirements, _url = self.parse_pkg_uri(name, requirements)
        if not _url:
            # Plain names are resolved to a registry ID first.
            name = "id=%d" % self.search_lib_id(
                {"name": _name, "requirements": _requirements},
                silent=silent,
                interactive=interactive,
            )
            requirements = _requirements
        pkg_dir = BasePkgManager.install(
            self,
            name,
            requirements,
            silent=silent,
            after_update=after_update,
            force=force,
        )
        if not pkg_dir:
            return None
        manifest = None
        try:
            manifest = ManifestParserFactory.new_from_dir(pkg_dir).as_dict()
        except ManifestException:
            # Unreadable manifest: install the package itself without deps.
            pass
        if not manifest or not manifest.get("dependencies"):
            return pkg_dir
        if not silent:
            click.secho("Installing dependencies", fg="yellow")
        builtin_lib_storages = None
        for filters in manifest["dependencies"]:
            assert "name" in filters
            # avoid circle dependencies
            if not self.INSTALL_HISTORY:
                self.INSTALL_HISTORY = []
            history_key = str(filters)
            if history_key in self.INSTALL_HISTORY:
                continue
            self.INSTALL_HISTORY.append(history_key)
            if any(s in filters.get("version", "") for s in ("\\", "/")):
                # Version looks like a URL or path; install it directly.
                self.install(
                    "{name}={version}".format(**filters),
                    silent=silent,
                    after_update=after_update,
                    interactive=interactive,
                    force=force,
                )
            else:
                try:
                    lib_id = self.search_lib_id(filters, silent, interactive)
                except exception.LibNotFound as e:
                    # Platform-bundled (built-in) libraries are not in the
                    # registry; warn instead of failing the whole install.
                    if builtin_lib_storages is None:
                        builtin_lib_storages = get_builtin_libs()
                    if not silent or is_builtin_lib(
                        builtin_lib_storages, filters["name"]
                    ):
                        click.secho("Warning! %s" % e, fg="yellow")
                    continue
                if filters.get("version"):
                    self.install(
                        lib_id,
                        filters.get("version"),
                        silent=silent,
                        after_update=after_update,
                        interactive=interactive,
                        force=force,
                    )
                else:
                    self.install(
                        lib_id,
                        silent=silent,
                        after_update=after_update,
                        interactive=interactive,
                        force=force,
                    )
        return pkg_dir
def get_builtin_libs(storage_names=None):
    """Collect installed libraries from the built-in storages of all platforms.

    :param storage_names: optional list of storage names to restrict the scan.
    :returns: list of dicts with ``name``, ``path`` and installed ``items``.
    """
    wanted = storage_names or []
    collected = []
    manager = PlatformManager()
    for manifest in manager.get_installed():
        platform = PlatformFactory.newPlatform(manifest["__pkg_dir"])
        for storage in platform.get_lib_storages():
            if wanted and storage["name"] not in wanted:
                continue
            lib_manager = LibraryManager(storage["path"])
            collected.append(
                {
                    "name": storage["name"],
                    "path": storage["path"],
                    "items": lib_manager.get_installed(),
                }
            )
    return collected
def is_builtin_lib(storages, name):
    """Return ``True`` when *name* matches a library in any built-in storage."""
    if not storages:
        return False
    return any(
        item.get("name") == name
        for storage in storages
        for item in storage["items"]
    )
|
|
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Dump the state of the world for post mortem."""
import argparse
import datetime
from distutils import spawn
import fnmatch
import os
import os.path
import subprocess
import sys
def get_options():
    """Parse the command line: output directory (-d) and optional tag (-n)."""
    parser = argparse.ArgumentParser(
        description='Dump world state for debugging')
    parser.add_argument('-d', '--dir', default='.',
                        help='Output directory for worlddump')
    parser.add_argument('-n', '--name', default='',
                        help='Additional name to tag into file')
    return parser.parse_args()
def filename(dirname, name=""):
    """Build a timestamped dump path: worlddump-YYYY-MM-DD-HHMMSS[-name].txt."""
    stamp_fmt = "worlddump-%Y-%m-%d-%H%M%S"
    if name:
        stamp_fmt += "-" + name
    stamp_fmt += ".txt"
    stamp = datetime.datetime.utcnow().strftime(stamp_fmt)
    return os.path.join(dirname, stamp)
def warn(msg):
    """Print a warning prefixed with ``WARN:`` to stdout.

    Uses the print() call form so the script stays importable on both
    Python 2 and Python 3 (the original print statement is py2-only).
    """
    print("WARN: %s" % msg)
def _dump_cmd(cmd):
print cmd
print "-" * len(cmd)
print
try:
subprocess.check_call(cmd, shell=True)
print
except subprocess.CalledProcessError:
print "*** Failed to run: %s" % cmd
def _find_cmd(cmd):
if not spawn.find_executable(cmd):
print "*** %s not found: skipping" % cmd
return False
return True
def _header(name):
print
print name
print "=" * len(name)
print
def disk_space():
    """Summarize filesystem usage and warn about devices over 95% full."""
    # the df output
    _header("File System Summary")
    dfraw = os.popen("df -Ph").read()
    df = [s.split() for s in dfraw.splitlines()]
    for fs in df:
        try:
            # Column 4 is "Use%"; strip the trailing '%' before comparing.
            if int(fs[4][:-1]) > 95:
                warn("Device %s (%s) is %s full, might be an issue" % (
                    fs[0], fs[5], fs[4]))
        except ValueError:
            # if it doesn't look like an int, that's fine
            pass
    # print() call form keeps the script importable on Python 3 as well.
    print(dfraw)
def ebtables_dump():
    """Dump the ebtables filter/nat/broute tables (skipped when absent)."""
    _header("EB Tables Dump")
    if not _find_cmd('ebtables'):
        return
    for table in ('filter', 'nat', 'broute'):
        _dump_cmd("sudo ebtables -t %s -L" % table)
def iptables_dump():
    """Dump the IPv4 iptables filter/nat/mangle tables with line numbers."""
    _header("IP Tables Dump")
    for table in ('filter', 'nat', 'mangle'):
        _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table)
def _netns_list():
    """Return the names of all network namespaces (empty list if none).

    ``communicate()`` returns bytes on Python 3; decode before splitting so
    the names interpolate cleanly into the shell commands built by callers.
    (On Python 2 ``str.decode`` yields unicode, which formats identically.)
    """
    process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE)
    stdout, _ = process.communicate()
    return stdout.decode('utf-8', 'replace').split()
def network_dump():
    """Dump bridges, the ARP table, and ip addr/link/route (per namespace too)."""
    _header("Network Dump")
    _dump_cmd("brctl show")
    _dump_cmd("arp -n")
    ip_cmds = ["addr", "link", "route"]
    for cmd in ip_cmds + ['netns']:
        _dump_cmd("ip %s" % cmd)
    # Repeat the address/link/route dumps inside every network namespace.
    for namespace in _netns_list():
        for cmd in ip_cmds:
            _dump_cmd('sudo ip netns exec %s ip %s' % (namespace, cmd))
def ovs_dump():
    """Dump Open vSwitch configuration and flows (skipped without ovs-vsctl)."""
    _header("Open vSwitch Dump")
    # NOTE(cdent): If we're not using neutron + ovs these commands
    # will not be present so
    if not _find_cmd('ovs-vsctl'):
        return
    # NOTE(ihrachys): worlddump is used outside of devstack context (f.e. in
    # grenade), so there is no single place to determine the bridge names from.
    # Hardcode for now.
    bridges = ('br-int', 'br-tun', 'br-ex')
    _dump_cmd("sudo ovs-vsctl show")
    for template in ("sudo ovs-ofctl show %s", "sudo ovs-ofctl dump-flows %s"):
        for bridge in bridges:
            _dump_cmd(template % bridge)
def process_list():
    """Capture a full process listing with CPU/memory details."""
    _header("Process Listing")
    fields = "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args"
    _dump_cmd("ps axo " + fields)
def compute_consoles():
    """Dump every guest console.log found under /opt/stack.

    The inner loop variable is named ``console_name`` so it does not shadow
    the module-level ``filename()`` helper (the original code reused that
    name for the loop variable).
    """
    _header("Compute consoles")
    for root, _dirnames, filenames in os.walk('/opt/stack'):
        for console_name in fnmatch.filter(filenames, 'console.log'):
            fullpath = os.path.join(root, console_name)
            _dump_cmd("sudo cat %s" % fullpath)
def guru_meditation_report():
    """Ask a running nova-compute for a Guru Meditation Report via SIGUSR2.

    Converted to the print() call form for Python 2/3 compatibility.
    """
    _header("nova-compute Guru Meditation Report")
    try:
        # pgrep exits non-zero when no matching process exists.
        subprocess.check_call(["pgrep", "nova-compute"])
    except subprocess.CalledProcessError:
        print("Skipping as nova-compute does not appear to be running")
        return
    _dump_cmd("kill -s USR2 `pgrep nova-compute`")
    print("guru meditation report in nova-compute log")
def main():
    """Collect every diagnostic section into a timestamped dump file."""
    opts = get_options()
    fname = filename(opts.dir, opts.name)
    print("World dumping... see %s for details" % fname)
    # Re-open stdout line buffered. The original used buffering=0, which
    # Python 3 rejects for text-mode streams; line buffering (1) keeps our
    # output interleaving sanely with the dumped subprocess output on both
    # Python versions.
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
    with open(fname, 'w') as f:
        # Redirect fd 1 into the dump file so child processes write there too.
        os.dup2(f.fileno(), sys.stdout.fileno())
        disk_space()
        process_list()
        network_dump()
        ovs_dump()
        iptables_dump()
        ebtables_dump()
        compute_consoles()
        guru_meditation_report()
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit with status 1 (and no traceback) when interrupted by Ctrl-C.
        sys.exit(1)
|
|
# Human friendly input/output in Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: December 1, 2020
# URL: https://humanfriendly.readthedocs.io
"""
Simple text manipulation functions.
The :mod:`~humanfriendly.text` module contains simple functions to manipulate text:
- The :func:`concatenate()` and :func:`pluralize()` functions make it easy to
generate human friendly output.
- The :func:`format()`, :func:`compact()` and :func:`dedent()` functions
provide a clean and simple to use syntax for composing large text fragments
with interpolated variables.
- The :func:`tokenize()` function parses simple user input.
"""
# Standard library modules.
import numbers
import random
import re
import string
import textwrap
# Public identifiers that require documentation.
# Public identifiers that require documentation.
# NOTE: this tuple also defines the wildcard-import surface of the module
# (``from humanfriendly.text import *``).
__all__ = (
    'compact',
    'compact_empty_lines',
    'concatenate',
    'dedent',
    'format',
    'generate_slug',
    'is_empty_line',
    'join_lines',
    'pluralize',
    'pluralize_raw',
    'random_string',
    'split',
    'split_paragraphs',
    'tokenize',
    'trim_empty_lines',
)
def compact(text, *args, **kw):
    """
    Compact whitespace in a string.

    Collapses every run of whitespace characters to a single space, strips
    leading and trailing whitespace, and interpolates any arguments using
    :func:`format()`.

    :param text: The text to compact (a string).
    :param args: Any positional arguments are interpolated using :func:`format()`.
    :param kw: Any keyword arguments are interpolated using :func:`format()`.
    :returns: The compacted text (a string).

    This pairs well with Python's multi line strings: long, indented text
    fragments with interpolated variables stay easy to write and read while
    the emitted text contains no stray indentation or newlines.
    """
    return format(' '.join(text.split()), *args, **kw)
def compact_empty_lines(text):
    """
    Replace repeating empty lines with a single empty line (similar to ``cat -s``).

    :param text: The text in which to compact empty lines (a string).
    :returns: The text with empty lines compacted (a string).
    """
    kept = []
    for line in text.splitlines(True):
        # Skip an empty line whenever the previously kept line was also empty.
        if kept and is_empty_line(kept[-1]) and is_empty_line(line):
            continue
        kept.append(line)
    return ''.join(kept)
def concatenate(items, conjunction='and', serial_comma=False):
    """
    Concatenate a list of items in a human friendly way.

    :param items:
        A sequence of strings.
    :param conjunction:
        The word to use before the last item (a string, defaults to "and").
    :param serial_comma:
        :data:`True` to use a `serial comma`_, :data:`False` otherwise
        (defaults to :data:`False`).
    :returns:
        A single string.

    >>> from humanfriendly.text import concatenate
    >>> concatenate(["eggs", "milk", "bread"])
    'eggs, milk and bread'

    .. _serial comma: https://en.wikipedia.org/wiki/Serial_comma
    """
    sequence = list(items)
    if not sequence:
        return ''
    if len(sequence) == 1:
        return sequence[0]
    leading, last = sequence[:-1], sequence[-1]
    head = ', '.join(leading)
    if serial_comma:
        head += ','
    return ' '.join([head, conjunction, last])
def dedent(text, *args, **kw):
    """
    Dedent a string (remove common leading whitespace from all lines).

    Strips the common leading whitespace via :func:`textwrap.dedent()`,
    removes leading and trailing empty lines via :func:`trim_empty_lines()`
    and interpolates any arguments using :func:`format()`.

    :param text: The text to dedent (a string).
    :param args: Any positional arguments are interpolated using :func:`format()`.
    :param kw: Any keyword arguments are interpolated using :func:`format()`.
    :returns: The dedented text (a string).

    Use :func:`compact()` for text presented to users (where whitespace is
    not significant) and :func:`dedent()` for data-file and code generation
    tasks (where newlines and indentation are very significant).
    """
    cleaned = trim_empty_lines(textwrap.dedent(text))
    return format(cleaned, *args, **kw)
def format(text, *args, **kw):
"""
Format a string using the string formatting operator and/or :meth:`str.format()`.
:param text: The text to format (a string).
:param args: Any positional arguments are interpolated into the text using
the string formatting operator (``%``). If no positional
arguments are given no interpolation is done.
:param kw: Any keyword arguments are interpolated into the text using the
:meth:`str.format()` function. If no keyword arguments are given
no interpolation is done.
:returns: The text with any positional and/or keyword arguments
interpolated (a string).
The implementation of this function is so trivial that it seems silly to
even bother writing and documenting it. Justifying this requires some
context :-).
**Why format() instead of the string formatting operator?**
For really simple string interpolation Python's string formatting operator
is ideal, but it does have some strange quirks:
- When you switch from interpolating a single value to interpolating
multiple values you have to wrap them in tuple syntax. Because
:func:`format()` takes a `variable number of arguments`_ it always
receives a tuple (which saves me a context switch :-). Here's an
example:
>>> from humanfriendly.text import format
>>> # The string formatting operator.
>>> print('the magic number is %s' % 42)
the magic number is 42
>>> print('the magic numbers are %s and %s' % (12, 42))
the magic numbers are 12 and 42
>>> # The format() function.
>>> print(format('the magic number is %s', 42))
the magic number is 42
>>> print(format('the magic numbers are %s and %s', 12, 42))
the magic numbers are 12 and 42
- When you interpolate a single value and someone accidentally passes in a
tuple your code raises a :exc:`~exceptions.TypeError`. Because
:func:`format()` takes a `variable number of arguments`_ it always
receives a tuple so this can never happen. Here's an example:
>>> # How expecting to interpolate a single value can fail.
>>> value = (12, 42)
>>> print('the magic value is %s' % value)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: not all arguments converted during string formatting
>>> # The following line works as intended, no surprises here!
>>> print(format('the magic value is %s', value))
the magic value is (12, 42)
**Why format() instead of the str.format() method?**
When you're doing complex string interpolation the :meth:`str.format()`
function results in more readable code, however I frequently find myself
adding parentheses to force evaluation order. The :func:`format()` function
avoids this because of the relative priority between the comma and dot
operators. Here's an example:
>>> "{adjective} example" + " " + "(can't think of anything less {adjective})".format(adjective='silly')
"{adjective} example (can't think of anything less silly)"
>>> ("{adjective} example" + " " + "(can't think of anything less {adjective})").format(adjective='silly')
"silly example (can't think of anything less silly)"
>>> format("{adjective} example" + " " + "(can't think of anything less {adjective})", adjective='silly')
"silly example (can't think of anything less silly)"
The :func:`compact()` and :func:`dedent()` functions are wrappers that
combine :func:`format()` with whitespace manipulation to make it easy to
write nice to read Python code.
.. _variable number of arguments: https://docs.python.org/2/tutorial/controlflow.html#arbitrary-argument-lists
"""
if args:
text %= args
if kw:
text = text.format(**kw)
return text
def generate_slug(text, delimiter="-"):
    """
    Convert text to a normalized "slug" without whitespace.

    :param text: The original text, for example ``Some Random Text!``.
    :param delimiter: The delimiter used to separate words
                      (defaults to the ``-`` character).
    :returns: The slug text, for example ``some-random-text``.
    :raises: :exc:`~exceptions.ValueError` when the provided
             text is nonempty but results in an empty slug.
    """
    # Backslashes must be doubled so the delimiter is safe to use as a
    # replacement string in re.sub().
    replacement = delimiter.replace("\\", "\\\\")
    slug = re.sub("[^a-z0-9]+", replacement, text.lower()).strip(delimiter)
    if text and not slug:
        msg = "The provided text %r results in an empty slug!"
        raise ValueError(format(msg, text))
    return slug
def is_empty_line(text):
    """
    Check if a text is empty or contains only whitespace.

    :param text: The text to check for "emptiness" (a string).
    :returns: :data:`True` if the text is empty or contains only whitespace,
              :data:`False` otherwise.
    """
    # A string is "empty" exactly when stripping whitespace leaves nothing.
    return not text.strip()
def join_lines(text):
    """
    Remove "hard wrapping" from the paragraphs in a string.

    :param text: The text to reformat (a string).
    :returns: The text without hard wrapping (a string).

    A line break is removed only when the characters immediately before and
    after it are both non-whitespace; leading indentation therefore defeats
    this function (dedent the text first if needed).
    """
    hard_wrap = re.compile(r'(\S)\n(\S)')
    return hard_wrap.sub(r'\1 \2', text)
def pluralize(count, singular, plural=None):
    """
    Combine a count with the singular or plural form of a word.

    :param count: The count (a number).
    :param singular: The singular form of the word (a string).
    :param plural: The plural form of the word (a string or :data:`None`).
    :returns: The count and singular or plural word concatenated (a string).

    Word selection is delegated to :func:`pluralize_raw()`.
    """
    word = pluralize_raw(count, singular, plural)
    return '%s %s' % (count, word)
def pluralize_raw(count, singular, plural=None):
    """
    Select the singular or plural form of a word based on a count.

    :param count: The count (a number).
    :param singular: The singular form of the word (a string).
    :param plural: The plural form of the word (a string or :data:`None`).
    :returns: The singular or plural form of the word (a string).

    The singular form is used only when the count is exactly 1.0. When no
    plural form is given, one is derived by appending "s" to the singular
    form (which is not always correct English, hence the override).
    """
    if float(count) == 1.0:
        return singular
    return plural if plural else singular + 's'
def random_string(length=(25, 100), characters=string.ascii_letters):
    """random_string(length=(25, 100), characters=string.ascii_letters)
    Generate a random string.

    :param length: The length of the string to be generated (a number or a
                   tuple with two numbers). If this is a tuple then a random
                   number between the two numbers given in the tuple is used.
    :param characters: The characters to be used (a string, defaults
                       to :data:`string.ascii_letters`).
    :returns: A random string.
    """
    if isinstance(length, numbers.Number):
        size = length
    else:
        # Pick a random size within the inclusive (low, high) range.
        size = random.randint(length[0], length[1])
    return ''.join(random.choice(characters) for _ in range(size))
def split(text, delimiter=','):
    """
    Split a delimiter-separated list of strings.

    :param text: The text to split (a string).
    :param delimiter: The delimiter to split on (a string, default ``,``).
    :returns: A list of zero or more nonempty strings.

    Unlike :meth:`str.split()`, surrounding whitespace is stripped from each
    token and empty/whitespace-only tokens are dropped, e.g.::

        >>> split('foo,bar, baz,')
        ['foo', 'bar', 'baz']
    """
    tokens = []
    for raw_token in text.split(delimiter):
        stripped = raw_token.strip()
        if stripped:
            tokens.append(stripped)
    return tokens
def split_paragraphs(text):
    """
    Split a string into paragraphs (one or more lines delimited by an empty line).

    :param text: The text to split into paragraphs (a string).
    :returns: A list of strings.
    """
    trimmed = (trim_empty_lines(chunk) for chunk in text.split('\n\n'))
    return [chunk for chunk in trimmed if chunk and not chunk.isspace()]
def tokenize(text):
    """
    Tokenize a text into numbers and strings.

    :param text: The text to tokenize (a string).
    :returns: A list of strings and/or numbers.

    Integer and floating point numbers are coerced to int/float, whitespace
    is ignored, and numbers are separated from adjacent words even without
    whitespace, e.g. ``tokenize('42.5MB')`` yields ``[42.5, 'MB']``.
    """
    tokens = []
    # Splitting on the capturing number pattern keeps the numbers as tokens.
    for part in re.split(r'(\d+(?:\.\d+)?)', text):
        part = part.strip()
        if not part:
            continue
        if re.match(r'\d+\.\d+', part):
            tokens.append(float(part))
        elif part.isdigit():
            tokens.append(int(part))
        else:
            tokens.append(part)
    return tokens
def trim_empty_lines(text):
    """
    Trim leading and trailing empty lines from the given text.

    :param text: The text to trim (a string).
    :returns: The trimmed text (a string).
    """
    # Keep line endings so the surviving lines can be re-joined verbatim.
    lines = text.splitlines(True)
    start, end = 0, len(lines)
    while start < end and is_empty_line(lines[start]):
        start += 1
    while end > start and is_empty_line(lines[end - 1]):
        end -= 1
    return ''.join(lines[start:end])
|
|
#solve sokoban
import sys, time
import SokoMap, HashTable
import os
# Manhattan Distance between two points
def manDistance(a, b):
    """Return the Manhattan (taxicab) distance between points a and b."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy
def precomputeDistances(sm):
    """Distances from any square to any other square.
    Used for minmatching"""
    # NOTE(review): this looks unfinished -- the map is fetched but no
    # distances are computed or returned (callers get None implicitly).
    # Confirm whether this stub is still needed.
    imap = sm.getMap()
def heuristic(sm):
    """Heuristic cost estimate for a Sokoban state.

    Greedily matches each block to a goal by Manhattan distance (a cheap
    approximation of minimum-cost matching) and adds the distance from
    the player to the nearest unplaced block.
    """
    # Build, for every block, the list of (block, goal, distance) options.
    solutions = []
    for b in sm.getBlocks():
        solution = []
        for g in sm.getGoals():
            sol = (b, g, manDistance(b, g))
            solution.append(sol)
        solutions.append(solution)
    # Greedy matching: seed with each option of the first block, extend
    # every other row with its first unused block/goal pair, and keep the
    # cheapest total.
    best = sys.maxint
    for s in solutions[0]:
        usedGoal = [s[1]]
        usedBlock = [s[0]]
        solution = [s]
        h = s[2]
        for lin in solutions:
            for col in lin:
                if col[1] not in usedGoal and col[0] not in usedBlock:
                    solution.append(col)
                    usedGoal.append(col[1])
                    usedBlock.append(col[0])
                    h = h + col[2]
                    break
        if h < best:
            best = h
            result = solution
    # Add the distance from the player to the closest unplaced block.
    w = sm.getPlayer()
    d = sys.maxint
    v = (-1, -1)
    for x in sm.getUnplacedBlocks():
        if manDistance(w, x) < d:
            d = manDistance(w, x)
            v = x
    # BUG FIX: the original guard was "if v is not (-1,-1)", which tests
    # object identity against a freshly created tuple and is therefore
    # always True in practice, so the sys.maxint sentinel distance was
    # added to the heuristic whenever there were no unplaced blocks.
    # Value inequality is what was intended.
    if v != (-1, -1):
        best = best + d
    return best
def isClosed(closedSet, x):
    """Return True if x compares equal to any state in closedSet."""
    return any(x == y for y in closedSet)
def IDAstar(sm, h):
    """
    Iterative-deepening A* search over Sokoban states.

    :param sm: the initial SokoMap state.
    :param h: heuristic function mapping a state to an estimated
              cost-to-goal (used for the f = g + h cut-off).
    :returns: a solution state, or None when the search fails.
    """
    MAXNODES = 20000000
    openSet = []
    closedSet = []
    visitSet = []
    # Start one below the heuristic estimate; the loop adds 1 back first.
    pathLimit = h(sm) - 1
    sucess = False  # NOTE(review): never read -- looks like dead code.
    it = 0
    while True:
        pathLimit = pathLimit + 1
        print "current pathLimit = ", pathLimit
        sm.setG(0)
        openSet.insert(0, sm)
        # Fresh duplicate-detection table for every deepening iteration.
        ht = HashTable.HashTable()
        nodes = 0
        while len(openSet) > 0:
            currentState = openSet.pop(0)
            #currentState.printMap()
            nodes = nodes + 1
            if currentState.isSolution():
                return currentState # SOLUTION FOUND!!!
            if nodes % 1000000 == 0:
                print (nodes/1000000), "M nodes checked"
            if nodes == MAXNODES:
                print "Limit of nodes reached: exiting without a solution."
                sys.exit(1)
            if currentState.getF() <= pathLimit:
                closedSet.insert(0, currentState)
                # get the sucessors of the current state
                for x in currentState.children():
                    # test if node has been "closed"
                    if isClosed(closedSet,x):
                        continue
                    # check if this has already been generated
                    if ht.checkAdd(x):
                        continue
                    # compute G for each
                    x.setG(currentState.getG() + 1)
                    x.setF(x.getG()+ h(x))
                    #x.setParent(currentState)
                    openSet.insert(0, x) # push
            else:
                # Over the cut-off: remember it for the next iteration.
                visitSet.insert(0, currentState)
        #print "Nodes checked = ", nodes
        print "iteration = ", it
        it = it + 1
        if len(visitSet) == 0:
            print "FAIL"
            return None
        # set a new cut-off value (pathLimit)
        low = visitSet[0].getF()
        for x in visitSet:
            if x.getF() < low:
                low = x.getF()
        pathLimit = low
        # move nodes from VISIT to OPEN and reset closedSet
        openSet.extend(visitSet)
        visitSet = []
        closedSet = []
def depth_first_search__scan(sm, h):
    """
    Plain depth-first search over Sokoban states.

    :param sm: the initial SokoMap state.
    :param h: heuristic function (not referenced here; kept for
              interface parity with IDAstar).
    :returns: a solution state, or None when the space is exhausted.
    """
    MAXNODES = 20000000
    openSet = [sm]
    # Hash table prevents re-expanding an already-generated state.
    ht = HashTable.HashTable()
    ht.checkAdd(sm)
    nodes = 0
    while len(openSet) > 0:
        # LIFO pop gives depth-first order.
        currentState = openSet.pop()
        #currentState.printMap()
        nodes += 1
        if currentState.isSolution():
            return currentState # SOLUTION FOUND!!!
        if nodes % 1000 == 0:
            print nodes, " nodes checked"
            sys.stdout.flush()
        if nodes == MAXNODES:
            print "Limit of nodes reached: exiting without a solution."
            sys.exit(1)
        for x in currentState.children():
            # check if this has already been generated
            if ht.checkAdd(x):
                continue
            openSet.append(x)
    return None
if __name__ == '__main__':
    import argparse
    # Parse the board file path and the search method to use.
    parser = argparse.ArgumentParser()
    parser.add_argument("filename", type=str,
                        help="a path to the filename with the board.")
    parser.add_argument("--method", type=str, default="IDAstar",
                        help="The method - \"dfs\" or \"IDAstar\"")
    args = parser.parse_args()
    smap = SokoMap.SokoMap()
    smap.readMap(args.filename)
    smap.printMap()
    print "-----"
    # Mark squares that are statically dead (blocks there can never reach
    # a goal), then show the annotated map.
    smap.staticDeadlock()
    print "-----"
    smap.printMap()
    #sys.exit(1)
    #smap.buildInfluenceTable()
    #sys.exit(-1)
    start = time.time()
    # TODO : Implement using a command line arg instead of the environment
    # variable
    scan_function = IDAstar
    if args.method == 'dfs':
        scan_function = depth_first_search__scan
    elif args.method == 'IDAstar':
        scan_function = IDAstar
    else:
        print "Unknown scan type"
        sys.exit(-1)
    # Run the selected search and report the elapsed time and solution.
    sol = scan_function(smap, heuristic)
    print time.time()-start
    if sol is not None:
        sol.printMap()
        print "\n"
        print sol.getMoveList()
|
|
from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks
from utils import verify_database_connections
import os
import shutil
import stat
import json
import subprocess
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
import textwrap
from pprint import pprint
from multiprocessing import Process
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Back, Style
# Text-based progress indicators
import progressbar
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
    """Print the name of every gathered test, one per line, then clean up."""
    all_tests = self.__gather_tests
    for test in all_tests:
        print test.name
    self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
    """
    Dump the metadata of every gathered test as JSON into
    test_metadata.json inside the full results directory.
    """
    metadata = [{
        "name": test.name,
        "approach": test.approach,
        "classification": test.classification,
        "database": test.database,
        "framework": test.framework,
        "language": test.language,
        "orm": test.orm,
        "platform": test.platform,
        "webserver": test.webserver,
        "os": test.os,
        "database_os": test.database_os,
        "display_name": test.display_name,
        "notes": test.notes,
        "versus": test.versus
    } for test in self.__gather_tests]
    target = os.path.join(self.full_results_directory(), "test_metadata.json")
    with open(target, "w") as f:
        f.write(json.dumps(metadata))
    self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
    """Re-parse the raw result data of every gathered test for the
    current timestamp, then aggregate and clean up."""
    tests = self.__gather_tests
    for t in tests:
        t.parse_all()
    self.__parse_results(tests)
    self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
def run(self):
    """
    Run the full benchmark flow: gather the tests, prepare the server,
    database and client machines, run every test, and (in benchmark
    mode) parse the results.  Returns the status code of __run_tests.
    """
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests
    ##########################
    # Setup client/server
    ##########################
    print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()
    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
    ##########################
    # Run tests
    ##########################
    print header("Running Tests...", top='=', bottom='=')
    result = self.__run_tests(all_tests)
    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
        print header("Parsing Results ...", top='=', bottom='=')
        self.__parse_results(all_tests)
    self.__finish()
    return result
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# generates a fully qualified URL for sftp to database
############################################################
def database_sftp_string(self, batch_file):
    """Build the sftp command line for connecting to the database host.

    :param batch_file: optional path passed via -b, or None."""
    parts = ["sftp -oStrictHostKeyChecking=no "]
    if batch_file is not None:
        parts.append(" -b " + batch_file + " ")
    if self.database_identity_file is not None:
        parts.append(" -i " + self.database_identity_file + " ")
    parts.append(self.database_user + "@" + self.database_host)
    return "".join(parts)
############################################################
# End database_sftp_string
############################################################
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
    """Build the sftp command line for connecting to the client host.

    :param batch_file: optional path passed via -b, or None."""
    parts = ["sftp -oStrictHostKeyChecking=no "]
    if batch_file is not None:
        parts.append(" -b " + batch_file + " ")
    if self.client_identity_file is not None:
        parts.append(" -i " + self.client_identity_file + " ")
    parts.append(self.client_user + "@" + self.client_host)
    return "".join(parts)
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
    """Return ``host:port`` + url for accessing a test endpoint."""
    return "%s:%s%s" % (self.server_host, port, url)
############################################################
# End generate_url
############################################################
############################################################
# get_output_file(test_name, test_type)
# returns the output file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_output_file(self, test_name, test_type):
    """Return the raw-output path: result_dir/timestamp/test_type/test_name/raw."""
    components = (self.result_directory, self.timestamp,
                  test_type, test_name, "raw")
    return os.path.join(*components)
############################################################
# End get_output_file
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def output_file(self, test_name, test_type):
    """Return the raw-output path for test_name/test_type, creating its
    parent directory if it does not already exist."""
    raw_path = self.get_output_file(test_name, test_type)
    parent = os.path.dirname(raw_path)
    try:
        os.makedirs(parent)
    except OSError:
        pass  # directory already exists (best effort, matches original)
    return raw_path
############################################################
# End output_file
############################################################
############################################################
# get_stats_file(test_name, test_type)
# returns the stats file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_stats_file(self, test_name, test_type):
    """Return the stats path: result_dir/timestamp/test_type/test_name/stats."""
    components = (self.result_directory, self.timestamp,
                  test_type, test_name, "stats")
    return os.path.join(*components)
############################################################
# End get_stats_file
############################################################
############################################################
# stats_file(test_name, test_type)
# returns the stats file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def stats_file(self, test_name, test_type):
    """Return the stats path for test_name/test_type, creating its
    parent directory if it does not already exist."""
    stats_path = self.get_stats_file(test_name, test_type)
    parent = os.path.dirname(stats_path)
    try:
        os.makedirs(parent)
    except OSError:
        pass  # directory already exists (best effort, matches original)
    return stats_path
############################################################
# End stats_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
    """Return result_dir/timestamp, creating the directory if needed."""
    results_path = os.path.join(self.result_directory, self.timestamp)
    try:
        os.makedirs(results_path)
    except OSError:
        pass  # directory already exists
    return results_path
############################################################
# End full_results_directory
############################################################
############################################################
# Latest intermediate results directory
############################################################
def latest_results_directory(self):
    """
    Return <result_directory>/latest, creating it if needed and, on
    non-Windows hosts, opening its permissions to 777 so the testrunner
    user can write logs there (the LOGDIR param in setup.sh).
    """
    path = os.path.join(self.result_directory, "latest")
    try:
        os.makedirs(path)
    except OSError:
        pass  # directory already exists
    # 777 rather than the preferable 775: the latter would require
    # testrunner to be in the current user's group.
    if self.os.lower() != 'windows':
        mode777 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                   stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP |
                   stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
        os.chmod(path, mode777)
    return path
############################################################
# report_verify_results
# Used by FrameworkTest to add verification details to our results
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_verify_results(self, framework, test, result):
    """Record a verification result for framework/test in self.results.

    NOTE: called from the child test process; technically an IPC
    violation since it mutates the parent's results structure."""
    verify = self.results['verify']
    if framework.name not in verify:
        verify[framework.name] = dict()
    verify[framework.name][test] = result
############################################################
# report_benchmark_results
# Used by FrameworkTest to add benchmark data to this
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_benchmark_results(self, framework, test, results):
    """Record benchmark data for framework/test and mark the framework
    as succeeded (non-empty results) or failed (empty results).

    NOTE: called from the child test process; technically an IPC
    violation since it mutates the parent's results structure."""
    raw_data = self.results['rawData']
    if test not in raw_data:
        raw_data[test] = dict()
    if results:
        # A non-empty parse means the benchmark produced usable data.
        raw_data[test][framework.name] = results
        succeeded = self.results['succeeded'][test]
        if framework.name not in succeeded:  # may already be set for single-tests
            succeeded.append(framework.name)
    else:
        failed = self.results['failed'][test]
        if framework.name not in failed:  # may already be set for single-tests
            failed.append(framework.name)
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
    """
    Gather every runnable test (honoring include/exclude); if a previous
    run left a current_benchmark.txt marker, resume from the interrupted
    test onward instead of starting from the beginning.
    """
    tests = gather_tests(include=self.test,
                         exclude=self.exclude,
                         benchmarker=self)
    if os.path.isfile('current_benchmark.txt'):
        with open('current_benchmark.txt', 'r') as marker:
            resume_name = marker.read().strip()
        for index, candidate in enumerate(tests):
            if candidate.name == resume_name:
                tests = tests[index:]
                break
    return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Makes any necessary changes to the server that should be
# made before running the tests. This involves setting kernel
# settings to allow for more connections, or more file
# descriptors
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
    """
    Tune system settings on the server before running tests: CPU
    governor, SYN backlog, somaxconn, open-file limit, TIME_WAIT reuse
    and shared-memory limits.  No-op (returns True) on Windows;
    returns False if any command fails.
    """
    try:
        if os.name == 'nt':
            return True
        subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
        for command in ("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535",
                        "sudo sysctl -w net.core.somaxconn=65535",
                        "sudo -s ulimit -n 65535",
                        "sudo sysctl net.ipv4.tcp_tw_reuse=1",
                        "sudo sysctl net.ipv4.tcp_tw_recycle=1",
                        "sudo sysctl -w kernel.shmmax=134217728",
                        "sudo sysctl -w kernel.shmall=2097152"):
            subprocess.check_call(command.split(" "))
    except subprocess.CalledProcessError:
        return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
    """
    Tune kernel settings on the database machine over ssh before running
    the tests (SYN backlog, somaxconn, scheduler autogrouping, open-file
    limit, TIME_WAIT reuse, shared-memory and semaphore limits).  The
    commands are piped to a remote shell via stdin.
    """
    p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
    sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
    sudo sysctl -w net.core.somaxconn=65535
    sudo sysctl -w kernel.sched_autogroup_enabled=0
    sudo -s ulimit -n 65535
    sudo sysctl net.ipv4.tcp_tw_reuse=1
    sudo sysctl net.ipv4.tcp_tw_recycle=1
    sudo sysctl -w kernel.shmmax=2147483648
    sudo sysctl -w kernel.shmall=2097152
    sudo sysctl -w kernel.sem="250 32000 256 512"
    """)
    # TODO - print kernel configuration to file
    # echo "Printing kernel configuration:" && sudo sysctl -a
    # Explanations:
    # net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
    # ulimit -n: http://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/
    # net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
    # kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
    # For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
    """
    Tune kernel settings on the client (load generator) machine over ssh
    before running the tests; same knobs as the database setup minus the
    scheduler and semaphore tweaks.
    """
    p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
    p.communicate("""
    sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
    sudo sysctl -w net.core.somaxconn=65535
    sudo -s ulimit -n 65535
    sudo sysctl net.ipv4.tcp_tw_reuse=1
    sudo sysctl net.ipv4.tcp_tw_recycle=1
    sudo sysctl -w kernel.shmmax=2147483648
    sudo sysctl -w kernel.shmall=2097152
    """)
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
# 2013-10-02 ASB Calls each test passed in tests to
# __run_test in a separate process. Each
# test is given a set amount of time and if
# kills the child process (and subsequently
# all of its child processes). Uses
# multiprocessing module.
############################################################
def __run_tests(self, tests):
    """
    Run each test in `tests` via __run_test.

    2013-10-02 ASB  On Linux each test runs in its own multiprocessing
    Process with a timeout; a test that exceeds the timeout has its
    process (and children) terminated.  On Windows tests run in-process.

    :returns: 1 if any test failed, otherwise 0.
    """
    if len(tests) == 0:
        return 0
    logging.debug("Start __run_tests.")
    logging.debug("__name__ = %s",__name__)
    error_happened = False
    if self.os.lower() == 'windows':
        logging.debug("Executing __run_tests on Windows")
        for test in tests:
            # Record the current test so an interrupted run can resume here.
            with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                benchmark_resume_file.write(test.name)
            if self.__run_test(test) != 0:
                error_happened = True
    else:
        logging.debug("Executing __run_tests on Linux")
        # Setup a nice progressbar and ETA indicator
        widgets = [self.mode, ': ', progressbar.Percentage(),
                   ' ', progressbar.Bar(),
                   ' Rough ', progressbar.ETA()]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
        pbar_test = 0
        # These features do not work on Windows
        for test in tests:
            pbar.update(pbar_test)
            pbar_test = pbar_test + 1
            if __name__ == 'benchmark.benchmarker':
                print header("Running Test: %s" % test.name)
            # Record the current test so an interrupted run can resume here.
            with open('current_benchmark.txt', 'w') as benchmark_resume_file:
                benchmark_resume_file.write(test.name)
            # Run the test in its own process so a hung test can be killed.
            test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
            test_process.start()
            test_process.join(self.run_test_timeout_seconds)
            self.__load_results()  # Load intermediate result from child process
            if(test_process.is_alive()):
                logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
                self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
                test_process.terminate()
                test_process.join()
            if test_process.exitcode != 0:
                error_happened = True
        pbar.finish()
    # All tests completed: remove the resume marker.
    if os.path.isfile('current_benchmark.txt'):
        os.remove('current_benchmark.txt')
    logging.debug("End __run_tests.")
    if error_happened:
        return 1
    return 0
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
# Ensures that the system has all necessary software to run
# the tests. This does not include that software for the individual
# test, but covers software such as curl and weighttp that
# are needed.
############################################################
def __run_test(self, test):
    """Run a single framework test end to end.

    Starts the framework, verifies its URLs, optionally benchmarks it,
    stops it, and saves intermediate results. Yields 0 on success and 1
    on failure: on Windows the code is returned to the caller, elsewhere
    this runs in a child process which exits with the code.
    """
    # Used to capture return values
    def exit_with_code(code):
        if self.os.lower() == 'windows':
            return code
        else:
            sys.exit(code)

    # Per-test log directory: <latest_results>/logs/<testname>/
    logDir = os.path.join(self.latest_results_directory, 'logs', test.name.lower())
    try:
        os.makedirs(logDir)
    except Exception:
        # Directory most likely exists already; a real I/O problem will
        # surface on the open() below.
        pass
    with open(os.path.join(logDir, 'out.txt'), 'w') as out:
        # Skip tests whose declared OS / database OS do not match this host.
        if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
            out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
            return exit_with_code(0)

        # If the test is in the excludes list, we skip it
        if self.exclude != None and test.name in self.exclude:
            out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
            return exit_with_code(0)

        out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
        out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
        out.write("test.name: {name}\n".format(name=str(test.name)))
        out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
        # Refuse to overwrite results that already exist for this framework.
        if self.results['frameworks'] != None and test.name in self.results['completed']:
            out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
            print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
            return exit_with_code(1)
        out.flush()

        out.write(header("Beginning %s" % test.name, top='='))
        out.flush()

        ##########################
        # Start this test
        ##########################
        out.write(header("Starting %s" % test.name))
        out.flush()
        try:
            # Restart every database service on the database host over SSH
            # so each test starts from a clean slate, then verify that all
            # of them accept connections again.
            if test.requires_database():
                p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=out, shell=True)
                p.communicate("""
sudo restart mysql
sudo restart mongod
sudo service redis-server restart
sudo service postgresql restart
sudo service cassandra restart
/opt/elasticsearch/elasticsearch restart
""")
                time.sleep(10)

                st = verify_database_connections([
                    ("mysql", self.database_host, 3306),
                    ("mongodb", self.database_host, 27017),
                    ("redis", self.database_host, 6379),
                    ("postgresql", self.database_host, 5432),
                    ("cassandra", self.database_host, 9160),
                    ("elasticsearch", self.database_host, 9200)
                ])
                print "database connection test results:\n" + "\n".join(st[1])

            # If the port is already taken a previous test probably did not
            # shut down cleanly; try one more stop before giving up.
            if self.__is_port_bound(test.port):
                # This can happen sometimes - let's try again
                self.__stop_test(out)
                out.flush()
                time.sleep(15)
                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                    out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
                    out.flush()
                    print "Error: Unable to recover port, cannot start test"
                    return exit_with_code(1)

            result = test.start(out)
            if result != 0:
                self.__stop_test(out)
                time.sleep(5)
                out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                out.flush()
                self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                return exit_with_code(1)

            logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
            time.sleep(self.sleep)

            ##########################
            # Verify URLs
            ##########################
            logging.info("Verifying framework URLs")
            verificationPath = os.path.join(logDir,"verification")
            try:
                os.makedirs(verificationPath)
            except OSError:
                pass
            passed_verify = test.verify_urls(verificationPath)

            ##########################
            # Benchmark this test
            ##########################
            if self.mode == "benchmark":
                logging.info("Benchmarking")
                out.write(header("Benchmarking %s" % test.name))
                out.flush()
                benchmarkPath = os.path.join(logDir,"benchmark")
                try:
                    os.makedirs(benchmarkPath)
                except OSError:
                    pass
                test.benchmark(benchmarkPath)

            ##########################
            # Stop this test
            ##########################
            out.write(header("Stopping %s" % test.name))
            out.flush()
            self.__stop_test(out)
            out.flush()
            time.sleep(15)
            # Make sure the framework really released its port; retry the
            # stop once before reporting failure.
            if self.__is_port_bound(test.port):
                # This can happen sometimes - let's try again
                self.__stop_test(out)
                out.flush()
                time.sleep(15)
                if self.__is_port_bound(test.port):
                    # We gave it our all
                    self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                    out.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
                    out.flush()
                    return exit_with_code(1)

            out.write(header("Stopped %s" % test.name))
            out.flush()
            time.sleep(5)

            ##########################################################
            # Save results thus far into the latest results directory
            ##########################################################
            out.write(header("Saving results through %s" % test.name))
            out.flush()
            self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))

            if self.mode == "verify" and not passed_verify:
                print "Failed verify!"
                return exit_with_code(1)
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
            out.write(header("Subprocess Error %s" % test.name))
            traceback.print_exc(file=out)
            out.flush()
            try:
                self.__stop_test(out)
            except (subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                out.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
                traceback.print_exc(file=out)
                out.flush()
            out.close()
            return exit_with_code(1)
        # TODO - subprocess should not catch this exception!
        # Parent process should catch it and cleanup/exit
        except (KeyboardInterrupt) as e:
            self.__stop_test(out)
            out.write(header("Cleaning up..."))
            out.flush()
            self.__finish()
            sys.exit(1)

        out.close()
        return exit_with_code(0)
############################################################
# End __run_test
############################################################
############################################################
# __stop_test(benchmarker)
# Stops all running tests
############################################################
def __stop_test(self, out):
    """Forcibly SIGKILL every process owned by the benchmark runner user.

    The kill command's stdout/stderr are redirected into *out*.
    Returns 0 when the command succeeded, 1 on any failure.
    """
    kill_command = 'sudo killall -s 9 -u %s' % self.runner_user
    try:
        subprocess.check_call(kill_command, shell=True, stderr=out, stdout=out)
    except Exception:
        return 1
    return 0
############################################################
# End __stop_test
############################################################
def is_port_bound(self, port):
    """Public wrapper around the private port-availability check."""
    return self.__is_port_bound(port)
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
    """Return True if *port* appears to be held by a running server.

    First tries to bind the port: a successful bind proves it is free.
    When the bind fails we try to connect instead - a successful
    connection proves something is still listening, while a refused
    connection means the bind failure was benign (privileged port,
    sockets lingering in TIME_WAIT, ...).
    """
    port = int(port)
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        try:
            # Try to bind to all IP addresses, this port
            probe.bind(("", port))
        except socket.error:
            try:
                # Could not bind - check whether anything answers locally.
                probe.connect(("127.0.0.1", port))
                return True
            except socket.error:
                # Nothing accepted the connection, so no server is running.
                return False
        # Bind succeeded, which means the port is free.
        return False
    finally:
        probe.close()
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
# Aggregates the raw data collected for the given tests
# (commit counts, lines of code) and writes the combined
# results.json into the full results directory.
############################################################
def __parse_results(self, tests):
    """Aggregate per-framework metadata and dump the final results.json."""
    # Collect the commit count of each framework...
    self.__count_commits()
    # ...and the source-lines-of-code count.
    self.__count_sloc()
    # Write the aggregated JSON file into the full results directory.
    results_path = os.path.join(self.full_results_directory(), "results.json")
    with open(results_path, "w") as f:
        f.write(json.dumps(self.results, indent=2))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
#############################################################
def __count_sloc(self):
    """Count source lines of code per framework with `cloc` and store the
    totals in self.results['rawData']['slocCounts']."""
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    jsonResult = {}
    for framework, testlist in frameworks.iteritems():
        # Each framework must ship a 'source_code' manifest listing its files.
        if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
            logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
            continue

        # Unfortunately the source_code files use lines like
        # ./cpoll_cppsp/www/fortune_old instead of
        # ./www/fortune_old
        # so we have to back our working dir up one level
        wd = os.path.dirname(testlist[0].directory)

        try:
            command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory

            # Optional per-framework language definitions for cloc.
            if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
                command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
                logging.info("Using custom cloc definitions for %s", framework)

            # Find the last instance of the word 'code' in the yaml output. This should
            # be the line count for the sum of all listed files or just the line count
            # for the last file in the case where there's only one file listed.
            command = command + "| grep code | tail -1 | cut -d: -f 2"
            logging.debug("Running \"%s\" (cwd=%s)", command, wd)
            lineCount = subprocess.check_output(command, cwd=wd, shell=True)
            jsonResult[framework] = int(lineCount)
        except subprocess.CalledProcessError:
            # cloc (or the pipeline) failed - skip this framework.
            continue
        except ValueError as ve:
            logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
    self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
#
############################################################
def __count_commits(self):
    """Count the git commits touching each framework's directory.

    Stores {framework: commit_count} in
    self.results['rawData']['commitCounts'] and in self.commits.
    """
    frameworks = gather_frameworks(include=self.test,
                                   exclude=self.exclude, benchmarker=self)

    def count_commit(directory, framework, jsonResult):
        # One git invocation per framework; failures are simply skipped.
        command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
        try:
            commitCount = subprocess.check_output(command, shell=True)
            jsonResult[framework] = int(commitCount)
        except subprocess.CalledProcessError:
            pass

    # Because git can be slow when run in large batches, this
    # calls git up to 4 times in parallel. Normal improvement is ~3-4x
    # in my trials, or ~100 seconds down to ~25
    # This is safe to parallelize as long as each thread only
    # accesses one key in the dictionary
    threads = []
    jsonResult = {}
    t1 = datetime.now()
    for framework, testlist in frameworks.iteritems():
        directory = testlist[0].directory
        # BUGFIX: pass `framework` to the thread explicitly. The previous
        # code read the loop variable through the closure, so a thread
        # that ran after the loop had advanced recorded its count under
        # the wrong framework key.
        t = threading.Thread(target=count_commit,
                             args=(directory, framework, jsonResult))
        t.start()
        threads.append(t)
        # Git has internal locks, full parallel will just cause contention
        # and slowness, so we rate-limit a bit
        if len(threads) >= 4:
            threads[0].join()
            threads.remove(threads[0])

    # Wait for remaining threads
    for t in threads:
        t.join()
    t2 = datetime.now()
    # print "Took %s seconds " % (t2 - t1).seconds
    self.results['rawData']['commitCounts'] = jsonResult
    self.commits = jsonResult
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self, test_name, status_message):
    """Record *status_message* for *test_name* and persist results.json
    into the latest results directory."""
    try:
        self.results["completed"][test_name] = status_message
        results_file = os.path.join(self.latest_results_directory, 'results.json')
        with open(results_file, 'w') as f:
            f.write(json.dumps(self.results, indent=2))
    except IOError:
        logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
def __load_results(self):
    """Reload self.results from results.json, ignoring a missing or
    malformed file (e.g. when no intermediate results exist yet)."""
    results_file = os.path.join(self.latest_results_directory, 'results.json')
    try:
        with open(results_file) as f:
            self.results = json.load(f)
    except (ValueError, IOError):
        # Keep whatever results we already have in memory.
        pass
############################################################
# __finish
############################################################
def __finish(self):
    """Print a colored per-test verification summary plus total run time."""
    # Only print the summary for a real run (not --list-tests / --parse).
    if not self.list_tests and not self.list_test_metadata and not self.parse:
        tests = self.__gather_tests
        # Normally you don't have to use Fore.BLUE before each line, but
        # Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
        # or stream flush, so we have to ensure that the color code is printed repeatedly
        prefix = Fore.CYAN
        for line in header("Verification Summary", top='=', bottom='').split('\n'):
            print prefix + line
        for test in tests:
            print prefix + "| Test: %s" % test.name
            if test.name in self.results['verify'].keys():
                # Color each verification result: PASS green, WARN yellow,
                # anything else red.
                for test_type, result in self.results['verify'][test.name].iteritems():
                    if result.upper() == "PASS":
                        color = Fore.GREEN
                    elif result.upper() == "WARN":
                        color = Fore.YELLOW
                    else:
                        color = Fore.RED
                    print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
            else:
                print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
        print prefix + header('', top='', bottom='=') + Style.RESET_ALL

    print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
    print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
    """Initialize the benchmarker from the argparse-produced *args* dict.

    Copies every argument onto self, prepares the results directories,
    loads any previous results.json (or builds a fresh skeleton), builds
    the SSH command strings, and optionally installs software.
    """
    # Map type strings to their objects
    types = dict()
    types['json'] = JsonTestType()
    types['db'] = DBTestType()
    types['query'] = QueryTestType()
    types['fortune'] = FortuneTestType()
    types['update'] = UpdateTestType()
    types['plaintext'] = PlaintextTestType()

    # Turn type into a map instead of a string
    if args['type'] == 'all':
        args['types'] = types
    else:
        args['types'] = { args['type'] : types[args['type']] }
    del args['type']

    args['max_threads'] = args['threads']
    args['max_concurrency'] = max(args['concurrency_levels'])

    # Every CLI argument becomes an attribute of this benchmarker.
    self.__dict__.update(args)
    # pprint(self.__dict__)

    self.start_time = time.time()
    self.run_test_timeout_seconds = 7200

    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)

    # setup some additional variables
    # (database credentials fall back to the client's when not supplied)
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file

    # Remember root directory
    self.fwroot = setup_util.get_fwroot()

    # setup results and latest_results directories
    self.result_directory = os.path.join("results")
    if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
        shutil.rmtree(os.path.join(self.fwroot, "results"))
    # NOTE: this replaces the bound method with its return value, so the
    # attribute can only be resolved this way once.
    self.latest_results_directory = self.latest_results_directory()

    # remove installs directories if --clean-all provided
    self.install_root = "%s/%s" % (self.fwroot, "installs")
    if args['clean_all']:
        os.system("sudo rm -rf " + self.install_root)
        os.mkdir(self.install_root)

    # A --parse value replays an existing timestamped run.
    if hasattr(self, 'parse') and self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # Load previous results when present, otherwise build the skeleton.
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            #Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test not found.")

    if self.results == None:
        self.results = dict()
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_levels
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['verify'] = dict()
    else:
        #for x in self.__gather_tests():
        #  if x.name not in self.results['frameworks']:
        #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]

    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file

    if self.install is not None:
        install = Installer(self, self.install_strategy)
        install.install_software()
############################################################
# End __init__
############################################################
|
|
#
# Copyright 2017 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import json
import yaml
import copy
from lib.util import Util
import os.path
import logging
from projecthandler.models import Project
from lib.toscanfv.toscanfv_rdcl_graph import ToscanfvRdclGraph
from lib.tosca.tosca_parser import ToscaParser
from toscaparser.tosca_template import ToscaTemplate
from translator.hot.tosca_translator import TOSCATranslator
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('ToscaModel.py')
PATH_TO_SCHEMAS = 'lib/toscanfv/schemas/'
PATH_TO_DESCRIPTORS_TEMPLATES = 'lib/toscanfv/descriptor_template/'
DESCRIPTOR_TEMPLATE_SUFFIX = '.yaml'
GRAPH_MODEL_FULL_NAME = 'lib/TopologyModels/toscanfv/toscanfv.yaml'
EXAMPLES_FOLDER = 'usecases/TOSCANFV/'
PATH_TO_TOSCA_NFV_DEFINITION = 'toscaparser/extensions/nfv/TOSCA_nfv_definition_1_0_0.yaml'
class ToscanfvProject(Project):
"""Tosca class
The data model has the following descriptors:
'toscayaml'
"""
@classmethod
def data_project_from_files(cls, request):
file_dict = {}
for my_key in request.FILES.keys():
file_dict[my_key] = request.FILES.getlist(my_key)
data_project = ToscaParser.importprojectfiles(file_dict)
print "data project read from files:"
print data_project
return data_project
@classmethod
def data_project_from_example(cls, request):
example_id = request.POST.get('example-toscanfv-id', '')
data_project = ToscaParser.importprojectdir(EXAMPLES_FOLDER + example_id + '/YAML', 'yaml')
print "data project read from directory:"
print data_project
# data_project = importprojectdir('usecases/TOSCA/' + example_id + '/JSON', 'json')
return data_project
@classmethod
def get_example_list(cls):
    """Returns a list of directories, in each directory there is a project example"""
    base = EXAMPLES_FOLDER
    examples = [entry for entry in os.listdir(base)
                if os.path.isdir(os.path.join(base, entry))]
    return {'toscanfv': examples}
# @classmethod
# def get_graph_model(cls):
# """Returns the model of the graph of the project type as a yaml object
# Returns an empty dict if there is no file with the model
# """
# file_path = GRAPH_MODEL_FULL_NAME
# graph_model = {}
# try:
# graph_model = Util.loadyamlfile(file_path)
# except Exception as e:
# pass
# return graph_model
@classmethod
def get_json_schema_by_type(cls, type_descriptor):
    """Return the path of the JSON schema file for *type_descriptor*."""
    return PATH_TO_SCHEMAS + type_descriptor + ".json"
@classmethod
def get_new_descriptor(cls, descriptor_type, request_id):
    """Return a fresh descriptor for *descriptor_type* ('toscayaml' only).

    Unsupported types yield an empty dict.  *request_id* is currently
    unused; it is kept for interface parity with other project classes.
    """
    json_template = cls.get_descriptor_template(descriptor_type)
    if descriptor_type != 'toscayaml':
        return {}
    return json_template
@classmethod
def get_descriptor_template(cls, type_descriptor):
"""Returns a descriptor template for a given descriptor type"""
try:
# schema = Util.loadjsonfile(PATH_TO_DESCRIPTORS_TEMPLATES+type_descriptor+DESCRIPTOR_TEMPLATE_SUFFIX)
# print 'type_descriptor : '+type_descriptor
# FixMe bisogna creare un template
yaml_object = Util().loadyamlfile(
'toscaparser/extensions/nfv/tests/data/tosca_helloworld_nfv.yaml')
toscajson = json.loads(Util.yaml2json(yaml_object))
return toscajson
except Exception as e:
# log.error('Exception in get descriptor template') #TODO(stefano) add logging
print 'Exception in get descriptor template'
return False
@classmethod
def get_clone_descriptor(cls, descriptor, type_descriptor, new_descriptor_id):
    """Return an independent deep copy of *descriptor*.

    *type_descriptor* and *new_descriptor_id* are unused here; they are
    kept for interface parity with the other project classes.
    """
    return copy.deepcopy(descriptor)
def get_type(self):
    """Return the project-type discriminator for this class."""
    return "toscanfv"
def __str__(self):
    """Human-readable representation: the project name."""
    return self.name
def get_overview_data(self):
    """Summarize this project (owner, name, descriptor count, ...) for
    overview listings."""
    current_data = json.loads(self.data_project)
    toscayaml_count = len(current_data['toscayaml'].keys()) if 'toscayaml' in current_data else 0
    overview = {
        'owner': self.owner.__str__(),
        'name': self.name,
        'updated_date': self.updated_date.__str__(),
        'info': self.info,
        'type': 'toscanfv',
        'toscayaml': toscayaml_count,
        'validated': self.validated
    }
    return overview
def get_graph_data_json_topology(self, descriptor_id):
    """Build and return the project's topology graph as a JSON string.

    *descriptor_id* is currently unused; the graph is built from the
    whole project data.
    """
    graph_builder = ToscanfvRdclGraph()
    project_data = self.get_dataproject()
    graph_model = self.get_graph_model(GRAPH_MODEL_FULL_NAME)
    topology = graph_builder.build_graph_from_project(project_data,
                                                      model=graph_model)
    return json.dumps(topology)
def create_descriptor(self, descriptor_name, type_descriptor, new_data, data_type):
    """Creates a descriptor of a given type from a json or yaml representation
    Returns the descriptor id or False
    """
    result = False
    try:
        print type_descriptor, data_type
        current_data = json.loads(self.data_project)
        # Normalize the incoming representation to a plain dict.
        if data_type == 'json':
            new_descriptor = json.loads(new_data)
        elif data_type == 'yaml':
            # NOTE(review): yaml.load without SafeLoader can execute
            # arbitrary tags; prefer yaml.safe_load if new_data can come
            # from untrusted users.
            yaml_object = yaml.load(new_data)
            new_descriptor = json.loads(Util.yaml2json(yaml_object))
        else:
            print 'Unknown data type'
            return False

        if type_descriptor == 'toscayaml':
            # Use the caller-supplied name as id, or generate a unique one.
            if descriptor_name is None:
                new_descriptor_id = Util.get_unique_id()
            else:
                new_descriptor_id = descriptor_name
            if not type_descriptor in current_data:
                current_data[type_descriptor] = {}
            current_data[type_descriptor][new_descriptor_id] = new_descriptor
            self.data_project = current_data
            # self.validated = validate #TODO(stefano) not clear if this is the validation for the whole project
            self.update()
            result = new_descriptor_id
        else:
            return False
    except Exception as e:
        print 'Exception in create descriptor', e
    return result
def set_validated(self, value):
    """Set self.validated; only an explicit True value validates the project."""
    self.validated = value is not None and value == True
def get_add_element(self, request):
    """Add a new node of the requested type to a descriptor's topology.

    Reads group_id (target descriptor), element_id (new node name) and
    element_type from the POST data.  All required properties along the
    type's derived_from chain are filled with placeholder values; adding
    a VNF also creates a companion VNF descriptor plus an import entry
    in the parent descriptor.  Returns True on success.
    """
    result = False
    group_id = request.POST.get('group_id')
    element_id = request.POST.get('element_id')
    element_type = request.POST.get('element_type')
    current_data = json.loads(self.data_project)

    # Node type catalogue from the TOSCA NFV profile definition file.
    tosca_nfv_definition = Util().loadyamlfile(PATH_TO_TOSCA_NFV_DEFINITION)
    node_types = {}
    node_types.update(tosca_nfv_definition['node_types'])

    new_element = {}
    new_element['type'] = element_type
    type_definition = node_types[element_type]
    # Walk the derived_from chain and fill every required property with a
    # placeholder value ('prova'; 'version' gets 1.0).
    while element_type in node_types:
        type_definition = node_types[element_type]
        if 'properties' in type_definition:
            for propriety in type_definition['properties']:
                if 'required' not in type_definition['properties'][propriety] or \
                        type_definition['properties'][propriety]['required']:
                    if 'properties' not in new_element:
                        new_element['properties'] = {}
                    if propriety == 'version':
                        new_element['properties'][propriety] = 1.0
                    else:
                        new_element['properties'][propriety] = 'prova'
        element_type = type_definition['derived_from'] if 'derived_from' in type_definition else None

    # A VNF node gets its own descriptor plus an import in the parent.
    if new_element['type'] == 'tosca.nodes.nfv.VNF':
        if 'imports' not in current_data['toscayaml'][group_id] or current_data['toscayaml'][group_id][
                'imports'] is None:
            current_data['toscayaml'][group_id]['imports'] = []
        current_data['toscayaml'][group_id]['imports'].append(element_id + '.yaml')
        vnf_template = Util().loadyamlfile(PATH_TO_DESCRIPTORS_TEMPLATES + 'vnf.yaml')
        # NOTE(review): 'subsititution_mappings' looks misspelled, but it is
        # a runtime key - confirm against consumers before renaming.
        vnf_template['topology_template']['subsititution_mappings'] = 'tosca.nodes.nfv.VNF.' + element_id
        vnf_template['topology_template']['node_templates'] = {}
        vnf_template['imports'] = []
        vnf_template['node_types']['tosca.nodes.nfv.VNF.' + element_id] = {}
        vnf_template['node_types']['tosca.nodes.nfv.VNF.' + element_id]['derived_from'] = 'tosca.nodes.nfv.VNF'
        current_data['toscayaml'][element_id] = vnf_template

    # Insert the new node into the target descriptor's node_templates.
    if 'node_templates' not in current_data['toscayaml'][group_id]['topology_template'] or current_data['toscayaml'][group_id]['topology_template']['node_templates'] is None:
        current_data['toscayaml'][group_id]['topology_template']['node_templates'] = {}
    current_data['toscayaml'][group_id]['topology_template']['node_templates'][element_id] = new_element
    self.data_project = current_data
    # self.validated = validate #TODO(stefano) not clear if this is the validation for the whole project
    self.update()
    result = True
    return result
def get_remove_element(self, request):
    """Remove a node from a descriptor's topology.

    Deletes the node named by POST 'element_id' from the descriptor
    selected by POST 'group_id', then drops every requirement entry in
    the remaining nodes that still points at the removed node.
    Returns True.
    """
    group_id = request.POST.get('group_id')
    element_id = request.POST.get('element_id')
    element_type = request.POST.get('element_type')
    current_data = json.loads(self.data_project)
    node_templates = current_data['toscayaml'][group_id]['topology_template']['node_templates']

    if element_id in node_templates:
        del node_templates[element_id]

    # Drop dangling requirement references to the removed node.
    for node_name in node_templates:
        node = node_templates[node_name]
        if 'requirements' in node:
            # BUGFIX: iterate over a copy - the previous code removed
            # entries from the list it was iterating, which skipped the
            # element following each removal (and could attempt to remove
            # the same entry twice when several of its keys matched).
            for requirement in list(node['requirements']):
                if element_id in requirement.values():
                    node['requirements'].remove(requirement)

    self.data_project = current_data
    # self.validated = validate #TODO(stefano) not clear if this is the validation for the whole project
    self.update()
    result = True
    return result
def get_add_link(self, request):
    """Create a link between two nodes of a descriptor.

    Depending on the endpoint types, the link is stored as a requirement
    on one of the nodes: CP<->VL* sets 'virtualLink' on the CP,
    CP<->VDU sets 'virtualBinding' on the CP, and VNF<->VL* sets
    'virtualLink' on the VNF.  Returns True.
    """

    def set_requirement(node, req_key, target_id):
        # Ensure the node has a requirements list, then create or update
        # the single entry holding req_key.
        if 'requirements' not in node or node['requirements'] is None:
            node['requirements'] = []
        requirements = node['requirements']
        entry = next((x for x in requirements if req_key in x.keys()), None)
        if entry is not None:
            entry[req_key] = target_id
        else:
            requirements.append({req_key: target_id})

    result = False
    parameters = request.POST.dict()
    print(parameters)  # single-argument print: valid in both py2 and py3
    source_type = parameters['source_type']
    destination_type = parameters['target_type']
    source_id = parameters['source']
    destination_id = parameters['target']
    group = parameters['group']
    current_data = json.loads(self.data_project)
    node_templates = current_data['toscayaml'][group]['topology_template']['node_templates']

    vl_types = ('tosca.nodes.nfv.VL', 'tosca.nodes.nfv.VL.ELine',
                'tosca.nodes.nfv.VL.ELAN', 'tosca.nodes.nfv.VL.ETree')
    cp = 'tosca.nodes.nfv.CP'
    vnf = 'tosca.nodes.nfv.VNF'
    vdu = 'tosca.nodes.nfv.VDU'

    # CP <-> virtual link: record the link on the connection point.
    if (source_type == cp and destination_type in vl_types) or \
            (destination_type == cp and source_type in vl_types):
        cp_id = source_id if source_type == cp else destination_id
        vl_id = source_id if source_type != cp else destination_id
        set_requirement(node_templates[cp_id], 'virtualLink', vl_id)

    # CP <-> VDU: record the binding on the connection point.
    if (source_type, destination_type) in [(cp, vdu), (vdu, cp)]:
        cp_id = source_id if source_type == cp else destination_id
        vdu_id = source_id if source_type != cp else destination_id
        set_requirement(node_templates[cp_id], 'virtualBinding', vdu_id)

    # VNF <-> virtual link: record the link on the VNF node.
    if (source_type == vnf and destination_type in vl_types) or \
            (destination_type == vnf and source_type in vl_types):
        vnf_id = source_id if source_type == vnf else destination_id
        vl_id = source_id if source_type != vnf else destination_id
        set_requirement(node_templates[vnf_id], 'virtualLink', vl_id)

    self.data_project = current_data
    # self.validated = validate #TODO(stefano) not clear if this is the validation for the whole project
    self.update()
    result = True
    return result
def get_remove_link(self, request):
    """Remove a link between two node templates of a TOSCA topology.

    Inspects the (source_type, target_type) pair carried by the POST
    parameters and, for CP<->VL, CP<->VDU and VNF<->VL(.ELine/.ELAN/.ETree)
    links, deletes the matching 'virtualLink' / 'virtualBinding' requirement
    entry from the CP/VNF side. The updated project data is stored back on
    the instance and persisted through self.update().

    :param request: Django request; POST carries 'source', 'target',
        'source_type', 'target_type' and the descriptor 'group'.
    :returns: True once the update has been applied.
    """
    result = False
    parameters = request.POST.dict()
    print(parameters)  # single-argument print(): valid on both py2 and py3
    source_type = parameters['source_type']        # source['info']['type']
    destination_type = parameters['target_type']   # destination['info']['type']
    source_id = parameters['source']
    destination_id = parameters['target']
    group = parameters['group']
    current_data = json.loads(self.data_project)

    # Every concrete VL flavour handled by the topology editor.
    vl_types = ('tosca.nodes.nfv.VL', 'tosca.nodes.nfv.VL.ELine',
                'tosca.nodes.nfv.VL.ELAN', 'tosca.nodes.nfv.VL.ETree')

    def _pairs(anchor, others):
        # Both orderings of (anchor, other) for every type in `others`.
        combos = []
        for other in others:
            combos.append((anchor, other))
            combos.append((other, anchor))
        return combos

    def _drop_requirement(anchor_type, requirement_key):
        # Locate the node on the anchor side of the link and delete the
        # first requirement entry containing requirement_key, if present.
        node_id = source_id if source_type == anchor_type else destination_id
        node = current_data['toscayaml'][group]['topology_template']['node_templates'][node_id]
        # Normalize a missing or None 'requirements' attribute to a list.
        if node.get('requirements') is None:
            node['requirements'] = []
        requirements = node['requirements']
        element = next((x for x in requirements if requirement_key in x), None)
        if element is not None:
            # list.remove() mutates in place and returns None; the previous
            # code incorrectly re-assigned its None result to a local.
            requirements.remove(element)

    link = (source_type, destination_type)
    if link in _pairs('tosca.nodes.nfv.CP', vl_types):
        _drop_requirement('tosca.nodes.nfv.CP', 'virtualLink')
    if link in _pairs('tosca.nodes.nfv.CP', ('tosca.nodes.nfv.VDU',)):
        _drop_requirement('tosca.nodes.nfv.CP', 'virtualBinding')
    if link in _pairs('tosca.nodes.nfv.VNF', vl_types):
        _drop_requirement('tosca.nodes.nfv.VNF', 'virtualLink')

    self.data_project = current_data
    # self.validated = validate #TODO(stefano) not clear if this is the validation for the whole project
    self.update()
    result = True
    return result
def get_available_nodes(self, args):
    """Return the catalogue of node kinds that can be added to a layer.

    :param args: dict with a 'layer' key selecting the graph-model layer.
    :returns: list of {'id', 'category_name', 'types'} dicts; empty on error.
    """
    log.debug('get_available_nodes')
    try:
        model_graph = self.get_graph_model(GRAPH_MODEL_FULL_NAME)
        layer_nodes = model_graph['layer'][args['layer']]['nodes']
        # Only nodes explicitly flagged as addable are offered to the user.
        result = [
            {
                "id": node_name,
                "category_name": model_graph['nodes'][node_name]['label'],
                "types": [
                    {
                        "name": "generic",
                        "id": node_name
                    }
                ]
            }
            for node_name in layer_nodes
            if layer_nodes[node_name].get('addable')
        ]
    except Exception as e:
        # Best-effort endpoint: any failure degrades to an empty catalogue.
        log.debug(e)
        result = []
    return result
def get_generatehotemplate(self, request, descriptor_id, descriptor_type):
    """Generate a HOT template for a TOSCA descriptor.

    Based on the reverse engineering of translator/shell.py: the
    descriptor's YAML dict is wrapped in a ToscaTemplate and run through
    TOSCATranslator.

    :param request: Django request (unused, kept for the handler signature).
    :param descriptor_id: key of the descriptor inside project['toscayaml'].
    :param descriptor_type: descriptor type label (only logged).
    :returns: the translated HOT template as a string.
    """
    print("get_generatehotemplate")
    print("descriptor_id: " + descriptor_id)
    print("descriptor_type: " + descriptor_type)
    project = self.get_dataproject()
    print(project['toscayaml'][descriptor_id])
    tosca = ToscaTemplate(None, {}, False, yaml_dict_tpl=project['toscayaml'][descriptor_id])
    translator = TOSCATranslator(tosca, {}, False, csar_dir=None)
    # log.debug(_('Translating the tosca template.'))
    print('Translating the tosca template.')
    # Translate exactly once: the previous code called translate() twice
    # (once to print, once to return), doing the whole conversion work twice.
    result = translator.translate()
    print(result)
    return result
|
|
#!/usr/bin/python
"""
Copyright 2017-2018 University of Cincinnati
All rights reserved. See LICENSE file at:
https://github.com/AS4SR/website
Additional copyright may be held by others, as reflected in the commit history.
"""
import os
import sys
import shutil
#
# create "verticalmenubar.part" as verticalmenubar_str for use!!
#
def create_and_return_verticalmenubar(html_create_list,website_top):
    """Build the vertical menubar HTML fragment and return it as one string.

    Each entry of html_create_list is
    [ul_class_level_in_menubar, name_in_css_file,
     outfile_location_rel_to_public_html, outfilename,
     part_filename_plus_location, name_in_vertical_menubar_and_htmlpage_title].
    Entries with an empty level field are skipped; 'level1'/'level2' drive
    the <ul> nesting (only two levels are supported).
    """
    extra = " "*8 # set to "" for effectively level-start count from left side of html file
    # Fixed header / footer fragments wrapping the generated <ul> structure.
    verticalmenubar_part_1 = """<!-- modified heavily from: http://stackoverflow.com/questions/7055024/how-to-store-nav-bar-in-one-file -->\n""" + extra + " "*4 + """<!-- <div id="menu"> -->"""
    verticalmenubar_part_2 = extra + " "*4 + """</ul>\n""" + extra + " "*4 + """<!-- </div> -->
"""
    verticalmenubar_part = []
    verticalmenubar_part.append(verticalmenubar_part_1)
    N = len(html_create_list)
    i = 0
    tabs = 0 # 4 spaces per level
    level = 0
    while (i < N):
        [ul_class_level_in_menubar, name_in_css_file , outfile_location_rel_to_public_html , outfilename , \
            part_filename_plus_location , name_in_vertical_menubar_and_htmlpage_title] = html_create_list[i]
        if (ul_class_level_in_menubar == ''):
            i += 1 # then ignore that part / don't add it to the menubar
            continue
        elif (ul_class_level_in_menubar == 'level1'):
            tabs = 1
            prevlevel = level
            level = 1
        elif (ul_class_level_in_menubar == 'level2'):
            tabs = 2
            prevlevel = level
            level = 2
        # Open a deeper <ul>, close one, or just close the previous <li>,
        # depending on how the level changed relative to the previous entry.
        if (level > prevlevel): # assumes only 2 levels
            verticalmenubar_part.append("\n" + extra + " "*(tabs*4) + """<ul class="%s">\n""" % ul_class_level_in_menubar)
        elif (level < prevlevel): # assumes only 2 levels
            verticalmenubar_part.append("""</li>\n""" + extra + " "*((tabs+1)*4) + """</ul>\n""" + extra + " "*(tabs*4+2) + """</li>\n""")
        else: #elif (level == prevlevel):
            verticalmenubar_part.append("""</li>\n""")
        verticalmenubar_part.append(extra + " "*(tabs*4+2) + \
            """<li class="%s"><a href='%s%s%s'>%s</a>""" % (name_in_css_file,website_top,outfile_location_rel_to_public_html,outfilename,name_in_vertical_menubar_and_htmlpage_title))
        # if want to include _target="???" ("_blank","_top",etc.), then define variable 'thetarget' and:
        # verticalmenubar_part.append(extra + " "*(tabs*4+2) + \
        # """<li class="%s"><a href='%s%s%s' _target="%s">%s</a>""" % #(name_in_css_file,website_top,outfile_location_rel_to_public_html,outfilename,***THE_TARGET***,name_in_vertical_menubar_and_htmlpage_title))
        i += 1
    # need to close out blocks at the end
    # NOTE(review): the closing logic below keys off the *raw last list
    # entry's* level field, not the running `level` variable. If the list
    # ends with skipped ('') entries neither branch fires and any open
    # <ul>/<li> blocks are left for verticalmenubar_part_2 to terminate —
    # confirm the resulting HTML is well-formed for such lists.
    ul_class_level_in_menubar_LAST = html_create_list[i-1][0]
    if (ul_class_level_in_menubar_LAST == 'level2'):
        verticalmenubar_part.append("""</li>\n""" + extra + " "*(tabs*4) + """</ul>\n""" + extra + " "*(tabs*4-2) + """</li>\n""")
        tabs = 1
        prevlevel = level
        level = 1
    if (ul_class_level_in_menubar_LAST == 'level1'):
        verticalmenubar_part.append("""</li>\n""" + extra + " "*(tabs*4) + """</ul>\n""" + extra + " "*(tabs*4-2) + """</li>\n""")
        tabs = 0
        prevlevel = level
        level = 0
    verticalmenubar_part.append(verticalmenubar_part_2)
    # then concatenate all strings (and print string to screen, not to file "vertical_menubar.part")
    verticalmenubar_part_str = "".join(verticalmenubar_part)
    #print(verticalmenubar_part_str)
    return verticalmenubar_part_str
def create_css_file_and_write_to_disk(html_sitedir,gitdir,html_create_list,website_top,css_filename):
    """Generate the site CSS file from the template and write it to disk.

    Reads gitdir/_templates/styles.css.template, replaces $CSSLICLASSTAGS
    with one "body.X li.X a" selector per named entry of html_create_list
    (comma-separated), and writes the result to html_sitedir + css_filename.

    :param html_sitedir: output directory for the site (created if missing).
    :param gitdir: checkout root holding the _templates directory.
    :param html_create_list: page descriptors; index 1 is the CSS class name.
    :param website_top: unused here; kept for signature parity with the html writer.
    :param css_filename: name of the CSS file to write inside html_sitedir.
    """
    print("starting CSS file creation...")
    print("creating directory (" + html_sitedir + ") that it goes within...")
    # Create the target directory (and intermediates). Only the OS-level
    # "already exists" failure is ignored — the previous bare except hid
    # every other error as well.
    try:
        os.makedirs(html_sitedir) # will make intermediate directories if needed
        print("directory created")
    except OSError:
        pass  # directory already exists; a real problem surfaces at write time below
    # grab the other file pieces you need on this run:
    print("grabbing pieces...")
    # read in CSS file template for fill-in (context manager guarantees close)
    filename_temp = gitdir + "_templates/styles.css.template"
    with open(filename_temp, 'r') as f:
        csstemplate = str(f.read())
    # Build one "body.X li.X a" selector per page that declares a CSS name.
    piece = []
    holdstr = "" # comma separator prefix; empty only before the first selector
    for entry in html_create_list:
        name_in_css_file = entry[1]
        if len(name_in_css_file) > 0: # skip entries without a name_in_css_file
            piece.append(holdstr + "body.%s li.%s a" % (name_in_css_file, name_in_css_file))
            holdstr = ",\n"
    #print(piece)
    print("all pieces grabbed!")
    # then, stitch the file together:
    print("stitching file together...")
    # do string substitution:
    filecontents = csstemplate.replace( "$CSSLICLASSTAGS", "".join(piece) )
    print("file stitched!")
    # now, write everything to the file
    filelocation_str = html_sitedir + css_filename
    print("writing "+ filelocation_str + "...")
    with open(filelocation_str, 'w') as f:
        f.write(filecontents)
    print(filelocation_str + " has been written")
    # we're done!
    print("done with CSS file compilation")
def filereadin_replace_returnstr(filename_temp,lineoffsetbyXspaces,replacestr,html_data):
    """Read a template fragment and optionally splice it into html_data.

    Inputs:
        filename_temp = gitdir + "_templates/SOMETHING.part"
        lineoffsetbyXspaces is None or an integer number of spaces
        replacestr = "$NAMEOFREPLACESTR" (or None)
        html_data is the current string getting stuff replaced in it
            (assumed None if replacestr is None)
    Algorithm:
        if replacestr is None (html_data assumed None), then the (indented)
        string read in from the file is returned
        else, an updated html_data string (that had replacestr replaced with
        the file contents, with proper offsetting) is returned
    Usage:
        html_data = filereadin_replace_returnstr(filename_temp,lineoffsetbyXspaces,replacestr,html_data)
    """
    # Context manager guarantees the handle is closed even if read() raises.
    with open(filename_temp, 'r') as f:
        tempholdtext = f.read()
    if (lineoffsetbyXspaces is not None) and (lineoffsetbyXspaces != 0):
        # offset each line after the first by X spaces from the left side of the html file
        tempholdtext = tempholdtext.replace("\n", "\n" + " "*lineoffsetbyXspaces)
    if replacestr is None: # html_data should be None as well
        return tempholdtext
    else:
        return html_data.replace( replacestr, tempholdtext )
def create_html_file_and_write_to_disk(html_sitedir,html_create_list_piece,full_templatedir,html_full_template,titlerider,website_top,css_to_use,gitdir):
    """Assemble one html page from its template pieces and write it to disk.

    html_create_list_piece is a single entry of the page list (unpacked
    below); the finished page is written to
    html_sitedir + outfile_location_rel_to_public_html + outfilename.
    """
    [ul_class_level_in_menubar, name_in_css_file , outfile_location_rel_to_public_html , outfilename , \
        part_filename_plus_location , name_in_vertical_menubar_and_htmlpage_title] = html_create_list_piece
    print("starting work on " + html_sitedir + outfile_location_rel_to_public_html + outfilename)
    print("creating directory (" + html_sitedir + outfile_location_rel_to_public_html + ") that it goes within...")
    # first, try and create the directory the file's gonna reside in, in case it doesn't exist already
    try:
        #os.mkdir(html_sitedir + outfile_location_rel_to_public_html)
        os.makedirs(html_sitedir + outfile_location_rel_to_public_html) # will make intermediate directories if needed
        print("directory created")
    except:
        pass
    # read in html file template for fill-in
    html_data = str(filereadin_replace_returnstr(full_templatedir + html_full_template,None,None,None))
    # grab the other file pieces you need on this run:
    print("grabbing pieces...")
    # $PAGETITLE = title of webpage, <title>--THISHERE--</title>
    # $CSSFILE = CSS file name, href="-->THISHERE<--"
    # $BODYCLASS = body class="-->THISHERE<--"
    # $TOPOFPAGE = topofpage stuff (from testtopbar2div.html)
    # $VERTICALMENUBAR = verticalmenubar stuff (from testnavbar2.html)
    # $MAINBODY = mainbody stuff (from whatever the latest html file fragment is from the html_create_list)
    # $FOOTER = footer stuff
    # $BOTTOMOFPAGE = bottomof page stuff
    # this ends off the html file
    html_data = html_data.replace( "$PAGETITLE", name_in_vertical_menubar_and_htmlpage_title + titlerider )
    html_data = html_data.replace( "$CSSFILE", css_to_use ) # for live website, use this
    html_data = html_data.replace( "$BODYCLASS", name_in_css_file )
    html_data = filereadin_replace_returnstr(gitdir + "_templates/topofpage.part",4,"$TOPOFPAGE",html_data)
    # NOTE(review): html_create_list here is the module-level global defined
    # in the __main__ section, not a parameter of this function — this only
    # works when called from this script's driver loop. Confirm before reuse.
    verticalmenubar_part_str = create_and_return_verticalmenubar(html_create_list,website_top) # already offset
    html_data = html_data.replace( "$VERTICALMENUBAR", verticalmenubar_part_str )
    if (part_filename_plus_location[0:23] == '_template_parts/people/'): # then we need to create the .part file first
        # People pages: the page body is generated from a .txt data file
        # plugged into people_piece.part rather than from a ready-made .part.
        people_full_template = "_template_parts/people/people_piece.part"
        people_data = str(filereadin_replace_returnstr(gitdir + people_full_template,None,None,None))
        # then do all the replacements by pulling stuff from the .txt file (& replace in the *_piece.part file)
        partfile_data = str(filereadin_replace_returnstr(gitdir + part_filename_plus_location,None,None,None))
        partfile_data_list = partfile_data.split('\n')
        print("%r\n" % (partfile_data_list,))
        for i in range(len(partfile_data_list)):
            if ("$PIC$" == partfile_data_list[i]): # then the next line contains a pic
                people_data = people_data.replace( "$PIC$", str(partfile_data_list[i+1]))
            elif (len(partfile_data_list[i])>0) and ("$" == partfile_data_list[i][0]): # then the next lines contain contact info until hit another $ section
                # Accumulate every line until the next $SECTION$ marker.
                holdstr = ""
                for j in range(i+1,len(partfile_data_list)):
                    if (len(partfile_data_list[j])>0) and (partfile_data_list[j][0] == "$"): # if starts with a $ then done
                        break
                    else:
                        holdstr = holdstr + partfile_data_list[j] + "\n"
                if ("$MAINCONTACT$" == partfile_data_list[i]):
                    people_data = people_data.replace( "$MAINCONTACT$", holdstr )
                elif ("$PUBLICATIONS$" == partfile_data_list[i]):
                    people_data = people_data.replace( "$PUBLICATIONS$", holdstr )
                elif ("$PROJECTS$" == partfile_data_list[i]):
                    people_data = people_data.replace( "$PROJECTS$", holdstr )
                else:
                    print("Did not find a match for %s in template" % (partfile_data_list[i],))
        # now replace the stuff in the file
        html_data = html_data.replace( "$MAINBODY", people_data )
    else: # use the pre-existing part file instead
        html_data = filereadin_replace_returnstr(gitdir + part_filename_plus_location,12,"$MAINBODY",html_data)
    html_data = filereadin_replace_returnstr(gitdir + "_templates/footer.part",8,"$FOOTER",html_data)
    html_data = html_data.replace( "$BOTTOMOFPAGE", "" ) #("testbottomofpage (placeholder text)")
    print("all pieces grabbed!")
    # then, stitch the file together:
    print("stitching file together...")
    # do string substitution:
    filecontents = str(html_data) # already stitched above
    print("file stitched!")
    # now, write everything to the file
    filelocation_str = html_sitedir + outfile_location_rel_to_public_html + outfilename
    print("writing "+ filelocation_str + "...")
    f = open(filelocation_str,'w');
    f.write(filecontents); f.close();
    print(filelocation_str + " has been written")
if __name__ == '__main__':
    """
    Call from the same directory via:
    1$ ./create_html.py
    --or--
    2$ ./create_html.py local
    --or--
    3$ ./create_html.py local [gitdir] [html_sitedir]
    --or--
    4$ ./create_html.py [website_top] [gitdir] [html_sitedir]
    The 1st ($1) is the vanilla run that is used for getting the AS4SR
    website (http://www.ase.uc.edu/~spacerobotics/) compiled using the
    pulldown_instructions.sh script.
    The 2nd ($2) will attempt to perform a local compile of the website in
    the local computer directory /home/spacerobotics/public_html
    as per website_top and gitdir. So, if you want this to work,
    you make have to perform the following at the command prompt first:
    sudo mkdir -p /home/spacerobotics
    sudo chown -R $USER:$USER /home/spacerobotics
    The 3rd ($3) will attempt to perform a compile of the website in a
    different location, assuming that gitdir may not be in spacerobotics
    as per the usual pulldown_instructions.sh script. Example usage:
    ./create_html.py local /home/$USER/git_pulls/website/ /home/$USER/test_website/html_here/
    This will put the files in /home/$USER/test_website/html_here/ and
    create all internal links as "file:///home/$USER/test_website/html_here/"
    Due to the way shutil.copy works, if you want this to work,
    you make have to perform the following at the command prompt first:
    rm -rf /home/$USER/test_website/html_here/
    The 4th ($4) will attempt to perform a compile of the website in a
    different location, assuming that gitdir may not be in spacerobotics
    as per the usual pulldown_instructions.sh script. Example usage:
    ./create_html.py https://www.spacerobotics.uc.edu/~$USER/ /home/$USER/git_pulls/website/ /home/$USER/my_website/public_html/
    This will put the files in /home/$USER/my_website/public_html/ and
    create all internal links as "https://www.spacerobotics.uc.edu/~$USER/"
    Due to the way shutil.copy works, if you want this to work,
    you make have to perform the following at the command prompt first:
    rm -rf /home/$USER/my_website/public_html
    """
    # ---- Parameters for html site creation ----
    website_top="http://www.ase.uc.edu/~spacerobotics/"
    # website_top will be find-replaced with gitdir if local_compile_check = "yes" below
    gitdir = "/home/spacerobotics/git_pulls/website-master/"
    html_sitedir = "/home/spacerobotics/public_html/"
    # ---- Get commandline variables (some can overwrite the above) ----
    local_compile_check = "no" # for live website # this is the default compilation option
    holdargs = sys.argv
    # argv[1] is either the literal "local" or an explicit website_top URL.
    if len(holdargs)>1:
        if isinstance(holdargs[1],str):
            if holdargs[1] == "local": # for local compile, type "local" at the prompt
                local_compile_check = "yes" # for local directory checks of website, not-live, use this -- note that body images &etc. will not work
                website_top = "file://" + html_sitedir
            else: # is not a local compile, have other vars
                website_top = holdargs[1]
    print("*** local_compile_check = %s ***" % local_compile_check)
    # argv[2]/argv[3] optionally override gitdir and html_sitedir.
    if len(holdargs)>3:
        if isinstance(holdargs[2],str) and isinstance(holdargs[3],str):
            gitdir = holdargs[2]
            html_sitedir = holdargs[3]
            if (local_compile_check == "yes"): # html_sitedir is global directory
                website_top = "file://" + html_sitedir
            #else: the new website_top was given earlier
    print("*** html_sitedir = %s ***" % html_sitedir)
    print("*** website_top = %s ***" % website_top)
    # we are going to assume that the directory doesn't exist yet because it's a new wget download-and-unzip
    css_filename = "styles.css"
    #css_dir is html_sitedir
    #css_to_use = gitdir + "public_html/" + css_filedir + css_filename
    css_to_use = website_top + css_filename
    full_templatedir = gitdir + "_templates/"
    html_full_template = 'html_full.template'
    # will require the following:
    # $PAGETITLE = title of webpage, <title>--THISHERE--</title>
    # $CSSFILE = CSS file name, href="-->THISHERE<--"
    # $BODYCLASS = body class="-->THISHERE<--"
    # $TOPOFPAGE = topofpage stuff (from testtopbar2div.html)
    # $VERTICALMENUBAR = verticalmenubar stuff (from testnavbar2.html)
    # $MAINBODY = mainbody stuff (from whatever the latest html file fragment is from the html_create_list)
    # $FOOTER = footer stuff
    # $BOTTOMOFPAGE = bottomof page stuff
    # this ends off the html file
    # each entry of html_create_list is:
    # [ul_class_level_in_menubar, name_in_css_file , outfile_location_rel_to_public_html , outfilename ,
    # part_filename_plus_location , name_in_vertical_menubar_and_htmlpage_title]
    html_create_list = \
        [['','','./','custom404.html','_template_parts/custom404.part','404 Error'], # note that this isn't added to the CSS or menubar list
         ['level1','index','','index.html','_template_parts/index.part','Home'],
         ['level1','about','','about.html','_template_parts/about.part','About'],
         ['level1','publications','','publications.html','_template_parts/publications.part','Publications'],
         ['level1','people','','people.html','_template_parts/people.part','People'],
         #['','','people/','mcghan.html','_template_parts/people/mcghancl.part','Prof. Cat McGhan'],
         #['','','people/','verbryke.html','_template_parts/people/verbrymr.part','Matthew Verbryke'],
         #['','','people/','medhi.html','_template_parts/people/medhijk.part','Jishu Medhi'],
         #['','','people/','shi.html','_template_parts/people/shizu.part','Zhenyu Shi'],
         #['','','people/','korte.html','_template_parts/people/kortecm.part','Chris Korte'],
         ['','','people/','mcghan.html','_template_parts/people/mcghancl.txt','Prof. Cat McGhan'],
         ['','','people/','verbryke.html','_template_parts/people/verbrymr.txt','Matthew Verbryke'],
         ['','','people/','medhi.html','_template_parts/people/medhijk.txt','Jishu Medhi'],
         ['','','people/','shi.html','_template_parts/people/shizu.txt','Zhenyu Shi'],
         ['','','people/','korte.html','_template_parts/people/kortecm.txt','Chris Korte'],
         ['','','people/','muthaiah.html','_template_parts/people/muthaipd.txt','Ponaravind Muthaiah'],
         ['','','people/','karra.html','_template_parts/people/karrasa.txt','Sailendra Karra'],
         ['','','people/','kashid.html','_template_parts/people/kashidsv.txt','Sujeet Kashid'],
         ['level1','research','','research.html','_template_parts/research.part','Research'],
         ['level2','dualarm_inmoov','research/','dualarm_inmoov.html','_template_parts/research/dualarm_inmoov.part','Dual-arm Manipulation'],
         ['level2','aerialmanip','research/','aerialmanip.html','_template_parts/research/aerialmanip.part','Aerial Manipulator'],
         ['level2','vr_gazebo','research/','vrplusgazebo.html','_template_parts/research/vrplusgazebo.part','VR + ROS Gazebo'],
         ['level2','medicalmanip','research/','medicalmanip.html','_template_parts/research/medicalmanip.part','Medical Manipulator'],
         ['level2','rse_cont','research/','rse_cont.html','_template_parts/research/rse2pt0.part','Resilient Spacecraft Executive'],
         ['level2','tss_cont','research/','tss_cont.html','_template_parts/research/tss2pt0.part','Total System Stability'],
         ['level1','projects','','projects.html','_template_parts/unassigned_projects.part','Unassigned Projects'], # open projects
         ['level1','links','','links.html','_template_parts/links.part','Links'],
         #['level2','resources','','resources.html','_template_parts/resources.part','Resources at UC'],
         #['level2','related','','related.html','_template_parts/related_labs.part','Related Labs'],
         ['level1','robots','','robots.html','_template_parts/robots.part','Robots'],
         #['level2','','robots/','rovers.html','_template_parts/robots/rovers.part','Rovers'],
         #['level2','','robots/','uavs.html','_template_parts/robots/uavs.part','UAVs'],
         #['level2','','robots/','arms.html','_template_parts/robots/arms.part','Manipulators'],
         #['level2','','robots/','misc.html','_template_parts/robots/misc.part','Misc. Robots'],
         ['level1','archived','','archived.html','_template_parts/archived.part','Archived Work'],
         #['level2','archived_projects','archived_projects/','projects.html','_template_parts/archived_projects.part','Archived Projects'],
         #['level2','archived_robots','archived_robots/','robots.html','_template_parts/archived_robots.part','Archived Robots']]
         ['','','archived_projects/','projects.html','_template_parts/archived_projects.part','Archived Projects'],
         ['','','archived_robots/','robots.html','_template_parts/archived_robots.part','Archived Robots']]
    titlerider = " - AS4SR Lab, University of Cincinnati"
    # ---- end parameters for html site creation ----
    #try:
    # NOTE: the try/except wrapper is intentionally disabled (see commented
    # lines); `if (True): ... else:` keeps the error-path code compilable
    # while letting exceptions propagate with full tracebacks.
    if (True):
        #
        # move all stuff in current gitdir public_html directory over, just in case (for the overwrite)
        #
        print("copying contents of " + gitdir + "public_html to " + html_sitedir + " -- will fail if dir already exists...")
        try:
            shutil.copytree(gitdir + "public_html/./", html_sitedir) # copy src to dst, must not already exist
        except:
            print("directory already exists, stopping script run")
            sys.exit(1)
        #
        # create styles.css file
        #
        create_css_file_and_write_to_disk(html_sitedir,gitdir,html_create_list,website_top,css_filename)
        #
        # now, get the majority of html files stitched together
        #
        print("starting html creation...")
        for i in range(len(html_create_list)):
            create_html_file_and_write_to_disk(html_sitedir, html_create_list[i], \
                full_templatedir, html_full_template, \
                titlerider, \
                website_top, \
                css_to_use, \
                gitdir)
        # we're done!
        print("done with html compilation")
        print("We're done! Completed writing all files successfully :)")
    #except:
    else:
        print("Error: ran into a problem and crashed!!")
# --EOF--
|
|
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from permabots.models.base import PermabotsModel
from permabots.models import Bot, Response
from jinja2 import Environment
import requests
from django.conf.urls import url
import json
import logging
from permabots import validators
from rest_framework.status import is_success
from permabots import caching
from permabots import utils
logger = logging.getLogger(__name__)
class AbstractParam(PermabotsModel):
    """
    Base class for key/value parameters attached to a
    :class:`Request <permabots.models.handler.Request>`. The value is a
    jinja2 template rendered against the handler processing context.
    """
    key = models.CharField(_('Key'), max_length=255, help_text=_("Name of the parameter"))
    value_template = models.CharField(_('Value template'), max_length=255, validators=[validators.validate_template],
                                      help_text=_("Value template of the parameter. In jinja2 format. http://jinja.pocoo.org/"))

    class Meta:
        abstract = True
        verbose_name = _('Parameter')
        verbose_name_plural = _('Parameters')

    def __str__(self):
        return "(%s, %s)" % (self.key, self.value_template)

    def process(self, **context):
        """
        Render this parameter's value_template against the given context.

        :param context: Processing context
        """
        jinja_env = Environment(extensions=['jinja2_time.TimeExtension'])
        return jinja_env.from_string(self.value_template).render(**context)
@python_2_unicode_compatible
class Request(PermabotsModel):
    """
    HTTP request executed while handling an incoming message. Url, data
    and parameters are all jinja2 templates rendered against the handler
    processing context.
    """
    url_template = models.CharField(_('Url template'), max_length=255, validators=[validators.validate_template],
                                    help_text=_("Url to request. A jinja2 template. http://jinja.pocoo.org/"))
    GET, POST, PUT, PATCH, DELETE = ("Get", "Post", "Put", "Patch", "Delete")
    METHOD_CHOICES = (
        (GET, _("Get")),
        (POST, _("Post")),
        (PUT, _("Put")),
        (DELETE, _("Delete")),
        (PATCH, _("Patch")),
    )
    method = models.CharField(_("Method"), max_length=128, default=GET, choices=METHOD_CHOICES, help_text=_("Define Http method for the request"))
    data = models.TextField(null=True, blank=True, verbose_name=_("Data of the request"), help_text=_("Set POST/PUT/PATCH data in json format"),
                            validators=[validators.validate_template])

    class Meta:
        verbose_name = _('Request')
        verbose_name_plural = _('Requests')

    def __str__(self):
        return "%s(%s)" % (self.method, self.url_template)

    def _get_method(self):
        # Map the stored method name onto the matching `requests` callable;
        # an unknown name is logged and degrades to GET.
        dispatch = {self.GET: requests.get,
                    self.POST: requests.post,
                    self.PUT: requests.put,
                    self.PATCH: requests.patch,
                    self.DELETE: requests.delete}
        if self.method not in dispatch:
            logger.error("Method %s not valid" % self.method)
            return dispatch[self.GET]
        return dispatch[self.method]

    def _url_params(self, **context):
        # Render every url parameter template against the context.
        return dict((param.key, param.process(**context)) for param in self.url_parameters.all())

    def _header_params(self, **context):
        # Render every header parameter template against the context.
        return dict((header.key, header.process(**context)) for header in self.header_parameters.all())

    def data_required(self):
        # GET and DELETE carry no body; every other verb does.
        return self.method not in (self.GET, self.DELETE)

    def process(self, **context):
        """
        Process handler request. Before executing requests render templates with context

        :param context: Processing context
        :returns: Requests response `<http://docs.python-requests.org/en/master/api/#requests.Response>` _.
        """
        jinja_env = Environment(extensions=['jinja2_time.TimeExtension'])
        url = jinja_env.from_string(self.url_template).render(**context).replace(" ", "")
        logger.debug("Request %s generates url %s" % (self, url))
        params = self._url_params(**context)
        logger.debug("Request %s generates params %s" % (self, params))
        headers = self._header_params(**context)
        logger.debug("Request %s generates header %s" % (self, headers))
        http_call = self._get_method()
        if not self.data_required():
            return http_call(url, headers=headers, params=params)
        body = jinja_env.from_string(self.data).render(**context)
        logger.debug("Request %s generates data %s" % (self, body))
        return http_call(url, data=json.loads(body), headers=headers, params=params)
class UrlParam(AbstractParam):
    """
    Url Parameter associated to the request.
    """
    # Reverse accessor Request.url_parameters feeds Request._url_params().
    request = models.ForeignKey(Request, verbose_name=_('Request'), related_name="url_parameters",
                                help_text=_("Request which this Url Parameter is attached to"))

    class Meta:
        verbose_name = _("Url Parameter")
        verbose_name_plural = _("Url Parameters")
class HeaderParam(AbstractParam):
    """
    Header Parameter associated to the request
    """
    # Reverse accessor Request.header_parameters feeds Request._header_params().
    # Fix: help_text previously said "Url Parameter" — a copy-paste from UrlParam.
    request = models.ForeignKey(Request, verbose_name=_('Request'), related_name="header_parameters",
                                help_text=_("Request which this Header Parameter is attached to"))

    class Meta:
        verbose_name = _("Header Parameter")
        verbose_name_plural = _("Header Parameters")
@python_2_unicode_compatible
class Handler(PermabotsModel):
"""
Model to handler conversation message
"""
bot = models.ForeignKey(Bot, verbose_name=_('Bot'), related_name="handlers",
help_text=_("Bot which Handler is attached to"))
name = models.CharField(_('Name'), max_length=100, db_index=True, help_text=_("Name for the handler"))
pattern = models.CharField(_('Pattern'), max_length=255, validators=[validators.validate_pattern],
help_text=_("""Regular expression the Handler will be triggered.
Using https://docs.python.org/2/library/re.html#regular-expression-syntax"""))
request = models.OneToOneField(Request, null=True, blank=True, help_text=_("Request the Handler processes"),
on_delete=models.SET_NULL)
response = models.OneToOneField(Response, help_text=_("Template the handler uses to generate response"))
enabled = models.BooleanField(_('Enable'), default=True, help_text=_("Enable/disable handler"))
source_states = models.ManyToManyField('State', verbose_name=_('Source States'), related_name='source_handlers', blank=True,
help_text=_("Bot states the Handler needs to be to execute. Set none if any"))
target_state = models.ForeignKey('State', verbose_name=_('Target State'), related_name='target_handlers', null=True, blank=True,
help_text=_("This state will be set when handler ends processing"), on_delete=models.SET_NULL)
priority = models.IntegerField(_('Priority'), default=0,
help_text=_("Set priority execution. Higher value higher priority"))
class Meta:
verbose_name = _('Handler')
verbose_name_plural = _('Handlers')
ordering = ['-priority']
def __str__(self):
    # Handlers are displayed by their human-readable name.
    return "%s" % self.name
def urlpattern(self):
    # Expose the handler's regex as a Django url() entry dispatching to process().
    return url(self.pattern, self.process)
def process(self, bot, message, service, state_context, **pattern_context):
"""
Process conversation message.
1. Generates context
* service: name of integration service
* state_context: historic dict of previous contexts. identified by state
* pattern: url pattern dict
* env: dict of environment variables associated to this bot
* message: provider message
* emoji: dict of emojis use named notation with underscores `<http://apps.timwhitlock.info/emoji/tables/unicode>` _.
2. Process request (if required)
3. Generates response. Text and Keyboard
4. Prepare target_state and context for updating chat&state info
:param bot: Bot the handler belongs to
:type Bot: :class:`Bot <permabots.models.bot.Bot>`
:param message: Message from provider
:param service: Identity integration
:type service: string
:param state_context: Previous contexts
:type state_context: dict
:param pattern_context: Dict variables obtained from handler pattern regular expression.
:type pattern_context: dict
:returns: Text and keyboard response, new state for the chat and context used.
"""
env = {}
for env_var in caching.get_or_set_related(bot, 'env_vars'):
env.update(env_var.as_json())
context = {'service': service,
'state_context': state_context,
'pattern': pattern_context,
'env': env,
'message': message.to_dict(),
'emoji': utils.create_emoji_context()}
response_context = {}
success = True
if self.request:
r = self.request.process(**context)
logger.debug("Handler %s get request %s" % (self, r))
success = is_success(r.status_code)
response_context['status'] = r.status_code
try:
response_context['data'] = r.json()
except:
response_context['data'] = {}
context['response'] = response_context
response_text, response_keyboard = self.response.process(**context)
# update ChatState
if self.target_state and success:
context.pop('message', None)
context.pop('env', None)
context.pop('state_context', None)
context.pop('service', None)
context.pop('emoji', None)
target_state = self.target_state
else:
target_state = None
logger.warning("No target state for handler:%s for message %s" %
(self, message))
return response_text, response_keyboard, target_state, context
|
|
#!/usr/bin/env python
"""
#!/usr/bin/env python
Created on Fri Sep 16 23:28:53 2016
@author: dennis
"""
import rospy
from mavros_msgs.msg import State, AttitudeTarget, PositionTarget, AvoidanceTriplet
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseStamped, TwistStamped, Vector3Stamped, Quaternion, Vector3, Point
from sensor_msgs.msg import Imu
from nav_msgs.msg import Path
import time
from tf.transformations import *
import numpy as np
import common_functions as cf
import bezier_fn as bf
import pub_bezier
from dynamic_reconfigure.server import Server
from offboard.cfg import PIDConfig
from offboard.msg import ThreePointMsg
import controller
### constant
RATE_STATE = 1 # state rate subscription
class pidCoeff():
    """Plain container for PID gains.

    Holds per-axis (x, y, z) gain vectors for the position, velocity and
    acceleration loops, plus the scalar thrust-mapping factors Mxy / Mz.
    All values start at zero and are filled in by the reconfigure callback.
    """
    def __init__(self):
        # One 3-vector of gains per loop/term.
        for gain_name in ('Pp', 'Pv', 'Iv', 'Dv', 'Pa', 'Ia', 'Da'):
            setattr(self, gain_name, np.zeros(3))
        # Scalar mapping coefficients (horizontal / vertical).
        self.Mxy = 0.0
        self.Mz = 0.0
### class for subscription ###
class mapping():
def __init__(self, nh):
self._run_bz_controller = False
# vel pub
self._vel_pub = rospy.Publisher('mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10 )
self._vel_msg = TwistStamped()
# acc pub
self._accel_pub = rospy.Publisher('/mavros/setpoint_accel/accel', Vector3Stamped, queue_size=10 )
self._accel_msg = Vector3Stamped()
# attitude
self._att_pub = rospy.Publisher('/mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=10)
self._att_msg = AttitudeTarget()
self._att_msg.type_mask = 7
# local raw: send acceleration and yaw
self._acc_yaw_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size= 10)
self._acc_yaw_msg = PositionTarget()
self._acc_yaw_msg.type_mask = 2048 + 32 + 16 + 8 + 4 + 2 + 1 #+ 512
# local raw: send velocity and yaw
self._vel_yaw_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size= 10)
self._vel_yaw_msg = PositionTarget()
self._vel_yaw_msg.type_mask = 1 + 2 + 4 + 64 + 128 + 256 + 2048
# path bezier triplet send
self._bezier_triplet_pub = rospy.Publisher('/mavros/avoidance_triplet', AvoidanceTriplet, queue_size=10)
self._bezier_triplet_msg = AvoidanceTriplet()
self._bezier_duration = 1.0
# initlaize publisher for visualization
self._pub_visualize = pub_bezier.pub_bezier()
# dt
self._dt = 0.0
# call back variables
self._pid_coeff = pidCoeff()
#print self._pid_coeff
# pid tuning parameters with first callback
Server(PIDConfig, self._pidcallback)
self._ctr = controller.controller(self._pid_coeff, 9.91)
### subscriber ###
# state subscriber
self._rate_state = rospy.Rate(RATE_STATE)
self._current_state = State()
rospy.Subscriber('/mavros/state', State , self._current_state_cb)
# subscriber,
self._local_pose = PoseStamped()
self._local_pose.pose.position = cf.p_numpy_to_ros([0.0,0.0,0.0])
rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self._local_pose_cb)
self._local_vel = TwistStamped()
self._local_vel.twist.linear = cf.p_numpy_to_ros([0.0,0.0,0.0])
rospy.Subscriber('/mavros/local_position/velocity', TwistStamped, self._local_vel_cb)
self._bezier_pt = []
'''self._bezier_pt[0] = cf.p_numpy_to_ros([0.0,0.0,0.0])
self._bezier_pt[1] = cf.p_numpy_to_ros([0.0,0.0,0.0])
self._bezier_pt[2] = cf.p_numpy_to_ros([0.0,0.0,0.0]'''
#self._bezier_duration = 1.0
rospy.Subscriber('/path/bezier_pt', Path, self._bezier_cb)
rospy.Subscriber('/path/three_point_message', ThreePointMsg, self._three_point_msg_cb)
self._linear_acc = Vector3()
self._linear_acc = cf.p_numpy_to_ros_vector([0.0,0.0,0.0])
rospy.Subscriber('/mavros/imu/data', Imu, self._imu_cb)
#= {"Pxy_p": 0.0, "Pz_p": 0.0, "Pxy_v":0.0 , "Pz_v":0.0, "Dxy_v":0.0 , "Dz_v":0.0 , "Ixy_v":0.0 , "Iz_v":0.0, "M":0.0 }
# controller
def _pub_thrust_sp_desired(self):
#self._local_pose.pose.position = np.array([1,2,3])
#self._local_vel_twist_linear = np.array([1,2,3])
# get current position p_c, v_c, a_c in local frame
p_c = cf.p_ros_to_numpy(self._local_pose.pose.position)
q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation)
v_c =cf.p_ros_to_numpy(self._local_vel.twist.linear)
a_c = cf.p_ros_to_numpy(self._linear_acc) # bodyframe
a_c = np.dot(cf.rotation_from_q_transpose(q_c), a_c) # world frame
# bezier points
bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), \
cf.p_ros_to_numpy(self._bezier_pt[1]), \
cf.p_ros_to_numpy(self._bezier_pt[2])]
# get closest point p*, velocity v* and acceleration a*
p_star, v_star, a_star = bf.point_closest_to_bezier(bz, p_c, self._bezier_duration)
'''p_star = np.array([0.0,0.0,5.0])
v_star = np.array([0.0,0.0,0.0])
a_star = np.array([0.0,0.0,0])'''
# set states
self._ctr.set_states(p_c, v_c, a_c, p_star, v_star, a_star, self._pid_coeff)
# compute desired thrust
thrust_des, v_sp, vc = self._ctr.update_thrust_old(time.time())
# send vel and thrust vector
self._visualize_vel(p_c, vc)
self._visualize_acc(p_c, v_sp )
self._visualize_x(p_c)
self._visualize_target(p_star)
# get correct yaw
# get yaw angle error
yaw_desired = 0.0
v_star_norm= np.linalg.norm(v_star)
z = np.array([0.0,0.0,1.0])
if (v_star_norm > 0.0) and not (np.array_equal(np.abs(v_star/v_star_norm), z)): #yaw not defined if norm(v_des) or v_des == z
# get current yaw
yaw_desired = self.get_desired_yaw(v_star) - np.pi/2.0
# assign to msg
self._acc_yaw_msg.acceleration_or_force = cf.p_numpy_to_ros_vector(thrust_des)
self._acc_yaw_msg.yaw = yaw_desired
# publish
self._acc_yaw_pub.publish(self._acc_yaw_msg)
def _pub_att_desired(self):
q = Quaternion()
q.x =0.0
q.y = 0.0
q.z = 1.0
q.w = 0.0
self._att_msg.orientation = q
self._att_msg.thrust =1.0
self._att_pub.publish(self._att_msg)
def _pub_acc_yaw_desired(self):
a = Vector3()
a.x = 0.0
a.y = 0.0
a.z = 0.2
self._acc_yaw_msg.acceleration_or_force = a
#self._local_msg.yaw = 0.0
self._local_pub.publish(self._acc_yaw_msg)
def _pub_v_desired(self):
# get current position
pose = cf.p_ros_to_numpy(self._local_pose.pose.position)
bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), \
cf.p_ros_to_numpy(self._bezier_pt[1]), \
cf.p_ros_to_numpy(self._bezier_pt[2])]
# get closest point and velocity to bezier
p_des, v_des, a_des = bf.point_closest_to_bezier(bz, pose, self._bezier_duration)
print a_des
# send velocity vector
self._visualize_vel(p_des, v_des)
self._visualize_x(pose)
# get desired velocity
v_final = bf.vel_adjusted(p_des, v_des, pose)
v_final *= min(np.linalg.norm(v_final), 3.0) / np.linalg.norm(v_final)
# get yaw angle error
theta = 0.0
v_des_norm= np.linalg.norm(v_des)
z = np.array([0.0,0.0,1.0])
if (v_des_norm > 0.0) and not (np.array_equal(np.abs(v_des/v_des_norm), z)): #yaw not defined if norm(v_des) or v_des == z
theta = self.angle_error(v_des)
# get current yaw
yaw_desired = self.get_desired_yaw(v_des) - np.pi/2.0
# assign to msg
self._vel_yaw_msg.velocity = cf.p_numpy_to_ros_vector(v_final)
self._vel_yaw_msg.yaw = yaw_desired
# publish
self._vel_yaw_pub.publish(self._vel_yaw_msg)
def _visualize_x(self, pose):
# current orientation
q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation)
# body frame x
x_b = np.array([1.0,0.0,0.0])
# convert to world frame
x = np.dot(cf.rotation_from_q_transpose(q_c), x_b)
pt = cf.p_numpy_to_ros(pose)
pt2 = cf.p_numpy_to_ros(pose + x)
pts = [pt, pt2]
self._pub_visualize.pub_x_vec(pts)
def _visualize_target(self, p):
pt = cf.p_numpy_to_ros(p)
self._pub_visualize.pub_target(pt)
def _visualize_vel(self, p, v):
pt = cf.p_numpy_to_ros(p)
pt2 = cf.p_numpy_to_ros(v + p)
points = [pt, pt2]
self._pub_visualize.pub_velocity(points)
def _visualize_acc(self, p, a):
pt = cf.p_numpy_to_ros(p)
pt2 = cf.p_numpy_to_ros( p + a)
points = [pt, pt2]
self._pub_visualize.pub_a_vec(points)
def _pub_a_desired(self):
# get current position, velocity
pose = cf.p_ros_to_numpy(self._local_pose.pose.position)
velocity = cf.p_ros_to_numpy(self._local_vel.twist.linear)
bz = [cf.p_ros_to_numpy(self._bezier_pt[0]), \
cf.p_ros_to_numpy(self._bezier_pt[1]), \
cf.p_ros_to_numpy(self._bezier_pt[2])]
# get closest point and velocity and acceleration to bezier
p_des, v_des, a_des = bf.point_closest_to_bezier(bz, pose, self._bezier_duration)
# get desired velocity
#a_final = bf.accel_adjusted(p_des, v_des, a_des, pose, velocity)
print a_des
#print "a_des : {}\t v_des : {}\t p_des: {}".format(a_des, v_des, p_des)
# get yaw angle error
'''theta = 0.0
v_des_norm= np.linalg.norm(v_des)
z = np.array([0.0,0.0,1.0])
if (v_des_norm > 0.0) and not (np.array_equal(np.abs(v_des/v_des_norm), z)): #yaw not defined if norm(v_des) or v_des == z
theta = self.angle_error(v_des)'''
#a_final = np.array([0.0,0.0,0.52])
# assign to msg
self._accel_msg.vector = cf.p_numpy_to_ros(a_des)
# publish
self._accel_pub.publish(self._accel_msg)
# finds closest point on circel to a specific point
def angle_error(self, v_des):
# current orrientation
q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation)
# convert v_des to body frame
vb_des = np.dot(cf.rotation_from_q(q_c), v_des)
# body z axis x
z = np.array([0.0,0.0,1.0])
x = np.array([1.0,0.0,0.0])
# project onto xy body plane
vb_des_proj = vb_des - z * np.dot(z, np.transpose(vb_des))
# normalize
vb_proj_n = vb_des_proj / np.linalg.norm(vb_des_proj)
# get angle
theta = np.arccos(np.dot(x, np.transpose(vb_proj_n)))
# determine sign
cross = np.cross(x, vb_proj_n)
if ( cross[2] < 0.0 ):
theta *= -1.0
#print theta
return theta
# get desired yaw
def get_desired_yaw(self, v_des):
# z axis
z = np.array([0.0,0.0,1.0])
x = np.array([1.0,0.0,0.0])
# project v_des onto xy plane
v_des_proj = v_des - z * np.dot(z, np.transpose(v_des))
v_des_p_n = v_des_proj / np.linalg.norm(v_des_proj)
# angle between v_des_prj and x
angle = np.arccos(np.dot(x, np.transpose(v_des_p_n)))
# sign
cross = np.cross(x, v_des_p_n)
if (cross[2] < 0.0):
angle *= -1
return angle
# current yaw
def get_current_yaw(self):
# current orrientation
q_c = cf.q_ros_to_numpy(self._local_pose.pose.orientation)
# body frame x
x_b = np.array([1.0,0.0,0.0])
# convert to world frame
x_w = np.dot(cf.rotation_from_q_transpose(q_c), x_b)
# norm of xy plane
z = np.array([0.0,0.0,1.0])
x = np.array([1.0,0.0,0.0])
# pojecto on xy placne of world frame
x_w_proj = x_w - z * np.dot(z, np.transpose(x_w))
# normalize
x_w_proj_n = x_w_proj / np.linalg.norm(x_w_proj)
# get angle
yaw = np.arccos(np.dot(x, np.transpose(x_w_proj_n)))
#determine sign
cross = np.cross(x, x_w_proj_n)
if (cross[2] < 0.0):
yaw *= -1.0
return yaw
def send_bezier_triplet(self):
self._bezier_triplet_msg.prev = self._bezier_pt[0]
self._bezier_triplet_msg.ctrl = self._bezier_pt[1]
self._bezier_triplet_msg.next = self._bezier_pt[2]
self._bezier_triplet_msg.acc_per_err = 0.0
self._bezier_triplet_msg.duration = 1.0
self._bezier_triplet_msg.max_acc = 5.0
print self._bezier_pt
self._bezier_triplet_pub.publish(self._bezier_triplet_msg)
### callback functions ###
def _current_state_cb(self, data):
self._current_state = data
def _local_pose_cb(self, data):
self._local_pose = data
def _local_vel_cb(self, data):
self._local_vel = data
def _imu_cb(self, data):
self._linear_acc = data.linear_acceleration
if self._run_bz_controller:
self._pub_thrust_sp_desired()
def _bezier_cb(self, data):
self._bezier_pt = [pose.pose.position for pose in data.poses]
#self.send_bezier_triplet()
self._run_bz_controller = True
# self._bezier_pt = [pose.pose.position for pose in data.poses]
# self._run_bz_controller = True
# #self._pub_a_desired()
def _three_point_msg_cb(self, data):
self._bezier_pt = [data.prev, data.ctrl, data.next]
self._bezier_duration = data.duration
self._run_bz_controller = True
def _pidcallback(self, config, level):
rospy.loginfo("""Reconfigure Request: {Pxy_p}, {Pz_p},\
#{Pxy_v}, {Pz_v}, {Ixy_v}, {Ixy_v}, {Iz_v}, {Dxy_v}, {Dz_v}, {Mz}, {Mxy}""".format(**config))
# pid struct
pid = pidCoeff()
pid.Pp[0] = config.Pxy_p
pid.Pp[1] = config.Pxy_p
pid.Pp[2] = config.Pz_p
pid.Pv[0] = config.Pxy_v
pid.Pv[1] = config.Pxy_v
pid.Pv[2] = config.Pz_v
pid.Iv[0] = config.Ixy_v
pid.Iv[1] = config.Ixy_v
pid.Iv[2] = config.Iz_v
pid.Dv[0] = config.Dxy_v
pid.Dv[1] = config.Dxy_v
pid.Dv[2] = config.Dz_v
pid.Pa[0] = config.Pxy_a
pid.Pa[1] = config.Pxy_a
pid.Pa[2] = config.Pz_a
pid.Ia[0] = config.Ixy_a
pid.Ia[1] = config.Ixy_a
pid.Ia[2] = config.Iz_a
pid.Da[0] = config.Dxy_a
pid.Da[1] = config.Dxy_a
pid.Da[2] = config.Dz_a
pid.Mxy= config.Mxy
pid.Mz = config.Mz
self._pid_coeff = pid
print self._pid_coeff.Pp
return config
# node enter point
def start():
    """Entry point: initialise the ROS node, construct the mapping object
    and block until shutdown."""
    node_handle = rospy.init_node('beziermapping')
    # The mapping object registers all publishers/subscribers in __init__;
    # keeping a reference is enough, rospy.spin() services the callbacks.
    _mapping_node = mapping(node_handle)
    rospy.spin()


if __name__ == '__main__':
    start()
|
|
import os
from collections import OrderedDict
from corpustools.acousticsim.io import load_path_mapping
from corpustools.exceptions import PCTPythonError
try:
real_acousticsim = True
from acousticsim.main import(acoustic_similarity_mapping,
acoustic_similarity_directories,
analyze_directory, AcousticSimError)
except (ImportError, ModuleNotFoundError) as e:
real_acousticsim = False
from corpustools.acousticsim.main import(acoustic_similarity_mapping,
acoustic_similarity_directories,
analyze_directory, AcousticSimError)
from .imports import *
from .widgets import DirectoryWidget, RadioSelectWidget, FileWidget
from .windows import FunctionWorker, FunctionDialog
from corpustools import __version__
class ASWorker(FunctionWorker):
    """Background worker that runs an acoustic similarity analysis.

    ``self.kwargs['type']`` selects the analysis: 'one' (single directory),
    'two' (compare two directories) or 'file' (explicit path mapping).
    Results are emitted via ``dataReady``; errors via ``errorEncountered``.
    """
    def run(self):
        kwargs = self.kwargs
        self.results = list()
        if kwargs['type'] == 'one':
            try:
                asim = analyze_directory(kwargs['query'], **kwargs)
            except AcousticSimError as e:
                self.errorEncountered.emit(e)
                return
            except Exception as e:
                e = PCTPythonError(e)
                self.errorEncountered.emit(e)
                return
        elif kwargs['type'] == 'two':
            try:
                asim, output_val = acoustic_similarity_directories(*kwargs['query'],**kwargs)
            except AcousticSimError as e:
                self.errorEncountered.emit(e)
                return
            except Exception as e:
                e = PCTPythonError(e)
                self.errorEncountered.emit(e)
                return
            #asim[(kwargs['query'][0],kwargs['query'][1])] = output_val
        elif kwargs['type'] == 'file':
            try:
                asim = acoustic_similarity_mapping(kwargs['query'], **kwargs)
            except AcousticSimError as e:
                self.errorEncountered.emit(e)
                return
            except Exception as e:
                e = PCTPythonError(e)
                self.errorEncountered.emit(e)
                return
        else:
            # Unknown comparison type: previously fell through and raised
            # NameError on `asim` below.
            return
        if self.stopped:
            return
        for k,v in asim.items():
            if self.stopped:
                return
            self.results.append(list(k) + [v])
        if kwargs['type'] == 'two':
            self.results.append([os.path.basename(kwargs['query'][0]),os.path.basename(kwargs['query'][1]), output_val])
        elif asim:
            # Guard: an empty result set previously raised ZeroDivisionError.
            self.results.append(['AVG', 'AVG',sum(asim.values())/len(asim)])
        if self.stopped:
            self.finishedCancelling.emit()
            return
        self.dataReady.emit(self.results)
class ASDialog(FunctionDialog):
    """Qt dialog collecting parameters for an acoustic similarity analysis.

    Lets the user pick a comparison source (one directory, two directories,
    or a path-mapping file), a representation (MFCC / amplitude envelopes),
    a distance algorithm, and frequency settings, then runs ASWorker.
    """
    header = ['PCT ver.',
              'Analysis name',
              'File 1',
              'File 2',
              'Representation',
              'Match function',
              'Minimum frequency',
              'Maximum frequency',
              'Number of filters',
              'Number of coefficients',
              'Is similarity',
              'Result']

    _about = [('This function calculates the acoustic similarity of sound files in two'
               ' directories by generating either MFCCs or amplitude envelopes for each'
               ' sound file and using dynamic time warping or cross-correlation to get '
               'the average distance/similarity across all tokens.'),
              '',
              'References: ',
              ('Ellis, Daniel P. W. 2005. PLP and RASTA (and MFCC, and'
               ' inversion) in Matlab (online web resource).'
               ' http://www.ee.columbia.edu/~dpwe/resources/matlab/rastamat/.'),
              ('Lewandowski, Natalie. 2012. Talent in nonnative phonetic'
               ' convergence. PhD Thesis.')]

    name = 'acoustic similarity'

    def __init__(self, parent, settings, showToolTips):
        """Build the dialog widgets; showToolTips toggles help popups."""
        FunctionDialog.__init__(self, parent, settings, ASWorker())
        self.showToolTips = showToolTips
        aslayout = QHBoxLayout()
        compFrame = QGroupBox('Comparison type')
        vbox = QFormLayout()
        self.compType = None
        self.oneDirectoryRadio = QRadioButton('Analyze single directory')
        self.oneDirectoryRadio.clicked.connect(self.oneDirectorySelected)
        self.oneDirectoryWidget = DirectoryWidget()
        self.oneDirectoryWidget.textChanged.connect(self.oneDirectoryRadio.click)
        self.twoDirectoryRadio = QRadioButton('Compare two directories')
        self.twoDirectoryRadio.clicked.connect(self.twoDirectoriesSelected)
        self.directoryOneWidget = DirectoryWidget()
        self.directoryOneWidget.textChanged.connect(self.twoDirectoryRadio.click)
        self.directoryTwoWidget = DirectoryWidget()
        self.directoryTwoWidget.textChanged.connect(self.twoDirectoryRadio.click)
        self.fileRadio = QRadioButton('Use list of full path comparisons')
        self.fileRadio.clicked.connect(self.fileSelected)
        self.fileWidget = FileWidget('Select a word pairs file', 'Text file (*.txt *.csv)')
        self.fileWidget.textChanged.connect(self.fileRadio.click)
        vbox.addRow(self.oneDirectoryRadio)
        vbox.addRow(QLabel('Directory:'))
        vbox.addRow(self.oneDirectoryWidget)
        vbox.addRow(self.twoDirectoryRadio)
        vbox.addRow(QLabel('First directory:'))
        vbox.addRow(self.directoryOneWidget)
        vbox.addRow(QLabel('Second directory:'))
        vbox.addRow(self.directoryTwoWidget)
        vbox.addRow(self.fileRadio)
        vbox.addRow(self.fileWidget)
        compFrame.setLayout(vbox)
        aslayout.addWidget(compFrame)
        optionLayout = QVBoxLayout()
        # QDoubleValidator takes (bottom, top, decimals); the arguments were
        # previously swapped, making the validator's range empty.
        validator = QDoubleValidator(0, float('inf'), 8)
        self.representationWidget = RadioSelectWidget('Representation',
                                                      OrderedDict([('MFCC','mfcc'),
                                                                   ('Amplitude envelopes','envelopes')]),
                                                      {'MFCC':self.mfccSelected,
                                                       'Amplitude envelopes':self.envelopesSelected})
        optionLayout.addWidget(self.representationWidget)
        self.distAlgWidget = RadioSelectWidget('Distance algorithm',
                                               OrderedDict([('Dynamic time warping','dtw'),
                                                            ('Cross-correlation','xcorr')]))
        optionLayout.addWidget(self.distAlgWidget)
        freqLimFrame = QGroupBox('Frequency limits')
        box = QFormLayout()
        self.minFreqEdit = QLineEdit()
        self.minFreqEdit.setValidator(validator)
        self.minFreqEdit.setText('80')
        self.maxFreqEdit = QLineEdit()
        self.maxFreqEdit.setValidator(validator)
        self.maxFreqEdit.setText('7800')
        box.addRow('Minimum frequency (Hz):',self.minFreqEdit)
        box.addRow('Maximum frequency (Hz):',self.maxFreqEdit)
        freqLimFrame.setLayout(box)
        optionLayout.addWidget(freqLimFrame)
        freqResFrame = QGroupBox('Frequency resolution')
        box = QFormLayout()
        self.filterEdit = QLineEdit()
        self.filterEdit.setValidator(validator)
        self.coeffEdit = QLineEdit()
        self.coeffEdit.setValidator(validator)
        box.addRow('Number of filters:',self.filterEdit)
        box.addRow('Number of coefficients (MFCC only):',self.coeffEdit)
        freqResFrame.setLayout(box)
        optionLayout.addWidget(freqResFrame)
        self.outputSimWidget = QCheckBox('Output as similarity (0 to 1)')
        optionLayout.addWidget(self.outputSimWidget)
        #self.multiprocessingWidget = QCheckBox('Use multiprocessing')
        #optionLayout.addWidget(self.multiprocessingWidget)
        if real_acousticsim:
            optionLayout.addWidget(QLabel('Acoustic similarity benefits from multiprocessing.'))
            optionLayout.addWidget(QLabel('Multiprocessing can be enabled in Preferences.'))
        else:
            optionLayout.addWidget(QLabel('The acoustic similarity module loaded\ndoes not support multiprocessing.'))
            optionLayout.addWidget(QLabel('Install python-acoustic-similarity\nto access multiprocessing and additional _features.'))
        optionFrame = QFrame()
        optionFrame.setLayout(optionLayout)
        aslayout.addWidget(optionFrame)
        asframe = QFrame()
        asframe.setLayout(aslayout)
        self.layout().insertWidget(0,asframe)
        self.representationWidget.initialClick()
        if self.showToolTips:
            compFrame.setToolTip(("<FONT COLOR=black>"
                                  'Choose two directories to compare sound files between.'
                                  "</FONT>"))
            self.representationWidget.setToolTip(("<FONT COLOR=black>"
                                                  'Choose how to represent acoustic waveforms.'
                                                  "</FONT>"))
            self.distAlgWidget.setToolTip(("<FONT COLOR=black>"
                                           'Choose how to compare representations.'
                                           "</FONT>"))
            freqLimFrame.setToolTip(("<FONT COLOR=black>"
                                     'Choose frequency range.'
                                     "</FONT>"))
            freqResFrame.setToolTip(("<FONT COLOR=black>"
                                     'Choose how many filters to divide the frequency range'
                                     ' and how many coefficients to use for MFCC generation.'
                                     ' Leave blank for reasonable defaults based on the'
                                     ' representation.'
                                     "</FONT>"))
            self.outputSimWidget.setToolTip(("<FONT COLOR=black>"
                                             'Choose whether the result should be similarity'
                                             ' or distance. Similarity is inverse distance,'
                                             ' and distance is inverse similarity'
                                             "</FONT>"))
            #self.multiprocessingWidget.setToolTip(("<FONT COLOR=black>"
            #'Choose whether to use multiple processes.'
            # ' Multiprocessing is currently not supported'
            #"</FONT>"))

    def oneDirectorySelected(self):
        """Radio handler: single-directory comparison."""
        self.compType = 'one'

    def twoDirectoriesSelected(self):
        """Radio handler: two-directory comparison."""
        self.compType = 'two'

    def fileSelected(self):
        """Radio handler: path-mapping-file comparison."""
        self.compType = 'file'

    def mfccSelected(self):
        """Apply MFCC defaults (26 filters, 12 coefficients)."""
        self.coeffEdit.setEnabled(True)
        self.filterEdit.setText('26')
        self.coeffEdit.setText('12')

    def envelopesSelected(self):
        """Apply amplitude-envelope defaults; coefficients do not apply."""
        self.coeffEdit.setText('N/A')
        self.coeffEdit.setEnabled(False)
        self.filterEdit.setText('8')

    def generateKwargs(self):
        """Validate the form and build the worker kwargs.

        Returns the kwargs dict, or None after showing an error dialog.
        """
        rep = self.representationWidget.value()
        alg = self.distAlgWidget.value()
        try:
            filters = int(self.filterEdit.text())
            if filters < 0:
                raise ValueError
        except ValueError:
            reply = QMessageBox.critical(self,
                                         "Invalid information", "The number of filters must be a number greater than 0.")
            return
        try:
            coeffs = int(self.coeffEdit.text())
            if coeffs <= 0:
                raise ValueError
            if int(self.coeffEdit.text()) > int(self.filterEdit.text())-1:
                raise ValueError
        except ValueError:
            # Coefficients only matter for MFCC; 'N/A' is expected otherwise.
            if rep == 'mfcc':
                reply = QMessageBox.critical(self,
                                             "Invalid information", "The number of coefficients must be a number greater than 0 and less than the number of filters.")
                return
        try:
            freq_lims = (float(self.minFreqEdit.text()),float(self.maxFreqEdit.text()))
            if freq_lims[0] < 0 or freq_lims[1] < 0:
                raise ValueError("The minimum and maximum frequenies must be greater than 0.")
            if freq_lims[0] >= freq_lims[1]:
                raise ValueError("The maximum frequeny must be greater than the minimum frequency.")
        except ValueError as e:
            reply = QMessageBox.critical(self,
                                         "Invalid information", str(e))
            return
        kwargs = {
            'type': self.compType,
            'rep':rep,
            'match_func':alg,
            'num_filters':filters,
            'freq_lims':freq_lims,
            'output_sim':self.outputSimWidget.isChecked(),
            'use_multi':self.settings['use_multi'],
            'num_cores':self.settings['num_cores'],
            'return_all':True}
        if rep == 'mfcc':
            kwargs['num_coeffs'] = coeffs
        if self.compType is None:
            reply = QMessageBox.critical(self,
                                         "Missing information", "Please specify a comparison type.")
            return
        elif self.compType == 'one':
            kwargs['query'] = self.oneDirectoryWidget.value()
        elif self.compType == 'two':
            dirOne = self.directoryOneWidget.value()
            if dirOne == '':
                reply = QMessageBox.critical(self,
                                             "Missing information", "Please specify the first directory.")
                return
            if not os.path.exists(dirOne):
                reply = QMessageBox.critical(self,
                                             "Invalid information", "The first directory does not exist.")
                return
            dirTwo = self.directoryTwoWidget.value()
            if dirTwo == '':
                reply = QMessageBox.critical(self,
                                             "Missing information", "Please specify the second directory.")
                return
            if not os.path.exists(dirTwo):
                reply = QMessageBox.critical(self,
                                             "Invalid information", "The second directory does not exist.")
                return
            kwargs['query'] = [dirOne, dirTwo]
        elif self.compType == 'file':
            path = self.fileWidget.value()
            if path == '':
                reply = QMessageBox.critical(self,
                                             "Missing information", "Please specify a path mapping file.")
                return
            if not os.path.exists(path):
                reply = QMessageBox.critical(self,
                                             "Invalid information", "The specified path mapping file does not exist.")
                return
            try:
                kwargs['query'] = load_path_mapping(path)
            except OSError as e:
                reply = QMessageBox.critical(self,
                                             "Invalid information", str(e))
                return
        return kwargs

    def calc(self):
        """Validate, launch the worker thread and show the progress dialog."""
        kwargs = self.generateKwargs()
        if kwargs is None:
            return
        self.thread.setParams(kwargs)
        self.thread.start()
        result = self.progressDialog.exec_()
        self.progressDialog.reset()
        if result:
            self.accept()

    def setResults(self,results):
        """Convert raw worker rows into the dicts expected by the results view."""
        self.results = list()
        for r in results:
            self.results.append({'PCT ver.': __version__,#self.corpusModel.corpus._version,
                                 'Analysis name': self.name.capitalize(),
                                 'File 1': r[0],
                                 'File 2': r[1],
                                 'Representation': self.representationWidget.displayValue(),
                                 'Match function': self.distAlgWidget.displayValue(),
                                 'Minimum frequency': float(self.minFreqEdit.text()),
                                 'Maximum frequency': float(self.maxFreqEdit.text()),
                                 'Number of filters': int(self.filterEdit.text()),
                                 'Number of coefficients': self.coeffEdit.text(),
                                 'Is similarity': self.outputSimWidget.isChecked(),
                                 'Result': r[2]})
|
|
import json
import logging
import re
import threading
import time
from urllib.parse import unquote_plus, urlparse
import websocket
from streamlink.plugin import Plugin, PluginArgument, PluginArguments
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
_log = logging.getLogger(__name__)
_url_re = re.compile(
r"^https?://(?P<domain>live[0-9]*\.nicovideo\.jp)/watch/lv[0-9]*")
_login_url = "https://account.nicovideo.jp/login/redirector"
_login_url_params = {
"show_button_twitter": 1,
"show_button_facebook": 1,
"next_url": "/"}
class NicoLive(Plugin):
arguments = PluginArguments(
PluginArgument(
"email",
argument_name="niconico-email",
sensitive=True,
metavar="EMAIL",
help="The email or phone number associated with your "
"Niconico account"),
PluginArgument(
"password",
argument_name="niconico-password",
sensitive=True,
metavar="PASSWORD",
help="The password of your Niconico account"),
PluginArgument(
"user-session",
argument_name="niconico-user-session",
sensitive=True,
metavar="VALUE",
help="Value of the user-session token \n(can be used in "
"case you do not want to put your password here)"))
is_stream_ready = False
is_stream_ended = False
watching_interval = 30
watching_interval_worker_thread = None
stream_reader = None
_ws = None
frontend_id = None
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url) is not None
    def _get_streams(self):
        """Resolve the HLS streams for the watch page.

        Extracts the wss API URL (logging in first if the page does not
        expose it), connects, waits up to ~60 s for the stream to become
        ready, then wraps each HLS variant in a NicoHLSStream.
        """
        # Drop any query string from the watch URL.
        self.url = self.url.split("?")[0]
        self.session.http.headers.update({
            "User-Agent": useragents.CHROME,
        })
        if not self.get_wss_api_url():
            _log.debug("Coundn't extract wss_api_url. Attempting login...")
            if not self.niconico_web_login():
                return None
            if not self.get_wss_api_url():
                _log.error("Failed to get wss_api_url.")
                _log.error(
                    "Please check if the URL is correct, "
                    "and make sure your account has access to the video.")
                return None
        self.api_connect(self.wss_api_url)
        # Poll every 0.1 s until handle_api_message flags the stream ready;
        # 600 iterations gives a ~60 second timeout.
        i = 0
        while not self.is_stream_ready:
            if i % 10 == 0:
                _log.debug("Waiting for permit...")
            if i == 600:
                _log.error("Waiting for permit timed out.")
                return None
            if self.is_stream_ended:
                return None
            time.sleep(0.1)
            i += 1
        streams = HLSStream.parse_variant_playlist(
            self.session, self.hls_stream_url)
        # Wrap each variant so reads can be stopped when the broadcast ends.
        nico_streams = {}
        for s in streams:
            nico_stream = NicoHLSStream(streams[s], self)
            nico_streams[s] = nico_stream
        return nico_streams
def get_wss_api_url(self):
_log.debug("Getting video page: {0}".format(self.url))
resp = self.session.http.get(self.url)
try:
self.wss_api_url = extract_text(
resp.text, ""webSocketUrl":"", """)
if not self.wss_api_url:
return False
except Exception as e:
_log.debug(e)
_log.debug("Failed to extract wss api url")
return False
try:
self.frontend_id = extract_text(
resp.text, ""frontendId":", ","")
except Exception as e:
_log.debug(e)
_log.warning("Failed to extract frontend id")
self.wss_api_url = "{0}&frontend_id={1}".format(self.wss_api_url, self.frontend_id)
_log.debug("Video page response code: {0}".format(resp.status_code))
_log.trace("Video page response body: {0}".format(resp.text))
_log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
_log.debug("Got frontend_id: {0}".format(self.frontend_id))
return self.wss_api_url.startswith("wss://")
def api_on_open(self):
self.send_playerversion()
require_new_stream = not self.is_stream_ready
self.send_getpermit(require_new_stream=require_new_stream)
    def api_on_error(self, ws, error=None):
        """websocket-client error callback: log and reconnect after 5 s."""
        if error:
            _log.warning(error)
        _log.warning("wss api disconnected.")
        _log.warning("Attempting to reconnect in 5 secs...")
        # Blocking sleep is fine here: this runs on the ws worker thread.
        time.sleep(5)
        self.api_connect(self.wss_api_url)
def api_connect(self, url):
# Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
proxy_url = self.session.get_option("https-proxy")
if proxy_url is None:
proxy_url = self.session.get_option("http-proxy")
proxy_options = parse_proxy_url(proxy_url)
if proxy_options.get('http_proxy_host'):
_log.debug("Using proxy ({0}://{1}:{2})".format(
proxy_options.get('proxy_type') or "http",
proxy_options.get('http_proxy_host'),
proxy_options.get('http_proxy_port') or 80))
_log.debug("Connecting: {0}".format(url))
self._ws = websocket.WebSocketApp(
url,
header=["User-Agent: {0}".format(useragents.CHROME)],
on_open=self.api_on_open,
on_message=self.handle_api_message,
on_error=self.api_on_error)
self.ws_worker_thread = threading.Thread(
target=self._ws.run_forever,
args=proxy_options)
self.ws_worker_thread.daemon = True
self.ws_worker_thread.start()
def send_message(self, type_, body):
msg = {"type": type_, "body": body}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_no_body_message(self, type_):
msg = {"type": type_}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_custom_message(self, msg):
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_playerversion(self):
body = {
"type": "startWatching",
"data": {
"stream": {
"quality": "abr",
"protocol": "hls",
"latency": "high",
"chasePlay": False
},
"room": {
"protocol": "webSocket",
"commentable": True
},
"reconnect": False
}
}
self.send_custom_message(body)
def send_getpermit(self, require_new_stream=True):
body = {
"type": "getAkashic",
"data": {
"chasePlay": False
}
}
self.send_custom_message(body)
    def send_watching(self):
        """Send the periodic "watching" keep-alive command.

        NOTE(review): ``self.broadcast_id`` is never assigned anywhere in
        the visible code -- confirm it is set elsewhere before this runs.
        """
        body = {
            "command": "watching",
            "params": [self.broadcast_id, "-1", "0"]
        }
        self.send_message("watch", body)
    def send_pong(self):
        """Answer a server ping; "keepSeat" keeps the viewing slot alive."""
        self.send_no_body_message("pong")
        self.send_no_body_message("keepSeat")
    def handle_api_message(self, message):
        """Websocket on_message callback: dispatch server API messages.

        Handles stream URL announcements ("stream"/"currentstream"),
        heartbeat-interval setup, stream-end cleanup and server pings.
        """
        _log.debug(f"Received: {message}")
        message_parsed = json.loads(message)
        if message_parsed["type"] == "stream":
            data = message_parsed["data"]
            self.hls_stream_url = data["uri"]
            self.is_stream_ready = True
        if message_parsed["type"] == "watch":
            body = message_parsed["body"]
            command = body["command"]
            if command == "currentstream":
                current_stream = body["currentStream"]
                self.hls_stream_url = current_stream["uri"]
                self.is_stream_ready = True
            elif command == "watchinginterval":
                # The server dictates how often we must send "watching";
                # start the heartbeat thread the first time we learn it.
                self.watching_interval = int(body["params"][0])
                _log.debug("Got watching_interval: {0}".format(
                    self.watching_interval))
                if self.watching_interval_worker_thread is None:
                    _log.debug("send_watching_scheduler starting.")
                    self.watching_interval_worker_thread = threading.Thread(
                        target=self.send_watching_scheduler)
                    self.watching_interval_worker_thread.daemon = True
                    self.watching_interval_worker_thread.start()
                else:
                    _log.debug("send_watching_scheduler already running.")
            elif command == "disconnect":
                # Broadcast is over: flag it and close the HLS reader so
                # playback terminates instead of stalling.
                _log.info("Websocket API closed.")
                _log.info("Stream ended.")
                self.is_stream_ended = True
                if self.stream_reader is not None:
                    self.stream_reader.close()
                    _log.info("Stream reader closed.")
        elif message_parsed["type"] == "ping":
            self.send_pong()
    def send_watching_scheduler(self):
        """
        Periodically send "watching" command to the API.
        This is necessary to keep the session alive.

        Runs in a daemon thread until the stream ends; the interval is
        set by the server (see handle_api_message "watchinginterval").
        """
        while not self.is_stream_ended:
            self.send_watching()
            time.sleep(self.watching_interval)
def niconico_web_login(self):
user_session = self.get_option("user-session")
email = self.get_option("email")
password = self.get_option("password")
if user_session is not None:
_log.info("User session cookie is provided. Using it.")
self.session.http.cookies.set(
"user_session",
user_session,
path="/",
domain="nicovideo.jp")
self.save_cookies()
return True
elif email is not None and password is not None:
_log.info("Email and password are provided. Attemping login.")
payload = {"mail_tel": email, "password": password}
resp = self.session.http.post(_login_url, data=payload,
params=_login_url_params)
_log.debug("Login response code: {0}".format(resp.status_code))
_log.trace("Login response body: {0}".format(resp.text))
_log.debug("Cookies: {0}".format(
self.session.http.cookies.get_dict()))
if self.session.http.cookies.get("user_session") is None:
try:
msg = extract_text(
resp.text, '<p class="notice__text">', "</p>")
except Exception as e:
_log.debug(e)
msg = "unknown reason"
_log.warn("Login failed. {0}".format(msg))
return False
else:
_log.info("Logged in.")
self.save_cookies()
return True
else:
_log.warn(
"Neither a email and password combination nor a user session "
"token is provided. Cannot attempt login.")
return False
class NicoHLSStream(HLSStream):
    """HLSStream subclass that reports its reader back to the plugin.

    The plugin keeps a handle on the stream reader so it can close it
    when the websocket API reports that the broadcast has ended.
    """
    def __init__(self, hls_stream, nicolive_plugin):
        super().__init__(
            hls_stream.session,
            force_restart=hls_stream.force_restart,
            start_offset=hls_stream.start_offset,
            duration=hls_stream.duration,
            **hls_stream.args)
        # url is already in hls_stream.args
        self.nicolive_plugin = nicolive_plugin
    def open(self):
        # Remember the reader on the plugin so the "disconnect" handler
        # can close it when the stream ends.
        reader = super().open()
        self.nicolive_plugin.stream_reader = reader
        return reader
def extract_text(text, left, right):
    """Extract the single substring of *text* between *left* and *right*.

    The delimiters are treated as literal text (re.escape()d), so HTML
    fragments containing regex metacharacters work correctly — the
    previous version interpolated them into the pattern unescaped.
    Raises Exception unless the delimiter pair occurs exactly once.
    """
    result = re.findall(
        "{0}(.*?){1}".format(re.escape(left), re.escape(right)), text)
    if len(result) != 1:
        raise Exception("Failed to extract string. "
                        "Expected 1, found {0}".format(len(result)))
    return result[0]
def parse_proxy_url(purl):
    """Adapted from UStreamTV plugin (ustreamtv.py)"""
    options = {}
    if not purl:
        return options
    parsed = urlparse(purl)
    options['proxy_type'] = parsed.scheme
    options['http_proxy_host'] = parsed.hostname
    if parsed.port:
        options['http_proxy_port'] = parsed.port
    if parsed.username:
        # Credentials are percent-decoded before being handed to websocket-client.
        auth_password = unquote_plus(parsed.password or "")
        options['http_proxy_auth'] = (unquote_plus(parsed.username), auth_password)
    return options
# Entry point streamlink uses to discover this plugin's class.
__plugin__ = NicoLive
|
|
import yaml
import subprocess
import argparse
import os
# Connection defaults come from the environment; --host/--key flags
# override them further below.
vm_host = os.environ.get('VM_HOST')
vm_key = os.environ.get('VM_KEY')
# Datastore root on the ESXi host where VM folders and sysprep images live.
esxi_datastore_folder = "/vmfs/volumes/datastore1"
parser = argparse.ArgumentParser(description='ESXi automation.')
parser.add_argument('-v', '--verbose', action="store_true", help='show progress')
parser.add_argument('-d', '--debug', action="store_true", help='run full debug')
parser.add_argument('--host', required=False, help="user@host")
parser.add_argument('--key', required=False, help="location of private key")
parser.add_argument('command', nargs="*", help="[<args>]")
args = parser.parse_args()
def do_usage():
    """Print the command-line usage help text."""
    print "Usage: esx [options] <command> [<args>]"
    print ""
    print "  --host        host in the form of user@host, or VM_HOST environment variable"
    print "  --key         keyfile location, or VM_KEY environment variable"
    print "  -v, --verbose more verbosity"
    print "  -d, --debug   debug level output including remote commands to the server"
    print ""
    print "Common commands:"
    print "    vm          vm commands are:"
    print "                list   list available vms on this server"
    print ""
    print "                add <name> <config_template> <user_data>"
    print "                       add a new vm, parameters are:"
    print "                       name - name of new vm "
    print "                       config_template - configuration template (in YAML)"
    print "                       user_data - cloud-init user-data file to include, see:"
    print "                                   https://cloudinit.readthedocs.org/en/latest/"
    print ""
    print "                delete <name>"
    print "                       power down and delete a vm, parameters are:"
    print "                       name - name of vm to delete"
    print ""
    print "                snapshot <action> <vm name>"
    print "                       list"
    print "                       create"
    print "                       remove"
    print "                       revert"
    print "                       clear"
    print ""
    print "                power <action> <vm name>"
    print "                       status"
    print "                       on"
    print "                       off"
    print "                       reset"
# Basic argument sanity checks: a command, a sub-command, and connection
# details (host + key) must all be present before doing any work.
if not args.command or len(args.command) < 1:
    print "no command provided."
    do_usage()
    exit(1)
if not args.command or len(args.command) < 2:
    print "no sub-command provided."
    do_usage()
    exit(1)
if not vm_host and not args.host:
    print "no --host parameter or VM_HOST environment variable provided."
    do_usage()
    exit(1)
if not vm_key and not args.key:
    print "no --key parameter or VM_KEY environment variable provided."
    do_usage()
    exit(1)
# override environment variable
if args.key:
    vm_key = args.key
# override environment variable
if args.host:
    vm_host = args.host
def debug(msg):
    """Emit *msg* with a DEBUG: prefix when --debug is active."""
    if not args.debug:
        return
    print("DEBUG:" + msg)
def verbose(msg):
    """Emit *msg* when --verbose is active."""
    if not args.verbose:
        return
    print(msg)
def execute(command):
    """Run *command* in a local shell; return {"returncode", "result"}.

    Failures are captured (non-zero returncode + captured output) rather
    than raised, so callers can report them uniformly.
    NOTE(review): shell=True with string interpolation is injection-prone;
    inputs appear to be operator-supplied — confirm before exposing wider.
    """
    debug("executing sub-process: " + command)
    try:
        return {"returncode": 0, "result": subprocess.check_output(command, shell=True)}
    except subprocess.CalledProcessError as ex:
        return {"returncode": ex.returncode, "result": ex.output}
def remote_execute(cmd):
    """Run *cmd* on the ESXi host over ssh using the configured key/host.

    Host-key checking is disabled. *cmd* is wrapped in single quotes for
    the remote shell, so it must not itself contain single quotes.
    """
    command = "ssh -o UserKnownHostsFile=/dev/null -o LogLevel=quiet -i " + vm_key + " " + vm_host + " '" + cmd + "'"
    return execute(command)
def get_id_list():
    """Return [[vm_id, name], ...] for all VMs registered on the host.

    Parses `vim-cmd vmsvc/getallvms` output (truncated at the first '[',
    i.e. before the datastore column). Returns None if ssh fails.
    """
    result = remote_execute("vim-cmd vmsvc/getallvms | cut -d '[' -f 1")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    id_list = result["result"].split('\n')
    result = []
    for l in id_list:
        # Data lines start with a numeric id; the header row fails the
        # isdigit() check and is skipped.
        x = l.find(" ")
        if x > 0:
            vm_id = l[0:x]
            if vm_id.isdigit():
                name = l[x:len(l)].strip()
                result.append([vm_id, name])
    debug(str(result))
    return result
def find_id(name):
    """Return the ESXi id of the VM called *name*, or None if not found."""
    for vm_id, vm_name in (get_id_list() or []):
        if vm_name == name:
            return vm_id
def validate_sysprep_args(sysargs):
    """Placeholder validation for `sysprep` arguments; always accepts."""
    return True
def sysprep_list(sysargs):
    """List the sysprep images stored on the ESXi datastore."""
    result = remote_execute("ls -1 " + esxi_datastore_folder + "/sysprep")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    print "List of sysprep images on host"
    # NOTE(review): this prints the whole result dict, not just the
    # listing text — possibly meant to be result["result"]; confirm.
    print result
    return True
def sysprep_add(sysargs):
    """Placeholder for adding a sysprep image; currently a no-op."""
    return True
def do_sysprep(sysargs):
    """Dispatch the `sysprep` sub-commands (list/add)."""
    handlers = {'list': sysprep_list, 'add': sysprep_add}
    handler = handlers.get(sysargs[1])
    if handler is not None:
        handler(sysargs)
    return True
def validate_template_args(tempargs):
    """Placeholder validation for `template` arguments; always accepts."""
    return True
def do_template(tempargs):
    """Handle the `template` command (not yet implemented)."""
    debug("template")
    return True
def validate_vm_args(vmargs):
    """Placeholder validation for `vm` arguments; always accepts."""
    return True
def vm_list(vmargs):
list = get_id_list()
print "List of registered VMs"
for item in list:
print("%5s %s" % (item[0], item[1]))
return True
def vm_power(vmargs):
arg = vmargs[2]
if arg == 'status':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/power.getstate " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'off':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/power.off " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'on':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/power.on " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'reset':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/power.reset " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
else:
print "invalid action [ " + arg + " ] for vm power command."
do_usage()
exit(1)
return True
def load_vm_config(filename):
    """Load a VM configuration template from a YAML file.

    Uses yaml.safe_load — plain yaml.load can execute arbitrary code from
    a crafted template — and a context manager instead of the deprecated
    file() builtin, so the handle is always closed.
    """
    with open(filename) as fd:
        config = yaml.safe_load(fd)
    # todo: validate
    return config
def build_seed_iso(name, userdatafile):
result = execute("rm -f user-data meta-data seed.iso")
if result["returncode"] != 0:
print "Error: " + result["result"]
return
debug(result["result"])
meta_file = open("meta-data", 'w')
meta_file.write("instance-id: iid-local01\n")
meta_file.write("local-hostname: " + name + "\n")
meta_file.write("hostname: " + name + "\n")
meta_file.close()
result = execute("cp -f " + userdatafile + " user-data")
result = execute("genisoimage -quiet -output seed.iso -volid cidata -joliet -rock user-data meta-data")
if result["returncode"] != 0:
print "Error: " + result["result"]
return
debug(result["result"])
def vm_add(vmargs):
    """Create and register a new VM from a sysprep image.

    vmargs: ["vm", "add", <name>, <config_template>, <user_data>].
    Clones the template vmdk, fills the .vmx template placeholders
    ({ISO}/{CPU}/{NAME}/{RAM}), uploads a cloud-init seed.iso, registers
    the VM with ESXi and optionally powers it on.
    """
    name = vmargs[2]
    filename = vmargs[3]
    userdatafile = vmargs[4]
    verbose("building new instance: " + name)
    verbose(" - template : " + filename)
    config = load_vm_config(filename)
    # Template values with defaults; disk resize and power-on are optional.
    mem = str(config.get("memory", "1024"))
    cpu = str(config.get("cpu", "1"))
    disk = config.get("disk")
    power = config.get("power")
    image = config.get("image", 'trusty-server-nocloud-amd64-disk1')
    verbose(" - image: " + image)
    # TODO check to see if VM by name already exists
    # create folder
    verbose(" - folder: " + esxi_datastore_folder + "/" + name)
    result = remote_execute("mkdir -p " + esxi_datastore_folder + "/" + name)
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    # copy sysprep vmdk to folder
    verbose(" - staging new vm...")
    result = remote_execute(
        "vmkfstools -i " + esxi_datastore_folder + "/sysprep/" + image + ".vmdk -d thin " +
        esxi_datastore_folder + "/" + name + "/" + name + ".vmdk ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    result = remote_execute(
        "cp " + esxi_datastore_folder + "/sysprep/" + image + ".vmx-template " +
        esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    if disk:
        verbose(" - resizing disk to: " + disk)
        result = remote_execute(
            "vmkfstools -X " + disk + " " + esxi_datastore_folder + "/" + name + "/" + name + ".vmdk ")
        if result["returncode"] != 0:
            print "Error: " + result["result"]
            return
        debug(result["result"])
    verbose(" - building seed iso... ")
    # create seed.iso with vm metadata and account info
    build_seed_iso(name, userdatafile)
    # copy seed.iso to remote folder
    result = execute(
        "scp -o UserKnownHostsFile=/dev/null -o LogLevel=quiet -i " + vm_key + " seed.iso " +
        vm_host + ":" + esxi_datastore_folder + "/" + name)
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    result = execute("rm -f seed.iso user-data meta-data")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    iso = "seed.iso"
    verbose(" - applying settings... ")
    result = remote_execute(
        "sed -i \"s/{ISO}/" + iso + "/g\" " + esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    # update vmx with profile on cpu/mem/iso in remote folder
    result = remote_execute(
        "sed -i \"s/{CPU}/" + cpu + "/g\" " + esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    result = remote_execute(
        "sed -i \"s/{NAME}/" + name + "/g\" " + esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    result = remote_execute(
        "sed -i \"s/{RAM}/" + mem + "/g\" " + esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    verbose(" - registering vm ")
    # register vm with esxi
    result = remote_execute("vim-cmd solo/registervm " + esxi_datastore_folder + "/" + name + "/" + name + ".vmx ")
    if result["returncode"] != 0:
        print "Error: " + result["result"]
        return
    debug(result["result"])
    # if requested, start vm
    if power:
        verbose(" - powering on vm. ")
        vm_id = find_id(name)
        if vm_id:
            result = remote_execute("vim-cmd vmsvc/power.on " + vm_id + "")
            if result["returncode"] != 0:
                print "Error: " + result["result"]
                return
            debug(result["result"])
    return True
def vm_delete(vmargs):
    """Power off and destroy the VM named in vmargs[2].

    Power-off and seed.iso cleanup failures are reported but not fatal
    (the VM may already be off); a failed destroy aborts.
    """
    name = vmargs[2]
    if name:
        vm_id = find_id(name)  # name of vm
        if vm_id:
            verbose("deleting " + name + " (id:" + vm_id + ")")
            verbose(" - powering off vm...")
            # force power off
            result = remote_execute("vim-cmd vmsvc/power.off " + vm_id + "")
            if result["returncode"] != 0:
                print "Error: " + result["result"]
            debug(result["result"])
            verbose(" - cleaning up seed.iso...")
            # remove seed.iso
            result = remote_execute("rm -f " + esxi_datastore_folder + "/" + name + "/seed.iso")
            if result["returncode"] != 0:
                print "Error: " + result["result"]
            debug(result["result"])
            verbose(" - destroying vm...")
            # destroy
            result = remote_execute("vim-cmd vmsvc/destroy " + vm_id + "")
            if result["returncode"] != 0:
                print "Error: " + result["result"]
                return
            debug(result["result"])
def vm_snapshot(vmargs):
arg = vmargs[2]
if arg == 'list':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/snapshot.get " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
debug(result["result"])
elif arg == 'create':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/snapshot.create " + vm_id + " " + vmargs[4])
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'remove':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/snapshot.remove " + vm_id + " " + vmargs[4])
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'revert':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/snapshot.revert " + vm_id + " " + vmargs[4])
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
elif arg == 'clear':
vm_id = find_id(vmargs[3]) # name of vm
if vm_id:
result = remote_execute("vim-cmd vmsvc/snapshot.removeall " + vm_id)
if result["returncode"] != 0:
print "Error: " + result["result"]
return
print result["result"]
else:
print "invalid action [ " + arg + " ] for vm snapshot command."
do_usage()
exit(1)
return True
def do_vm(vmargs):
subcmd = vmargs[1]
if subcmd == 'list':
vm_list(vmargs)
elif subcmd == 'power':
vm_power(vmargs)
elif subcmd == 'add':
vm_add(vmargs)
elif subcmd == 'delete':
vm_delete(vmargs)
elif subcmd == 'snapshot':
vm_snapshot(vmargs)
else:
print "invalid vm command."
do_usage()
exit(1)
return True
# Top-level command dispatch: each command validates its arguments first,
# then runs its handler.
if args.command[0] == 'sysprep':
    if validate_sysprep_args(args.command):
        do_sysprep(args.command)
elif args.command[0] == 'template':
    if validate_template_args(args.command):
        do_template(args.command)
elif args.command[0] == 'vm':
    if validate_vm_args(args.command):
        do_vm(args.command)
else:
    print "invalid command."
    do_usage()
    exit(1)
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
determine_ext,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
sanitized_Request,
unescapeHTML,
urlencode_postdata,
)
class UdemyIE(InfoExtractor):
    """Extractor for individual Udemy lectures.

    Handles login, free-course auto-enrollment, and the several places
    Udemy exposes formats (download_urls, view_html sources, HLS).
    """
    IE_NAME = 'udemy'
    _VALID_URL = r'''(?x)
                    https?://
                        www\.udemy\.com/
                        (?:
                            [^#]+\#/lecture/|
                            lecture/view/?\?lectureId=|
                            [^/]+/learn/v4/t/lecture/
                        )
                        (?P<id>\d+)
                    '''
    _LOGIN_URL = 'https://www.udemy.com/join/login-popup/?displayType=ajax&showSkipButton=1'
    _ORIGIN_URL = 'https://www.udemy.com'
    _NETRC_MACHINE = 'udemy'

    _TESTS = [{
        'url': 'https://www.udemy.com/java-tutorial/#/lecture/172757',
        'md5': '98eda5b657e752cf945d8445e261b5c5',
        'info_dict': {
            'id': '160614',
            'ext': 'mp4',
            'title': 'Introduction and Installation',
            'description': 'md5:c0d51f6f21ef4ec65f091055a5eef876',
            'duration': 579.29,
        },
        'skip': 'Requires udemy account credentials',
    }, {
        # new URL schema
        'url': 'https://www.udemy.com/electric-bass-right-from-the-start/learn/v4/t/lecture/4580906',
        'only_matching': True,
    }]

    def _extract_course_info(self, webpage, video_id):
        """Return (course_id, course_title) parsed from the lecture page."""
        # Course metadata is embedded as JSON in an ng-init attribute.
        course = self._parse_json(
            unescapeHTML(self._search_regex(
                r'ng-init=["\'].*\bcourse=({.+?});', webpage, 'course', default='{}')),
            video_id, fatal=False) or {}
        course_id = course.get('id') or self._search_regex(
            (r'"id"\s*:\s*(\d+)', r'data-course-id=["\'](\d+)'),
            webpage, 'course id')
        return course_id, course.get('title')

    def _enroll_course(self, base_url, webpage, course_id):
        """Enroll in a free course; raise for paid (checkout) courses."""
        def combine_url(base_url, url):
            return compat_urlparse.urljoin(base_url, url) if not url.startswith('http') else url

        checkout_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/payment/checkout/.+?)\1',
            webpage, 'checkout url', group='url', default=None))
        if checkout_url:
            raise ExtractorError(
                'Course %s is not free. You have to pay for it before you can download. '
                'Use this URL to confirm purchase: %s'
                % (course_id, combine_url(base_url, checkout_url)),
                expected=True)

        enroll_url = unescapeHTML(self._search_regex(
            r'href=(["\'])(?P<url>(?:https?://(?:www\.)?udemy\.com)?/course/subscribe/.+?)\1',
            webpage, 'enroll url', group='url', default=None))
        if enroll_url:
            webpage = self._download_webpage(
                combine_url(base_url, enroll_url),
                course_id, 'Enrolling in the course',
                headers={'Referer': base_url})
            if '>You have enrolled in' in webpage:
                self.to_screen('%s: Successfully enrolled in the course' % course_id)

    def _download_lecture(self, course_id, lecture_id):
        """Fetch the lecture JSON (title, description, view_html, asset)."""
        return self._download_json(
            'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?'
            % (course_id, lecture_id),
            lecture_id, 'Downloading lecture JSON', query={
                'fields[lecture]': 'title,description,view_html,asset',
                'fields[asset]': 'asset_type,stream_url,thumbnail_url,download_urls,data',
            })

    def _handle_error(self, response):
        """Raise ExtractorError for Udemy API-level error payloads."""
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            error_str = 'Udemy returned error #%s: %s' % (error.get('code'), error.get('message'))
            error_data = error.get('data')
            if error_data:
                error_str += ' - %s' % error_data.get('formErrors')
            raise ExtractorError(error_str, expected=True)

    def _download_json(self, url_or_request, *args, **kwargs):
        """Wrap JSON downloads with Udemy auth headers + error handling."""
        headers = {
            'X-Udemy-Snail-Case': 'true',
            'X-Requested-With': 'XMLHttpRequest',
        }
        # Propagate the session cookies as the API's expected headers.
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'client_id':
                headers['X-Udemy-Client-Id'] = cookie.value
            elif cookie.name == 'access_token':
                headers['X-Udemy-Bearer-Token'] = cookie.value
                headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value

        if isinstance(url_or_request, compat_urllib_request.Request):
            for header, value in headers.items():
                url_or_request.add_header(header, value)
        else:
            url_or_request = sanitized_Request(url_or_request, headers=headers)

        response = super(UdemyIE, self)._download_json(url_or_request, *args, **kwargs)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Log in with netrc/CLI credentials, if any were supplied."""
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_popup = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login popup')

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'href=["\'](?:https://www\.udemy\.com)?/user/logout/',
                r'>Logout<'))

        # already logged in
        if is_logged(login_popup):
            return

        login_form = self._form_hidden_inputs('login-form', login_popup)
        login_form.update({
            'email': username,
            'password': password,
        })

        response = self._download_webpage(
            self._LOGIN_URL, None, 'Logging in as %s' % username,
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._ORIGIN_URL,
                'Origin': self._ORIGIN_URL,
            })

        if not is_logged(response):
            error = self._html_search_regex(
                r'(?s)<div[^>]+class="form-errors[^"]*">(.+?)</div>',
                response, 'error message', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_extract(self, url):
        lecture_id = self._match_id(url)

        webpage = self._download_webpage(url, lecture_id)

        course_id, _ = self._extract_course_info(webpage, lecture_id)

        try:
            lecture = self._download_lecture(course_id, lecture_id)
        except ExtractorError as e:
            # Error could possibly mean we are not enrolled in the course
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                self._enroll_course(url, webpage, course_id)
                lecture = self._download_lecture(course_id, lecture_id)
            else:
                raise

        title = lecture['title']
        description = lecture.get('description')

        asset = lecture['asset']

        asset_type = asset.get('asset_type') or asset.get('assetType')
        if asset_type != 'Video':
            raise ExtractorError(
                'Lecture %s is not a video' % lecture_id, expected=True)

        stream_url = asset.get('stream_url') or asset.get('streamUrl')
        if stream_url:
            youtube_url = self._search_regex(
                r'(https?://www\.youtube\.com/watch\?v=.*)', stream_url, 'youtube URL', default=None)
            if youtube_url:
                return self.url_result(youtube_url, 'Youtube')

        video_id = asset['id']
        thumbnail = asset.get('thumbnail_url') or asset.get('thumbnailUrl')
        duration = float_or_none(asset.get('data', {}).get('duration'))

        formats = []

        def extract_output_format(src, format_id):
            # BUGFIX: format_id is now an explicit parameter; previously
            # it was read as a free variable from the download_urls loop
            # and raised NameError when the view_html path ran first.
            return {
                'url': src['url'],
                'format_id': '%sp' % (src.get('height') or format_id),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'vbr': int_or_none(src.get('video_bitrate_in_kbps')),
                'vcodec': src.get('video_codec'),
                'fps': int_or_none(src.get('frame_rate')),
                'abr': int_or_none(src.get('audio_bitrate_in_kbps')),
                'acodec': src.get('audio_codec'),
                'asr': int_or_none(src.get('audio_sample_rate')),
                'tbr': int_or_none(src.get('total_bitrate_in_kbps')),
                'filesize': int_or_none(src.get('file_size_in_bytes')),
            }

        outputs = asset.get('data', {}).get('outputs')
        if not isinstance(outputs, dict):
            outputs = {}

        def add_output_format_meta(f, key):
            output = outputs.get(key)
            if isinstance(output, dict):
                output_format = extract_output_format(output, key)
                output_format.update(f)
                return output_format
            return f

        download_urls = asset.get('download_urls')
        if isinstance(download_urls, dict):
            video = download_urls.get('Video')
            if isinstance(video, list):
                for format_ in video:
                    video_url = format_.get('file')
                    if not video_url:
                        continue
                    format_id = format_.get('label')
                    f = {
                        'url': format_['file'],
                        'format_id': '%sp' % format_id,
                        'height': int_or_none(format_id),
                    }
                    if format_id:
                        # Some videos contain additional metadata (e.g.
                        # https://www.udemy.com/ios9-swift/learn/#/lecture/3383208)
                        f = add_output_format_meta(f, format_id)
                    formats.append(f)

        view_html = lecture.get('view_html')
        if view_html:
            view_html_urls = set()
            for source in re.findall(r'<source[^>]+>', view_html):
                attributes = extract_attributes(source)
                src = attributes.get('src')
                if not src:
                    continue
                res = attributes.get('data-res')
                height = int_or_none(res)
                if src in view_html_urls:
                    continue
                view_html_urls.add(src)
                if attributes.get('type') == 'application/x-mpegURL' or determine_ext(src) == 'm3u8':
                    m3u8_formats = self._extract_m3u8_formats(
                        src, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id='hls', fatal=False)
                    for f in m3u8_formats:
                        # HLS variant URLs encode height/bitrate in the path.
                        m = re.search(r'/hls_(?P<height>\d{3,4})_(?P<tbr>\d{2,})/', f['url'])
                        if m:
                            if not f.get('height'):
                                f['height'] = int(m.group('height'))
                            if not f.get('tbr'):
                                f['tbr'] = int(m.group('tbr'))
                    formats.extend(m3u8_formats)
                else:
                    formats.append(add_output_format_meta({
                        'url': src,
                        'format_id': '%dp' % height if height else None,
                        'height': height,
                    }, res))

        self._sort_formats(formats, field_preference=('height', 'width', 'tbr', 'format_id'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats
        }
class UdemyCourseIE(UdemyIE):
    """Playlist extractor: expands a course URL into its video lectures."""
    IE_NAME = 'udemy:course'
    _VALID_URL = r'https?://www\.udemy\.com/(?P<id>[^/?#&]+)'
    _TESTS = []
    @classmethod
    def suitable(cls, url):
        # Lecture URLs are handled by UdemyIE; only match course pages.
        return False if UdemyIE.suitable(url) else super(UdemyCourseIE, cls).suitable(url)
    def _real_extract(self, url):
        course_path = self._match_id(url)
        webpage = self._download_webpage(url, course_path)
        course_id, title = self._extract_course_info(webpage, course_path)
        self._enroll_course(url, webpage, course_id)
        response = self._download_json(
            'https://www.udemy.com/api-2.0/courses/%s/cached-subscriber-curriculum-items' % course_id,
            course_id, 'Downloading course curriculum', query={
                'fields[chapter]': 'title,object_index',
                'fields[lecture]': 'title,asset',
                'page_size': '1000',
            })
        entries = []
        # The curriculum is a flat list: chapter entries set the context
        # applied to the lecture entries that follow them.
        chapter, chapter_number = [None] * 2
        for entry in response['results']:
            clazz = entry.get('_class')
            if clazz == 'lecture':
                asset = entry.get('asset')
                if isinstance(asset, dict):
                    # Skip quizzes, articles and other non-video assets.
                    asset_type = asset.get('asset_type') or asset.get('assetType')
                    if asset_type != 'Video':
                        continue
                lecture_id = entry.get('id')
                if lecture_id:
                    entry = {
                        '_type': 'url_transparent',
                        'url': 'https://www.udemy.com/%s/learn/v4/t/lecture/%s' % (course_path, entry['id']),
                        'title': entry.get('title'),
                        'ie_key': UdemyIE.ie_key(),
                    }
                    if chapter_number:
                        entry['chapter_number'] = chapter_number
                    if chapter:
                        entry['chapter'] = chapter
                    entries.append(entry)
            elif clazz == 'chapter':
                chapter_number = entry.get('object_index')
                chapter = entry.get('title')
        return self.playlist_result(entries, course_id, title)
|
|
import configparser
import os
import shutil
import stat
from binascii import unhexlify
from collections import namedtuple
import msgpack
from .logger import create_logger
logger = create_logger()
from .constants import CACHE_README
from .hashindex import ChunkIndex, ChunkIndexEntry
from .helpers import Location
from .helpers import Error
from .helpers import get_cache_dir, get_security_dir
from .helpers import int_to_bigint, bigint_to_int, bin_to_hex
from .helpers import format_file_size
from .helpers import safe_ns
from .helpers import yes, hostname_is_unique
from .helpers import remove_surrogates
from .helpers import ProgressIndicatorPercent, ProgressIndicatorMessage
from .item import ArchiveItem, ChunkListEntry
from .crypto.key import PlaintextKey
from .locking import Lock
from .platform import SaveFile
from .remote import cache_if_remote
# One files-cache entry: stat-derived fields plus the file's chunk ids
# (NOTE(review): exact field semantics, e.g. how `age` is bumped, live in
# the Cache code below this view — confirm there).
FileCacheEntry = namedtuple('FileCacheEntry', 'age inode size mtime chunk_ids')
class SecurityManager:
"""
Tracks repositories. Ensures that nothing bad happens (repository swaps,
replay attacks, unknown repositories etc.).
This is complicated by the Cache being initially used for this, while
only some commands actually use the Cache, which meant that other commands
did not perform these checks.
Further complications were created by the Cache being a cache, so it
could be legitimately deleted, which is annoying because Borg didn't
recognize repositories after that.
Therefore a second location, the security database (see get_security_dir),
was introduced which stores this information. However, this means that
the code has to deal with a cache existing but no security DB entry,
or inconsistencies between the security DB and the cache which have to
be reconciled, and also with no cache existing but a security DB entry.
"""
    def __init__(self, repository):
        self.repository = repository
        # Per-repository security dir (survives cache deletion) and the
        # corresponding cache dir.
        self.dir = get_security_dir(repository.id_str)
        self.cache_dir = cache_dir(repository)
        # State files: last seen key type, repo location, manifest timestamp.
        self.key_type_file = os.path.join(self.dir, 'key-type')
        self.location_file = os.path.join(self.dir, 'location')
        self.manifest_ts_file = os.path.join(self.dir, 'manifest-timestamp')
    def known(self):
        # The key-type file is written by save(); its presence means this
        # repository has been accessed before.
        return os.path.exists(self.key_type_file)
    def key_matches(self, key):
        """Return True if *key* has the TYPE recorded for this repository.

        False for unknown repositories; implicitly returns None (falsy)
        when the key-type file exists but cannot be read.
        """
        if not self.known():
            return False
        try:
            with open(self.key_type_file, 'r') as fd:
                type = fd.read()
                return type == str(key.TYPE)
        except OSError as exc:
            logger.warning('Could not read/parse key type file: %s', exc)
    def save(self, manifest, key):
        """Persist location, key type and manifest timestamp to the security dir."""
        logger.debug('security: saving state for %s to %s', self.repository.id_str, self.dir)
        current_location = self.repository._location.canonical_path()
        logger.debug('security: current location %s', current_location)
        logger.debug('security: key type %s', str(key.TYPE))
        logger.debug('security: manifest timestamp %s', manifest.timestamp)
        with open(self.location_file, 'w') as fd:
            fd.write(current_location)
        with open(self.key_type_file, 'w') as fd:
            fd.write(str(key.TYPE))
        with open(self.manifest_ts_file, 'w') as fd:
            fd.write(manifest.timestamp)
    def assert_location_matches(self, cache_config=None):
        """Ask the user before accessing a repository whose location changed.

        Raises Cache.RepositoryAccessAborted unless the user (or the
        BORG_RELOCATED_REPO_ACCESS_IS_OK override) accepts the new
        location; on acceptance the stored location is updated.
        """
        # Warn user before sending data to a relocated repository
        try:
            with open(self.location_file) as fd:
                previous_location = fd.read()
            logger.debug('security: read previous location %r', previous_location)
        except FileNotFoundError:
            logger.debug('security: previous location file %s not found', self.location_file)
            previous_location = None
        except OSError as exc:
            logger.warning('Could not read previous location file: %s', exc)
            previous_location = None
        if cache_config and cache_config.previous_location and previous_location != cache_config.previous_location:
            # Reconcile cache and security dir; we take the cache location.
            previous_location = cache_config.previous_location
            logger.debug('security: using previous_location of cache: %r', previous_location)
        repository_location = self.repository._location.canonical_path()
        if previous_location and previous_location != repository_location:
            msg = ("Warning: The repository at location {} was previously located at {}\n".format(
                repository_location, previous_location) +
                "Do you want to continue? [yN] ")
            if not yes(msg, false_msg="Aborting.", invalid_msg="Invalid answer, aborting.",
                       retry=False, env_var_override='BORG_RELOCATED_REPO_ACCESS_IS_OK'):
                raise Cache.RepositoryAccessAborted()
            # adapt on-disk config immediately if the new location was accepted
            logger.debug('security: updating location stored in cache and security dir')
            with open(self.location_file, 'w') as fd:
                fd.write(repository_location)
            if cache_config:
                cache_config.save()
def assert_no_manifest_replay(self, manifest, key, cache_config=None):
try:
with open(self.manifest_ts_file) as fd:
timestamp = fd.read()
logger.debug('security: read manifest timestamp %r', timestamp)
except FileNotFoundError:
logger.debug('security: manifest timestamp file %s not found', self.manifest_ts_file)
timestamp = ''
except OSError as exc:
logger.warning('Could not read previous location file: %s', exc)
timestamp = ''
if cache_config:
timestamp = max(timestamp, cache_config.timestamp or '')
logger.debug('security: determined newest manifest timestamp as %s', timestamp)
# If repository is older than the cache or security dir something fishy is going on
if timestamp and timestamp > manifest.timestamp:
if isinstance(key, PlaintextKey):
raise Cache.RepositoryIDNotUnique()
else:
raise Cache.RepositoryReplay()
def assert_key_type(self, key, cache_config=None):
    """Ensure the repository's key type did not change since last access.

    Guards against an encrypted repository being silently swapped for an
    unencrypted one (or one using a different key type).
    """
    if cache_config:
        stored_type = cache_config.key_type
        if stored_type is not None and stored_type != str(key.TYPE):
            raise Cache.EncryptionMethodMismatch()
    if self.known():
        if not self.key_matches(key):
            raise Cache.EncryptionMethodMismatch()
def assert_secure(self, manifest, key, *, cache_config=None, warn_if_unencrypted=True):
    """Run the full set of repository security checks.

    When no *cache_config* is supplied, a CacheConfig is opened on demand
    (if one exists on disk) so the checks can also consult the cache.
    warn_if_unencrypted=False is only used while initializing a new
    repository, to avoid asking about a repo that is being created.
    """
    self.assert_access_unknown(warn_if_unencrypted, manifest, key)
    if cache_config:
        self._assert_secure(manifest, key, cache_config)
        logger.debug('security: repository checks ok, allowing access')
        return
    on_demand_config = CacheConfig(self.repository)
    if not on_demand_config.exists():
        self._assert_secure(manifest, key)
    else:
        with on_demand_config:
            self._assert_secure(manifest, key, on_demand_config)
    logger.debug('security: repository checks ok, allowing access')
def _assert_secure(self, manifest, key, cache_config=None):
    """Run the individual security checks (location, key type, replay).

    If the repository was unknown so far, remember it now that all
    checks passed.
    """
    self.assert_location_matches(cache_config)
    self.assert_key_type(key, cache_config)
    self.assert_no_manifest_replay(manifest, key, cache_config)
    if not self.known():
        logger.debug('security: remembering previously unknown repository')
        self.save(manifest, key)
def assert_access_unknown(self, warn_if_unencrypted, manifest, key):
    """Ask the user before touching an unknown *unencrypted* repository.

    warn_if_unencrypted=False is only used for initializing a new
    repository, to avoid asking about a repo that is currently being
    created.
    """
    # encrypted or already-known repositories need no confirmation
    if key.logically_encrypted or self.known():
        return
    prompt = ("Warning: Attempting to access a previously unknown unencrypted repository!\n" +
              "Do you want to continue? [yN] ")
    if warn_if_unencrypted:
        allow_access = yes(prompt, false_msg="Aborting.",
                           invalid_msg="Invalid answer, aborting.",
                           retry=False, env_var_override='BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK')
    else:
        # initialization path: never prompt
        allow_access = True
    if not allow_access:
        raise Cache.CacheInitAbortedError()
    if warn_if_unencrypted:
        logger.debug('security: remembering unknown unencrypted repository (explicitly allowed)')
    else:
        logger.debug('security: initializing unencrypted repository')
    self.save(manifest, key)
def assert_secure(repository, manifest):
    """Module-level convenience wrapper: run all security checks for *repository*."""
    manager = SecurityManager(repository)
    manager.assert_secure(manifest, manifest.key)
def recanonicalize_relative_location(cache_location, repository):
    """Adapt a repo location recorded by borg < 1.0.8rc1.

    Older versions canonicalized relative repository paths differently
    (see #1655 and #1741): the cache may contain a '/~/...' path where
    current code produces '/./...'.  If the two locations differ only in
    that prefix, return the current canonical path so the user is not
    warned about a "moved" repository; otherwise return the cached value
    unchanged.
    """
    repo_location = repository._location.canonical_path()
    rl = Location(repo_location)
    cl = Location(cache_location)
    endpoints_match = (cl.proto == rl.proto and cl.user == rl.user and
                       cl.host == rl.host and cl.port == rl.port)
    if endpoints_match and cl.path and rl.path:
        # everything is the same except (possibly) the relative path prefix
        if cl.path.startswith('/~/') and rl.path.startswith('/./') and cl.path[3:] == rl.path[3:]:
            return repo_location
    return cache_location
def cache_dir(repository, path=None):
    """Return the cache directory for *repository*, or the explicit *path* if given."""
    if path:
        return path
    return os.path.join(get_cache_dir(), repository.id_str)
class CacheConfig:
    """Manage the cache's 'config' file and its lock.

    Stores the repository id, the last seen manifest id/timestamp, the
    key type and the previous repository location.  Usable as a context
    manager: an exclusive lock is acquired on open() and released on
    close().
    """

    def __init__(self, repository, path=None, lock_wait=None):
        # path: explicit cache dir; defaults to the per-repo cache dir
        self.repository = repository
        self.path = cache_dir(repository, path)
        self.config_path = os.path.join(self.path, 'config')
        self.lock = None
        self.lock_wait = lock_wait

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def exists(self):
        """Return True if a cache config file is present on disk."""
        return os.path.exists(self.config_path)

    def create(self):
        """Write a fresh (version 1) cache config; must not exist yet."""
        assert not self.exists()
        config = configparser.ConfigParser(interpolation=None)
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', self.repository.id_str)
        config.set('cache', 'manifest', '')
        with SaveFile(self.config_path) as fd:
            config.write(fd)

    def open(self):
        # take an exclusive lock before reading the config
        self.lock = Lock(os.path.join(self.path, 'lock'), exclusive=True, timeout=self.lock_wait,
                         kill_stale_locks=hostname_is_unique()).acquire()
        self.load()

    def load(self):
        """Read the config file and populate the instance attributes."""
        self._config = configparser.ConfigParser(interpolation=None)
        self._config.read(self.config_path)
        self._check_upgrade(self.config_path)
        self.id = self._config.get('cache', 'repository')
        self.manifest_id = unhexlify(self._config.get('cache', 'manifest'))
        self.timestamp = self._config.get('cache', 'timestamp', fallback=None)
        self.key_type = self._config.get('cache', 'key_type', fallback=None)
        previous_location = self._config.get('cache', 'previous_location', fallback=None)
        if previous_location:
            # adapt pre-1.0.8rc1 canonicalization, see recanonicalize_relative_location()
            self.previous_location = recanonicalize_relative_location(previous_location, self.repository)
        else:
            self.previous_location = None

    def save(self, manifest=None, key=None):
        """Persist manifest id/timestamp, key type and current repo location."""
        if manifest:
            self._config.set('cache', 'manifest', manifest.id_str)
            self._config.set('cache', 'timestamp', manifest.timestamp)
        if key:
            self._config.set('cache', 'key_type', str(key.TYPE))
        self._config.set('cache', 'previous_location', self.repository._location.canonical_path())
        with SaveFile(self.config_path) as fd:
            self._config.write(fd)

    def close(self):
        # release the lock; safe to call repeatedly
        if self.lock is not None:
            self.lock.release()
            self.lock = None

    def _check_upgrade(self, config_path):
        """Verify the cache config version; release the lock and raise on mismatch."""
        try:
            cache_version = self._config.getint('cache', 'version')
            wanted_version = 1
            if cache_version != wanted_version:
                self.close()
                raise Exception('%s has unexpected cache version %d (wanted: %d).' %
                                (config_path, cache_version, wanted_version))
        except configparser.NoSectionError:
            self.close()
            raise Exception('%s does not look like a Borg cache.' % config_path) from None
class Cache:
    """Client Side cache
    """
    class RepositoryIDNotUnique(Error):
        """Cache is newer than repository - do you have multiple, independently updated repos with same ID?"""

    class RepositoryReplay(Error):
        """Cache is newer than repository - this is either an attack or unsafe (multiple repos with same ID)"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last access, refusing to continue"""

    @staticmethod
    def break_lock(repository, path=None):
        # forcibly remove a (possibly stale) cache lock
        path = cache_dir(repository, path)
        Lock(os.path.join(path, 'lock'), exclusive=True).break_lock()

    @staticmethod
    def destroy(repository, path=None):
        """destroy the cache for ``repository`` or at ``path``"""
        path = path or os.path.join(get_cache_dir(), repository.id_str)
        config = os.path.join(path, 'config')
        if os.path.exists(config):
            os.remove(config)  # kill config first
        shutil.rmtree(path)
def __init__(self, repository, key, manifest, path=None, sync=True, do_files=False, warn_if_unencrypted=True,
             progress=False, lock_wait=None):
    """
    :param do_files: use file metadata cache
    :param warn_if_unencrypted: print warning if accessing unknown unencrypted repository
    :param lock_wait: timeout for lock acquisition (None: return immediately if lock unavailable)
    :param sync: do :meth:`.sync`
    """
    self.repository = repository
    self.key = key
    self.manifest = manifest
    self.progress = progress
    self.do_files = do_files
    self.timestamp = None
    self.txn_active = False
    self.path = cache_dir(repository, path)
    self.security_manager = SecurityManager(repository)
    self.cache_config = CacheConfig(self.repository, self.path, lock_wait)
    # Warn user before sending data to a never seen before unencrypted repository
    if not os.path.exists(self.path):
        self.security_manager.assert_access_unknown(warn_if_unencrypted, manifest, key)
        self.create()
    self.open()
    try:
        self.security_manager.assert_secure(manifest, key, cache_config=self.cache_config)
        # only sync when the cached chunks index is out of date wrt. the manifest
        if sync and self.manifest.id != self.cache_config.manifest_id:
            self.sync()
            self.commit()
    except:  # release the lock on *any* failure, then re-raise unchanged
        self.close()
        raise
def __enter__(self):
    return self

def __exit__(self, exc_type, exc_val, exc_tb):
    self.close()

def __str__(self):
    # human-readable stats table; size fields are pre-formatted strings
    # (see format_tuple()), chunk counts remain ints
    fmt = """\
All archives: {0.total_size:>20s} {0.total_csize:>20s} {0.unique_csize:>20s}
Unique chunks Total chunks
Chunk index: {0.total_unique_chunks:20d} {0.total_chunks:20d}"""
    return fmt.format(self.format_tuple())

# summary record of the chunk index, see stats()/format_tuple()
Summary = namedtuple('Summary', ['total_size', 'total_csize', 'unique_size', 'unique_csize', 'total_unique_chunks',
                                 'total_chunks'])
def stats(self):
    """Return the chunk index summary as a dict (sizes in bytes)."""
    # XXX: this should really be moved down to `hashindex.pyx`
    stats = self.Summary(*self.chunks.summarize())._asdict()
    return stats

def format_tuple(self):
    """Like :meth:`stats`, but with the size fields human-formatted."""
    stats = self.stats()
    for field in ['total_size', 'total_csize', 'unique_csize']:
        stats[field] = format_file_size(stats[field])
    return self.Summary(**stats)

def chunks_stored_size(self):
    # deduplicated, compressed size of everything stored
    return self.stats()['unique_csize']
def create(self):
    """Create a new empty cache at `self.path`
    """
    os.makedirs(self.path)
    with open(os.path.join(self.path, 'README'), 'w') as fd:
        fd.write(CACHE_README)
    self.cache_config.create()
    # empty chunks index + per-archive index dir + empty files cache
    ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
    os.makedirs(os.path.join(self.path, 'chunks.archive.d'))
    with SaveFile(os.path.join(self.path, 'files'), binary=True) as fd:
        pass  # empty file

def _do_open(self):
    # (re)load config and chunks index from disk; files cache loads lazily
    self.cache_config.load()
    self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
    self.files = None

def open(self):
    """Lock and load the cache; rolls back any aborted transaction."""
    if not os.path.isdir(self.path):
        raise Exception('%s Does not look like a Borg cache' % self.path)
    self.cache_config.open()
    self.rollback()
def close(self):
    """Release the cache config lock; safe to call more than once."""
    cache_config, self.cache_config = self.cache_config, None
    if cache_config is not None:
        cache_config.close()
def _read_files(self):
    """Load the on-disk 'files' cache into self.files (path_hash -> packed entry)."""
    self.files = {}
    self._newest_mtime = None
    logger.debug('Reading files cache ...')
    with open(os.path.join(self.path, 'files'), 'rb') as fd:
        u = msgpack.Unpacker(use_list=True)
        while True:
            # stream the file in 64 KiB chunks through the unpacker
            data = fd.read(64 * 1024)
            if not data:
                break
            u.feed(data)
            for path_hash, item in u:
                entry = FileCacheEntry(*item)
                # age is bumped on load; entries too old get expired on commit()
                # in the end, this takes about 240 Bytes per file
                self.files[path_hash] = msgpack.packb(entry._replace(age=entry.age + 1))
def begin_txn(self):
    # Initialize transaction snapshot
    pi = ProgressIndicatorMessage(msgid='cache.begin_transaction')
    txn_dir = os.path.join(self.path, 'txn.tmp')
    os.mkdir(txn_dir)
    pi.output('Initializing cache transaction: Reading config')
    shutil.copy(os.path.join(self.path, 'config'), txn_dir)
    pi.output('Initializing cache transaction: Reading chunks')
    shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
    pi.output('Initializing cache transaction: Reading files')
    shutil.copy(os.path.join(self.path, 'files'), txn_dir)
    # rename last: only a fully copied snapshot becomes 'txn.active'
    os.rename(os.path.join(self.path, 'txn.tmp'),
              os.path.join(self.path, 'txn.active'))
    self.txn_active = True
    pi.finish()
def commit(self):
    """Commit transaction
    """
    if not self.txn_active:
        return
    self.security_manager.save(self.manifest, self.key)
    pi = ProgressIndicatorMessage(msgid='cache.commit')
    if self.files is not None:
        if self._newest_mtime is None:
            # was never set because no files were modified/added
            self._newest_mtime = 2 ** 63 - 1  # nanoseconds, good until y2262
        ttl = int(os.environ.get('BORG_FILES_CACHE_TTL', 20))
        pi.output('Saving files cache')
        with SaveFile(os.path.join(self.path, 'files'), binary=True) as fd:
            for path_hash, item in self.files.items():
                # Only keep files seen in this backup that are older than newest mtime seen in this backup -
                # this is to avoid issues with filesystem snapshots and mtime granularity.
                # Also keep files from older backups that have not reached BORG_FILES_CACHE_TTL yet.
                entry = FileCacheEntry(*msgpack.unpackb(item))
                if entry.age == 0 and bigint_to_int(entry.mtime) < self._newest_mtime or \
                   entry.age > 0 and entry.age < ttl:
                    msgpack.pack((path_hash, entry), fd)
    pi.output('Saving cache config')
    self.cache_config.save(self.manifest, self.key)
    pi.output('Saving chunks cache')
    self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
    # retire the transaction snapshot: active -> tmp, then remove
    os.rename(os.path.join(self.path, 'txn.active'),
              os.path.join(self.path, 'txn.tmp'))
    shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    self.txn_active = False
    pi.finish()
def rollback(self):
    """Roll back partial and aborted transactions
    """
    # Remove partial transaction
    if os.path.exists(os.path.join(self.path, 'txn.tmp')):
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    # Roll back active transaction
    txn_dir = os.path.join(self.path, 'txn.active')
    if os.path.exists(txn_dir):
        # restore the snapshot, then retire it (active -> tmp -> removed)
        shutil.copy(os.path.join(txn_dir, 'config'), self.path)
        shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
        shutil.copy(os.path.join(txn_dir, 'files'), self.path)
        os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
    self.txn_active = False
    # re-read config/chunks so in-memory state matches the rolled-back disk state
    self._do_open()
def sync(self):
    """Re-synchronize chunks cache with repository.

    Maintains a directory with known backup archive indexes, so it only
    needs to fetch infos from repo and build a chunk index once per backup
    archive.
    If out of sync, missing archive indexes get added, outdated indexes
    get removed and a new master chunks index is built by merging all
    archive indexes.
    """
    archive_path = os.path.join(self.path, 'chunks.archive.d')

    def mkpath(id, suffix=''):
        # per-archive index file path (hex archive id + optional suffix)
        id_hex = bin_to_hex(id)
        path = os.path.join(archive_path, id_hex + suffix)
        return path.encode('utf-8')

    def cached_archives():
        # ids of archives we already have a cached per-archive index for
        if self.do_cache:
            fns = os.listdir(archive_path)
            # filenames with 64 hex digits == 256bit
            return set(unhexlify(fn) for fn in fns if len(fn) == 64)
        else:
            return set()

    def repo_archives():
        # ids of all archives currently listed in the manifest
        return set(info.id for info in self.manifest.archives.list())

    def cleanup_outdated(ids):
        # remove cached indexes for archives that no longer exist
        for id in ids:
            os.unlink(mkpath(id))

    def fetch_and_build_idx(archive_id, repository, key, chunk_idx):
        """Fetch an archive's metadata and add all its chunk refs to chunk_idx."""
        cdata = repository.get(archive_id)
        data = key.decrypt(archive_id, cdata)
        chunk_idx.add(archive_id, 1, len(data), len(cdata))
        archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
        if archive.version != 1:
            raise Exception('Unknown archive metadata version')
        unpacker = msgpack.Unpacker()
        for item_id, chunk in zip(archive.items, repository.get_many(archive.items)):
            data = key.decrypt(item_id, chunk)
            chunk_idx.add(item_id, 1, len(data), len(chunk))
            unpacker.feed(data)
            for item in unpacker:
                if not isinstance(item, dict):
                    logger.error('Error: Did not get expected metadata dict - archive corrupted!')
                    continue
                for chunk_id, size, csize in item.get(b'chunks', []):
                    chunk_idx.add(chunk_id, 1, size, csize)
        if self.do_cache:
            # persist the per-archive index atomically (write .tmp, then rename)
            fn = mkpath(archive_id)
            fn_tmp = mkpath(archive_id, suffix='.tmp')
            try:
                chunk_idx.write(fn_tmp)
            except Exception:
                os.unlink(fn_tmp)
            else:
                os.rename(fn_tmp, fn)

    def lookup_name(archive_id):
        # map archive id back to its name (for progress / log output)
        for info in self.manifest.archives.list():
            if info.id == archive_id:
                return info.name

    def create_master_idx(chunk_idx):
        """Merge all (cached or freshly built) archive indexes into one master index."""
        logger.info('Synchronizing chunks cache...')
        cached_ids = cached_archives()
        archive_ids = repo_archives()
        logger.info('Archives: %d, w/ cached Idx: %d, w/ outdated Idx: %d, w/o cached Idx: %d.' % (
            len(archive_ids), len(cached_ids),
            len(cached_ids - archive_ids), len(archive_ids - cached_ids), ))
        # deallocates old hashindex, creates empty hashindex:
        chunk_idx.clear()
        cleanup_outdated(cached_ids - archive_ids)
        if archive_ids:
            chunk_idx = None
            if self.progress:
                pi = ProgressIndicatorPercent(total=len(archive_ids), step=0.1,
                                              msg='%3.0f%% Syncing chunks cache. Processing archive %s',
                                              msgid='cache.sync')
            for archive_id in archive_ids:
                archive_name = lookup_name(archive_id)
                if self.progress:
                    pi.show(info=[remove_surrogates(archive_name)])
                if self.do_cache:
                    if archive_id in cached_ids:
                        archive_chunk_idx_path = mkpath(archive_id)
                        logger.info("Reading cached archive chunk index for %s ..." % archive_name)
                        archive_chunk_idx = ChunkIndex.read(archive_chunk_idx_path)
                    else:
                        logger.info('Fetching and building archive index for %s ...' % archive_name)
                        archive_chunk_idx = ChunkIndex()
                        fetch_and_build_idx(archive_id, repository, self.key, archive_chunk_idx)
                    logger.info("Merging into master chunks index ...")
                    if chunk_idx is None:
                        # we just use the first archive's idx as starting point,
                        # to avoid growing the hash table from 0 size and also
                        # to save 1 merge call.
                        chunk_idx = archive_chunk_idx
                    else:
                        chunk_idx.merge(archive_chunk_idx)
                else:
                    chunk_idx = chunk_idx or ChunkIndex()
                    logger.info('Fetching archive index for %s ...' % archive_name)
                    fetch_and_build_idx(archive_id, repository, self.key, chunk_idx)
            if self.progress:
                pi.finish()
        logger.info('Done.')
        return chunk_idx

    def legacy_cleanup():
        """bring old cache dirs into the desired state (cleanup and adapt)"""
        # BUG FIX: these were bare ``except:`` clauses, which also swallow
        # KeyboardInterrupt/SystemExit; only filesystem errors are expected here.
        try:
            os.unlink(os.path.join(self.path, 'chunks.archive'))
        except OSError:
            pass
        try:
            os.unlink(os.path.join(self.path, 'chunks.archive.tmp'))
        except OSError:
            pass
        try:
            os.mkdir(archive_path)
        except OSError:
            pass

    self.begin_txn()
    with cache_if_remote(self.repository) as repository:
        legacy_cleanup()
        # TEMPORARY HACK: to avoid archive index caching, create a FILE named ~/.cache/borg/REPOID/chunks.archive.d -
        # this is only recommended if you have a fast, low latency connection to your repo (e.g. if repo is local disk)
        self.do_cache = os.path.isdir(archive_path)
        self.chunks = create_master_idx(self.chunks)
def add_chunk(self, id, chunk, stats, overwrite=False, wait=True):
    """Store *chunk* in the repository unless it is already present.

    If the chunk is already known (and overwrite is False), only its
    refcount is incremented.  Returns a ChunkListEntry(id, size, csize)
    and accounts the operation in *stats*.
    """
    if not self.txn_active:
        self.begin_txn()
    size = len(chunk)
    # also validates that a known chunk has the same size (see seen_chunk)
    refcount = self.seen_chunk(id, size)
    if refcount and not overwrite:
        return self.chunk_incref(id, stats)
    data = self.key.encrypt(chunk)
    csize = len(data)
    self.repository.put(id, data, wait=wait)
    self.chunks.add(id, 1, size, csize)
    stats.update(size, csize, not refcount)
    return ChunkListEntry(id, size, csize)
def seen_chunk(self, id, size=None):
    """Return the refcount of chunk *id* (0 if unknown).

    If *size* is given and the index has a stored size, they must agree.
    """
    refcount, stored_size, _ = self.chunks.get(id, ChunkIndexEntry(0, None, None))
    size_conflict = (size is not None and stored_size is not None
                     and size != stored_size)
    if size_conflict:
        # we already have a chunk with that id, but different size.
        # this is either a hash collision (unlikely) or corruption or a bug.
        raise Exception("chunk has same id [%r], but different size (stored: %d new: %d)!" % (
            id, stored_size, size))
    return refcount
def chunk_incref(self, id, stats):
    """Increment the refcount of an existing chunk and account for it in *stats*."""
    if not self.txn_active:
        self.begin_txn()
    count, size, csize = self.chunks.incref(id)
    stats.update(size, csize, False)
    return ChunkListEntry(id, size, csize)

def chunk_decref(self, id, stats, wait=True):
    """Decrement the refcount; delete the chunk from the repository when it hits zero."""
    if not self.txn_active:
        self.begin_txn()
    count, size, csize = self.chunks.decref(id)
    if count == 0:
        del self.chunks[id]
        self.repository.delete(id, wait=wait)
        stats.update(-size, -csize, True)
    else:
        stats.update(-size, -csize, False)
def file_known_and_unchanged(self, path_hash, st, ignore_inode=False):
    """Return cached chunk ids for *path_hash* if the file is unchanged.

    Returns None when the files cache is disabled, the file is not a
    regular file, it is unknown, or its size/mtime/inode changed.
    """
    if not (self.do_files and stat.S_ISREG(st.st_mode)):
        return None
    if self.files is None:
        # lazy-load the files cache on first use
        self._read_files()
    entry = self.files.get(path_hash)
    if not entry:
        return None
    entry = FileCacheEntry(*msgpack.unpackb(entry))
    if (entry.size == st.st_size and bigint_to_int(entry.mtime) == st.st_mtime_ns and
            (ignore_inode or entry.inode == st.st_ino)):
        # we ignored the inode number in the comparison above or it is still same.
        # if it is still the same, replacing it in the tuple doesn't change it.
        # if we ignored it, a reason for doing that is that files were moved to a new
        # disk / new fs (so a one-time change of inode number is expected) and we wanted
        # to avoid everything getting chunked again. to be able to re-enable the inode
        # number comparison in a future backup run (and avoid chunking everything
        # again at that time), we need to update the inode number in the cache with what
        # we see in the filesystem.
        self.files[path_hash] = msgpack.packb(entry._replace(inode=st.st_ino, age=0))
        return entry.chunk_ids
    else:
        return None
def memorize_file(self, path_hash, st, ids):
    """Remember stat info and chunk *ids* for a regular file just backed up."""
    if not (self.do_files and stat.S_ISREG(st.st_mode)):
        return
    # safe_ns() presumably sanitizes/clamps the ns timestamp — see helper definition
    mtime_ns = safe_ns(st.st_mtime_ns)
    entry = FileCacheEntry(age=0, inode=st.st_ino, size=st.st_size, mtime=int_to_bigint(mtime_ns), chunk_ids=ids)
    self.files[path_hash] = msgpack.packb(entry)
    # track the newest mtime seen; used by commit() to expire borderline entries
    self._newest_mtime = max(self._newest_mtime or 0, mtime_ns)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides a database backend to the central scheduler. This lets you see historical runs.
See :ref:`TaskHistory` for information about how to turn on the task history feature.
"""
#
# Description: Added codes for visualization of how long each task takes
# running-time until it reaches the next status (failed or done)
# At "{base_url}/tasklist", all completed(failed or done) tasks are shown.
# At "{base_url}/tasklist", a user can select one specific task to see
# how its running-time has changed over time.
# At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph
# that represents the changes of the running-time for a selected task
# up to the next status (failed or done).
# This visualization let us know how the running-time of the specific task
# has changed over time.
#
# Copyright 2015 Naver Corp.
# Author Yeseul Park (yeseul.park@navercorp.com)
#
import datetime
import logging
from contextlib import contextmanager
from luigi import six
from luigi import configuration
from luigi import task_history
from luigi.task_status import DONE, FAILED, PENDING, RUNNING
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import sqlalchemy.orm.collections
Base = sqlalchemy.ext.declarative.declarative_base()
logger = logging.getLogger('luigi-interface')
class DbTaskHistory(task_history.TaskHistory):
    """
    Task History that writes to a database using sqlalchemy.
    Also has methods for useful db queries.
    """
    @contextmanager
    def _session(self, session=None):
        # Yield the given session unchanged, or create a new one that
        # commits on success and rolls back on any exception.
        if session:
            yield session
        else:
            session = self.session_factory()
            try:
                yield session
            except:
                session.rollback()
                raise
            else:
                session.commit()
def __init__(self):
    """Build engine/session factory from the [task_history] db_connection setting."""
    config = configuration.get_config()
    connection_string = config.get('task_history', 'db_connection')
    self.engine = sqlalchemy.create_engine(connection_string)
    # expire_on_commit=False: records stay usable after the session commits
    self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)
    Base.metadata.create_all(self.engine)
    self.tasks = {}  # task_id -> TaskRecord
def task_scheduled(self, task_id):
    """Record that *task_id* entered the PENDING state."""
    pending_task = self._get_task(task_id, status=PENDING)
    self._add_task_event(pending_task, TaskEvent(event_name=PENDING, ts=datetime.datetime.now()))

def task_finished(self, task_id, successful):
    """Record a DONE (success) or FAILED event for *task_id*."""
    status = DONE if successful else FAILED
    finished_task = self._get_task(task_id, status=status)
    self._add_task_event(finished_task, TaskEvent(event_name=status, ts=datetime.datetime.now()))

def task_started(self, task_id, worker_host):
    """Record that *task_id* started RUNNING on *worker_host*."""
    running_task = self._get_task(task_id, status=RUNNING, host=worker_host)
    self._add_task_event(running_task, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now()))
def _get_task(self, task_id, status, host=None):
    """Fetch (or create) the in-memory Task for *task_id*, updating status/host."""
    task = self.tasks.get(task_id)
    if task is None:
        task = self.tasks[task_id] = task_history.Task(task_id, status, host)
    else:
        task.status = status
        if host:
            task.host = host
    return task
def _add_task_event(self, task, event):
    # _find_or_create_task is a generator: iterating it runs the append
    # inside the session context it manages.
    for (task_record, session) in self._find_or_create_task(task):
        task_record.events.append(event)

def _find_or_create_task(self, task):
    """Yield (TaskRecord, session) for *task*, creating the record if needed.

    Note: the statements after ``yield`` run when the caller resumes the
    generator, still inside the session context manager, so the host and
    record_id updates are committed together with the caller's changes.
    """
    with self._session() as session:
        if task.record_id is not None:
            logger.debug("Finding task with record_id [%d]", task.record_id)
            task_record = session.query(TaskRecord).get(task.record_id)
            if not task_record:
                raise Exception("Task with record_id, but no matching Task record!")
            yield (task_record, session)
        else:
            task_record = TaskRecord(name=task.task_family, host=task.host)
            for (k, v) in six.iteritems(task.parameters):
                task_record.parameters[k] = TaskParameter(name=k, value=v)
            session.add(task_record)
            yield (task_record, session)
        if task.host:
            task_record.host = task.host
        task.record_id = task_record.id
def find_all_by_parameters(self, task_name, session=None, **task_params):
    """
    Find tasks with the given task_name and the same parameters as the kwargs.
    """
    with self._session(session) as session:
        tasks = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name).order_by(TaskEvent.ts).all()
        for task in tasks:
            # parameter values are stored as strings, so compare stringified
            if all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params)):
                yield task

def find_all_by_name(self, task_name, session=None):
    """
    Find all tasks with the given task_name.
    """
    # no parameter filter: every record with that name matches
    return self.find_all_by_parameters(task_name, session)
def find_latest_runs(self, session=None):
    """
    Return tasks that have been updated in the past 24 hours.
    """
    with self._session(session) as session:
        yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
        # group_by de-duplicates the rows multiplied by the event join
        return session.query(TaskRecord).\
            join(TaskEvent).\
            filter(TaskEvent.ts >= yesterday).\
            group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\
            order_by(TaskEvent.ts.desc()).\
            all()

def find_all_runs(self, session=None):
    """
    Return all tasks that have been updated.
    """
    with self._session(session) as session:
        return session.query(TaskRecord).all()

def find_all_events(self, session=None):
    """
    Return all running/failed/done events.
    """
    with self._session(session) as session:
        return session.query(TaskEvent).all()

def find_task_by_id(self, id, session=None):
    """
    Find task with the given record ID.
    """
    with self._session(session) as session:
        return session.query(TaskRecord).get(id)
class TaskParameter(Base):
    """
    Table to track luigi.Parameter()s of a Task.
    """
    __tablename__ = 'task_parameters'
    # composite primary key: (owning task, parameter name)
    task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True)
    value = sqlalchemy.Column(sqlalchemy.String(256))

    def __repr__(self):
        return "TaskParameter(task_id=%d, name=%s, value=%s)" % (self.task_id, self.name, self.value)
class TaskEvent(Base):
    """
    Table to track when a task is scheduled, starts, finishes, and fails.
    """
    __tablename__ = 'task_events'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'))
    event_name = sqlalchemy.Column(sqlalchemy.String(20))
    ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False)

    def __repr__(self):
        # BUG FIX: the format string was missing its closing parenthesis.
        return "TaskEvent(task_id=%s, event_name=%s, ts=%s)" % (self.task_id, self.event_name, self.ts)
class TaskRecord(Base):
    """
    Base table to track information about a luigi.Task.

    References to other tables are available through task.events, task.parameters, etc.
    """
    __tablename__ = 'tasks'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.String(128), index=True)
    host = sqlalchemy.Column(sqlalchemy.String(128))
    # parameters keyed by name; removed together with the owning task
    parameters = sqlalchemy.orm.relationship(
        'TaskParameter',
        collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'),
        cascade="all, delete-orphan")
    # events ordered newest-first (by timestamp, then id)
    events = sqlalchemy.orm.relationship(
        'TaskEvent',
        order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)),
        backref='task')

    def __repr__(self):
        return "TaskRecord(name=%s, host=%s)" % (self.name, self.host)
|
|
# -*- coding: utf-8 -*-
"""Forms related to accounts management."""
from __future__ import unicode_literals
from collections import OrderedDict
from functools import reduce
from django import forms
from django.conf import settings
from django.contrib.auth import password_validation
from django.http import QueryDict
from django.urls import reverse
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core import signals as core_signals
from modoboa.core.models import User
from modoboa.lib import exceptions as lib_exceptions, fields as lib_fields
from modoboa.lib.email_utils import split_mailbox
from modoboa.lib.form_utils import (
DynamicForm, TabForms, WizardForm, WizardStep
)
from modoboa.lib.permissions import get_account_roles
from modoboa.lib.validators import validate_utf8_email
from modoboa.lib.web_utils import render_to_json_response, size2integer
from modoboa.parameters import tools as param_tools
from .. import lib, models, signals
class AccountFormGeneral(forms.ModelForm):
    """General account form."""

    # login name; validated as an e-mail address for simple users (see clean_username)
    username = forms.CharField(
        label=ugettext_lazy("Username"),
        help_text=ugettext_lazy(
            "The user's name. Must be a valid e-mail address for simple users "
            "or administrators with a mailbox."
        )
    )
    # role choices are extended/locked per editing user in __init__
    role = forms.ChoiceField(
        label=ugettext_lazy("Role"),
        choices=[("", ugettext_lazy("Choose"))],
        help_text=ugettext_lazy("What level of permission this user will have")
    )
    random_password = forms.BooleanField(
        label=ugettext_lazy("Random password"),
        help_text=ugettext_lazy(
            "Generate a random password. If you're updating this account and "
            "check this box, a new password will be generated."
        ),
        required=False
    )
    # password1/password2 requiredness is enforced in clean(), not here
    password1 = forms.CharField(
        label=ugettext_lazy("Password"), widget=forms.widgets.PasswordInput,
        required=False
    )
    password2 = forms.CharField(
        label=ugettext_lazy("Confirmation"),
        widget=forms.widgets.PasswordInput,
        help_text=ugettext_lazy(
            "Enter the same password as above, for verification."
        ),
        required=False
    )

    class Meta:
        model = User
        fields = (
            "username", "first_name", "last_name", "role", "is_active",
            "master_user",
        )
        labels = {
            "is_active": ugettext_lazy("Enabled")
        }
def __init__(self, user, *args, **kwargs):
    """Adapt the form to *user*, the administrator doing the editing.

    Reorders fields, hides/locks the role field for domain admins (and
    resellers editing themselves), and removes password fields for
    LDAP-directbind accounts.
    """
    super(AccountFormGeneral, self).__init__(*args, **kwargs)
    self.fields = OrderedDict(
        (key, self.fields[key]) for key in
        ["role", "username", "first_name", "last_name",
         "random_password", "password1", "password2",
         "master_user", "is_active"]
    )
    self.user = user
    condition = (
        user.role == "DomainAdmins" or
        user.role == "Resellers" and self.instance == user
    )
    if condition:
        # these users may not change the role: replace it by a hidden field
        self.fields["role"] = forms.CharField(
            label="",
            widget=forms.HiddenInput(attrs={"class": "form-control"}),
            required=False
        )
    else:
        self.fields["role"].choices += (
            get_account_roles(user, self.instance)
            if self.instance.pk else get_account_roles(user)
        )
    if not user.is_superuser:
        del self.fields["master_user"]
    if not self.instance.pk:
        return
    # editing an existing account from here on
    domain_disabled = (
        hasattr(self.instance, "mailbox") and
        not self.instance.mailbox.domain.enabled
    )
    if domain_disabled:
        self.fields["is_active"].widget.attrs["disabled"] = "disabled"
    if args and domain_disabled:
        # bound form (data given): drop is_active entirely so it cannot be changed
        del self.fields["is_active"]
    self.fields["role"].initial = self.instance.role
    condition = (
        not self.instance.is_local and
        param_tools.get_global_parameter(
            "ldap_auth_method", app="core") == "directbind")
    if condition:
        # passwords are managed by the LDAP directory in directbind mode
        del self.fields["random_password"]
        del self.fields["password1"]
        del self.fields["password2"]
def domain_is_disabled(self):
    """Little shortcut to get the domain's state.

    We need this information inside a template and the form is the
    only object available...
    """
    has_mailbox = hasattr(self.instance, "mailbox")
    if not has_mailbox:
        return False
    domain = self.instance.mailbox.domain
    return not domain.enabled
def clean_role(self):
    """Force the role for editors who may not choose one freely."""
    editing_self = self.instance == self.user
    user_role = self.user.role
    if user_role == "DomainAdmins":
        return "DomainAdmins" if editing_self else "SimpleUsers"
    if user_role == "Resellers" and editing_self:
        return "Resellers"
    return self.cleaned_data["role"]
def clean_username(self):
    """username must be a valid email address for simple users."""
    username = self.cleaned_data["username"].lower()
    if "role" not in self.cleaned_data:
        return username
    if self.cleaned_data["role"] != "SimpleUsers" and "@" not in username:
        # non-simple accounts without a mailbox may use a plain login name
        return username
    # FIX: dropped a redundant second .lower() call — the value is
    # already lowercased above.
    validate_utf8_email(username)
    return username
def clean(self):
    """Check master user mode."""
    super(AccountFormGeneral, self).clean()
    if self.errors:
        return self.cleaned_data
    condition = (
        self.cleaned_data.get("master_user") and
        self.cleaned_data["role"] != "SuperAdmins"
    )
    if condition:
        self.add_error(
            "master_user",
            _("Only super administrators are allowed for this mode")
        )
    random_password = self.cleaned_data.get("random_password")
    if random_password:
        # generated password goes into password2, which save() reads
        self.cleaned_data["password2"] = lib.make_password()
    elif "random_password" in self.fields and not random_password:
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data.get("password2", "")
        empty_password = password1 == "" and password2 == ""
        # existing accounts may leave both fields empty (keep current password)
        if not self.instance.pk or not empty_password:
            if not password1:
                self.add_error("password1", _("This field is required."))
            if not password2:
                self.add_error("password2", _("This field is required."))
            if self.errors:
                return self.cleaned_data
            if password1 != password2:
                self.add_error(
                    "password2",
                    _("The two password fields didn't match."))
                return self.cleaned_data
            try:
                password_validation.validate_password(
                    password2, self.instance)
            except forms.ValidationError as ve:
                self.add_error("password2", ve.messages)
    return self.cleaned_data
def save(self, commit=True):
    """Persist the account.

    Refuses to let users disable their own account.  New accounts inherit
    the site default language.  The password is only updated when a
    non-empty password2 was validated, and the role is applied after the
    initial save (role assignment requires a primary key).
    """
    account = super(AccountFormGeneral, self).save(commit=False)
    if self.user == account and not self.cleaned_data["is_active"]:
        raise lib_exceptions.PermDeniedException(
            _("You can't disable your own account"))
    if not account.pk:
        account.language = settings.LANGUAGE_CODE
    if commit:
        if self.cleaned_data.get("password2", "") != "":
            account.set_password(self.cleaned_data["password2"])
        account.save()
        account.role = self.cleaned_data["role"]
    return account
class AccountProfileForm(forms.ModelForm):
    """Form to edit account profile."""

    class Meta:
        model = User
        # Only non-critical, self-service profile fields are exposed here.
        fields = ("secondary_email", "phone_number", "language")
class AccountFormMail(forms.Form, DynamicForm):
    """Form to handle mail part (mailbox, aliases, sender addresses)."""

    email = lib_fields.UTF8EmailField(
        label=ugettext_lazy("E-mail"), required=False)
    create_alias_with_old_address = forms.BooleanField(
        label=ugettext_lazy("Create an alias using the old address"),
        required=False,
        initial=False
    )
    quota = forms.CharField(
        label=ugettext_lazy("Quota"),
        required=False,
        help_text=_(
            "Quota for this mailbox, can be expressed in KB, MB (default) or "
            "GB. Define a custom value or "
            "use domain's default one. Leave empty to define an "
            "unlimited value (not allowed for domain "
            "administrators)."
        ),
        widget=forms.widgets.TextInput(attrs={"class": "form-control"})
    )
    quota_act = forms.BooleanField(required=False)
    aliases = lib_fields.UTF8AndEmptyUserEmailField(
        label=ugettext_lazy("Alias(es)"),
        required=False,
        help_text=ugettext_lazy(
            "Alias(es) of this mailbox. Indicate only one address per input, "
            "press ENTER to add a new input. To create a catchall alias, just "
            "enter the domain name (@domain.tld)."
        )
    )
    senderaddress = lib_fields.UTF8AndEmptyUserEmailField(
        label=ugettext_lazy("Sender addresses"),
        required=False,
        help_text=ugettext_lazy(
            "Additional sender address(es) for this account. The user will be "
            "allowed to send emails using this address, even if it "
            "does not exist locally. Indicate one address per input. Press "
            "ENTER to add a new input."
        )
    )

    def __init__(self, user, *args, **kwargs):
        """Store the connected user and expand dynamic alias/sender fields."""
        self.mb = kwargs.pop("instance", None)
        self.user = user
        super(AccountFormMail, self).__init__(*args, **kwargs)
        self.field_widths = {
            "quota": 3
        }
        if self.mb is not None:
            # Editing an existing mailbox: email becomes mandatory and
            # existing aliases/sender addresses are mirrored as extra fields.
            self.fields["email"].required = True
            qset = self.mb.aliasrecipient_set.filter(alias__internal=False)
            for cpt, ralias in enumerate(qset):
                name = "aliases_{}".format(cpt + 1)
                self._create_field(
                    lib_fields.UTF8AndEmptyUserEmailField, name,
                    ralias.alias.address)
            for cpt, saddress in enumerate(self.mb.senderaddress_set.all()):
                name = "senderaddress_{}".format(cpt + 1)
                self._create_field(
                    lib_fields.UTF8AndEmptyUserEmailField, name,
                    saddress.address)
            self.fields["email"].initial = self.mb.full_address
            self.fields["quota_act"].initial = self.mb.use_domain_quota
            if not self.mb.use_domain_quota and self.mb.quota:
                self.fields["quota"].initial = self.mb.quota
            self.fields["create_alias_with_old_address"].initial = (
                param_tools.get_global_parameter(
                    "create_alias_on_mbox_rename")
            )
        else:
            # Mailbox creation: the rename-related field makes no sense yet.
            del self.fields["create_alias_with_old_address"]
            self.fields["quota_act"].initial = True
        if len(args) and isinstance(args[0], QueryDict):
            # Re-create dynamic fields submitted through POST data.
            self._load_from_qdict(
                args[0], "aliases", lib_fields.UTF8AndEmptyUserEmailField)
            self._load_from_qdict(
                args[0], "senderaddress",
                lib_fields.UTF8AndEmptyUserEmailField)

    def clean_email(self):
        """Ensure lower case emails and check creation constraints."""
        email = self.cleaned_data["email"].lower()
        self.locpart, domname = split_mailbox(email)
        if not domname:
            return email
        try:
            self.domain = models.Domain.objects.get(name=domname)
        except models.Domain.DoesNotExist:
            raise forms.ValidationError(_("Domain does not exist"))
        if not self.mb:
            # Creation: let other apps (limits, ...) veto the new mailbox.
            try:
                core_signals.can_create_object.send(
                    sender=self.__class__, context=self.domain,
                    object_type="mailboxes")
            except lib_exceptions.ModoboaException as inst:
                raise forms.ValidationError(inst)
        return email

    def clean_quota(self):
        """Convert quota to Bytes."""
        return size2integer(self.cleaned_data["quota"], output_unit="MB")

    def clean(self):
        """Custom fields validation.

        Check if quota is >= 0 only when the domain value is not used, then
        collect submitted aliases and sender addresses, validating domain
        access along the way.
        """
        cleaned_data = super(AccountFormMail, self).clean()
        use_default_domain_quota = cleaned_data["quota_act"]
        # Use .get(): the "quota" key is absent from cleaned_data when
        # clean_quota() raised, and a plain lookup would raise a KeyError.
        quota = cleaned_data.get("quota")
        condition = (
            not use_default_domain_quota and
            quota is not None and
            quota < 0)
        if condition:
            self.add_error("quota", _("Must be a positive integer"))
        self.aliases = []
        self.sender_addresses = []
        for name, value in list(cleaned_data.items()):
            if value == "":
                continue
            if name.startswith("aliases"):
                local_part, domname = split_mailbox(value)
                domain = models.Domain.objects.filter(name=domname).first()
                if not domain:
                    self.add_error(name, _("Local domain does not exist"))
                    continue
                if not self.user.can_access(domain):
                    self.add_error(
                        name, _("You don't have access to this domain"))
                    continue
                self.aliases.append(value.lower())
            elif name.startswith("senderaddress"):
                local_part, domname = split_mailbox(value)
                domain = models.Domain.objects.filter(name=domname).first()
                # Non-local domains are allowed for sender addresses.
                if domain and not self.user.can_access(domain):
                    self.add_error(
                        name, _("You don't have access to this domain"))
                    continue
                self.sender_addresses.append(value.lower())
        return cleaned_data

    def create_mailbox(self, user, account):
        """Create a mailbox associated to :kw:`account`."""
        if not user.can_access(self.domain):
            raise lib_exceptions.PermDeniedException
        core_signals.can_create_object.send(
            self.__class__, context=user, klass=models.Mailbox)
        self.mb = models.Mailbox(
            address=self.locpart, domain=self.domain, user=account,
            use_domain_quota=self.cleaned_data["quota_act"])
        # Superusers, and domain admins without a personal quota limit, may
        # bypass per-domain quota rules.
        override_rules = (
            user.is_superuser or
            user.has_perm("admin.add_domain") and
            not user.userobjectlimit_set.get(name="quota").max_value
        )
        self.mb.set_quota(self.cleaned_data["quota"], override_rules)
        self.mb.save(creator=user)

    def _update_aliases(self, user, account):
        """Update mailbox aliases: drop removed ones, create new ones."""
        qset = self.mb.aliasrecipient_set.select_related("alias").filter(
            alias__internal=False)
        for ralias in qset:
            if ralias.alias.address not in self.aliases:
                alias = ralias.alias
                ralias.delete()
                # Keep the alias object alive while it still has recipients.
                if alias.recipients_count > 0:
                    continue
                alias.delete()
            else:
                self.aliases.remove(ralias.alias.address)
        if not self.aliases:
            return
        # Let other apps veto the creation of the remaining aliases.
        core_signals.can_create_object.send(
            self.__class__, context=user, klass=models.Alias,
            count=len(self.aliases))
        core_signals.can_create_object.send(
            self.__class__, context=self.mb.domain,
            object_type="mailbox_aliases", count=len(self.aliases))
        for alias in self.aliases:
            if self.mb.aliasrecipient_set.select_related("alias").filter(
                    alias__address=alias).exists():
                continue
            local_part, domname = split_mailbox(alias)
            al = models.Alias(address=alias, enabled=account.is_active)
            al.domain = models.Domain.objects.get(name=domname)
            al.save()
            al.set_recipients([self.mb.full_address])
            al.post_create(user)

    def _update_sender_addresses(self):
        """Update mailbox sender addresses."""
        for saddress in self.mb.senderaddress_set.all():
            if saddress.address not in self.sender_addresses:
                saddress.delete()
            else:
                self.sender_addresses.remove(saddress.address)
        if not self.sender_addresses:
            return
        to_create = []
        for saddress in self.sender_addresses:
            to_create.append(
                models.SenderAddress(address=saddress, mailbox=self.mb))
        models.SenderAddress.objects.bulk_create(to_create)

    def save(self, user, account):
        """Save or update account mailbox."""
        if self.cleaned_data["email"] == "":
            return None
        if self.cleaned_data["quota_act"]:
            # Domain default quota wins over any custom value.
            self.cleaned_data["quota"] = None
        if not hasattr(self, "mb") or self.mb is None:
            self.create_mailbox(user, account)
        else:
            self.cleaned_data["use_domain_quota"] = (
                self.cleaned_data["quota_act"])
            if self.cleaned_data.get("create_alias_with_old_address", False):
                # Keep the previous address working through an alias.
                self.aliases.append(self.mb.full_address)
            self.mb.update_from_dict(user, self.cleaned_data)
        account.email = self.cleaned_data["email"]
        account.save()
        self._update_aliases(user, account)
        self._update_sender_addresses()
        return self.mb
class AccountPermissionsForm(forms.Form, DynamicForm):
    """A form to assign domain(s) permission."""

    domains = lib_fields.DomainNameField(
        label=ugettext_lazy("Domain(s)"),
        required=False,
        help_text=ugettext_lazy("Domain(s) that user administrates")
    )

    def __init__(self, *args, **kwargs):
        """Pop the edited account and mirror its domains as extra fields."""
        if "instance" in kwargs:
            self.account = kwargs["instance"]
            del kwargs["instance"]
        super(AccountPermissionsForm, self).__init__(*args, **kwargs)
        if not hasattr(self, "account") or self.account is None:
            return
        qset = models.Domain.objects.get_for_admin(self.account)
        for pos, dom in enumerate(qset):
            name = "domains_%d" % (pos + 1)
            self._create_field(lib_fields.DomainNameField, name, dom.name)
        if len(args) and isinstance(args[0], QueryDict):
            self._load_from_qdict(
                args[0], "domains", lib_fields.DomainNameField)

    def save(self):
        """Synchronize administered domains with the submitted values.

        Grants admin access for newly listed domains and revokes it for
        domains that are no longer listed.
        """
        current_domains = [
            dom.name for dom in
            models.Domain.objects.get_for_admin(self.account)
        ]
        # All fields of this form are "domains"/"domains_N"; collect the
        # non-empty submitted names once.
        submitted = [
            value for name, value in self.cleaned_data.items()
            if name.startswith("domains") and value not in ["", None]
        ]
        for value in submitted:
            if value not in current_domains:
                domain = models.Domain.objects.get(name=value)
                domain.add_admin(self.account)
        # BUGFIX: the previous code used `if not filter(lambda ...)`, which
        # is always false under Python 3 (filter() returns a truthy
        # iterator), so admin rights were never revoked.  An explicit
        # membership test restores the intended behaviour.
        for domain in models.Domain.objects.get_for_admin(self.account):
            if domain.name not in submitted:
                domain.remove_admin(self.account)
class AccountForm(TabForms):
    """Account edition form.

    Tabbed form combining general, profile, mail and permissions sub-forms,
    plus any extra forms contributed through the ``extra_account_forms``
    signal.
    """

    def __init__(self, request, *args, **kwargs):
        self.user = request.user
        self.forms = [
            {"id": "general", "title": _("General"),
             "formtpl": "admin/account_general_form.html",
             "cls": AccountFormGeneral,
             "new_args": [self.user], "mandatory": True},
            {"id": "profile", "title": _("Profile"),
             "formtpl": "admin/account_profile_form.html",
             "cls": AccountProfileForm},
            {"id": "mail",
             "title": _("Mail"), "formtpl": "admin/mailform.html",
             "cls": AccountFormMail,
             "new_args": [self.user]},
            {"id": "perms", "title": _("Permissions"),
             "formtpl": "admin/permsform.html",
             "cls": AccountPermissionsForm}
        ]
        cbargs = {"user": self.user}
        if "instances" in kwargs:
            cbargs["account"] = kwargs["instances"]["general"]
        # Let other apps contribute extra form definitions.
        results = signals.extra_account_forms.send(
            sender=self.__class__, **cbargs)
        # NOTE(review): relies on reduce() being in scope at module level
        # (functools.reduce under Python 3) — confirm against file header.
        self.forms += reduce(
            lambda a, b: a + b, [result[1] for result in results])
        super(AccountForm, self).__init__(request, *args, **kwargs)

    def extra_context(self, context):
        """Inject title/action/form id used by the tabbed form template."""
        account = self.instances["general"]
        context.update({
            "title": account.username,
            "formid": "accountform",
            "action": reverse("admin:account_change",
                              args=[account.id]),
        })

    def check_perms(self, account):
        """Check if perms form must displayed or not."""
        return (
            self.user.is_superuser and
            not account.is_superuser and
            account.has_perm("core.add_user")
        )

    def _before_is_valid(self, form):
        # The general form is always validated; optional forms may be
        # skipped via a check_<id>() method or a veto from the
        # check_extra_account_form signal.
        if form["id"] == "general":
            return True
        if hasattr(self, "check_%s" % form["id"]):
            if not getattr(self, "check_%s" % form["id"])(self.account):
                return False
            return True
        results = signals.check_extra_account_form.send(
            sender=self.__class__, account=self.account, form=form)
        results = [result[1] for result in results]
        if False in results:
            return False
        return True

    def is_valid(self):
        """Two steps validation."""
        # Remember the previous role so role changes can be detected later.
        self.instances["general"].oldgroup = self.instances["general"].role
        if super(AccountForm, self).is_valid(mandatory_only=True):
            # The account must exist before optional forms can validate
            # against it.
            self.account = self.forms[0]["instance"].save()
            return super(AccountForm, self).is_valid(optional_only=True)
        return False

    def save(self):
        """Custom save method.

        As forms interact with each other, it is simpler to make
        custom code to save them.
        """
        self.forms[1]["instance"].save()
        self.forms[2]["instance"].save(self.user, self.account)
        if len(self.forms) <= 3:
            return
        for f in self.forms[3:]:
            f["instance"].save()

    def done(self):
        return render_to_json_response(_("Account updated"))
class AccountWizard(WizardForm):
    """Account creation wizard: a general step followed by a mail step."""

    def __init__(self, request):
        super(AccountWizard, self).__init__(request)
        general_step = WizardStep(
            "general", AccountFormGeneral, _("General"),
            new_args=[request.user]
        )
        mail_step = WizardStep(
            "mail", AccountFormMail, _("Mail"),
            "admin/mailform.html",
            new_args=[request.user]
        )
        self.add_step(general_step)
        self.add_step(mail_step)

    def extra_context(self, context):
        """Inject title/action/form id used by the wizard template."""
        context.update({
            "title": _("New account"),
            "action": reverse("admin:account_add"),
            "formid": "newaccount_form"
        })

    def done(self):
        """Create the account, then its mailbox, and report success."""
        creator = self.request.user
        account = self.first_step.form.save()
        account.post_create(creator)
        self.steps[1].form.save(creator, account)
        return render_to_json_response(_("Account created"))
|
|
#!/usr/bin/env python
"""A deserializer that decodes EE object trees from JSON DAGs."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import json
import numbers
import six
from . import apifunction
from . import computedobject
from . import customfunction
from . import ee_date
from . import ee_exception
from . import encodable
from . import function
from . import geometry
def fromJSON(json_obj):
  """Deserialize an object from a JSON string appropriate for API calls.

  Args:
    json_obj: The JSON representation of the input.

  Returns:
    The deserialized object.
  """
  parsed = json.loads(json_obj)
  return decode(parsed)
def decode(json_obj):
  """Decodes an object previously encoded using the EE API v2 (DAG) format.

  Args:
    json_obj: The serialized object to decode.

  Returns:
    The decoded object.
  """
  # Top-level Cloud API results are dicts holding 'values' and 'result'.
  # Guard with isinstance: json_obj may be a JSON primitive (e.g. a number),
  # and `'values' in 5` would raise a TypeError.
  if (isinstance(json_obj, dict) and
      'values' in json_obj and 'result' in json_obj):
    return decodeCloudApi(json_obj)
  named_values = {}
  # Incrementally decode scope entries if there are any.
  if isinstance(json_obj, dict) and json_obj['type'] == 'CompoundValue':
    for i, (key, value) in enumerate(json_obj['scope']):
      if key in named_values:
        raise ee_exception.EEException(
            'Duplicate scope key "%s" in scope #%d.' % (key, i))
      named_values[key] = _decodeValue(value, named_values)
    json_obj = json_obj['value']
  # Decode the final value.
  return _decodeValue(json_obj, named_values)
def _decodeValue(json_obj, named_values):
  """Decodes an object previously encoded using the EE API v2 (DAG) format.

  This uses a provided scope for ValueRef lookup and does not allow the
  input to be a CompoundValue.

  Args:
    json_obj: The serialized object to decode.
    named_values: The objects that can be referenced by ValueRefs.

  Returns:
    The decoded object.

  Raises:
    EEException: If the input is malformed or of an unknown type.
  """
  # Check for primitive values.
  if (json_obj is None or
      isinstance(json_obj, (bool, numbers.Number, six.string_types))):
    return json_obj
  # Check for array values.
  if isinstance(json_obj, (list, tuple)):
    return [_decodeValue(element, named_values) for element in json_obj]
  # Ensure that we've got a proper object at this point.
  # BUGFIX: use %-formatting in the error messages below; the previous
  # string concatenation with a non-str operand (dict/number) raised a
  # TypeError and masked the intended EEException.
  if not isinstance(json_obj, dict):
    raise ee_exception.EEException('Cannot decode object: %s' % json_obj)
  # Check for explicitly typed values.
  type_name = json_obj['type']
  if type_name == 'ValueRef':
    if json_obj['value'] in named_values:
      return named_values[json_obj['value']]
    else:
      raise ee_exception.EEException('Unknown ValueRef: %s' % json_obj)
  elif type_name == 'ArgumentRef':
    var_name = json_obj['value']
    if not isinstance(var_name, six.string_types):
      raise ee_exception.EEException('Invalid variable name: %s' % var_name)
    return customfunction.CustomFunction.variable(None, var_name)  # pylint: disable=protected-access
  elif type_name == 'Date':
    microseconds = json_obj['value']
    if not isinstance(microseconds, numbers.Number):
      raise ee_exception.EEException('Invalid date value: %s' % microseconds)
    # ee.Date takes milliseconds; the encoded value is in microseconds.
    return ee_date.Date(microseconds / 1e3)
  elif type_name == 'Bytes':
    result = encodable.Encodable()
    result.encode = lambda encoder: json_obj
    node = {'bytesValue': json_obj['value']}
    result.encode_cloud_value = lambda encoder: node
    return result
  elif type_name == 'Invocation':
    if 'functionName' in json_obj:
      func = apifunction.ApiFunction.lookup(json_obj['functionName'])
    else:
      func = _decodeValue(json_obj['function'], named_values)
    args = dict((key, _decodeValue(value, named_values))
                for (key, value) in json_obj['arguments'].items())
    return _invocation(func, args)
  elif type_name == 'Dictionary':
    return dict((key, _decodeValue(value, named_values))
                for (key, value) in json_obj['value'].items())
  elif type_name == 'Function':
    body = _decodeValue(json_obj['body'], named_values)
    signature = {
        'name': '',
        'args': [{'name': arg_name, 'type': 'Object', 'optional': False}
                 for arg_name in json_obj['argumentNames']],
        'returns': 'Object'
    }
    return customfunction.CustomFunction(signature, lambda *args: body)
  elif type_name in ('Point', 'MultiPoint', 'LineString', 'MultiLineString',
                     'Polygon', 'MultiPolygon', 'LinearRing',
                     'GeometryCollection'):
    return geometry.Geometry(json_obj)
  elif type_name == 'CompoundValue':
    raise ee_exception.EEException('Nested CompoundValues are disallowed.')
  else:
    raise ee_exception.EEException('Unknown encoded object type: ' + type_name)
def _invocation(func, args):
  """Creates an EE object representing the application of `func` to `args`."""
  if isinstance(func, function.Function):
    return func.apply(args)
  if isinstance(func, computedobject.ComputedObject):
    # We have to allow ComputedObjects for cases where invocations
    # return a function, e.g. Image.parseExpression().
    return computedobject.ComputedObject(func, args)
  raise ee_exception.EEException('Invalid function value: %s' % func)
def fromCloudApiJSON(json_obj):
  """Deserializes an object from the JSON string used in Cloud API calls.

  Args:
    json_obj: The JSON representation of the input.

  Returns:
    The deserialized object.
  """
  parsed = json.loads(json_obj)
  return decodeCloudApi(parsed)
def decodeCloudApi(json_obj):
  """Decodes an object previously encoded using the EE Cloud API format.

  Args:
    json_obj: The serialized object to decode.

  Returns:
    The decoded object.
  """
  # Memo of already-decoded nodes from json_obj['values'], keyed by
  # reference name, so shared subtrees are only decoded once.
  decoded = {}

  def lookup(reference, kind):
    # Decode a referenced node on first use and cache the result.
    if reference not in decoded:
      if reference not in json_obj['values']:
        # NOTE(review): renders as "Cannot find <reference> <kind>"; the
        # argument order looks swapped, but this matches current output.
        raise ee_exception.EEException('Cannot find %s %s' % (reference, kind))
      decoded[reference] = decode_node(json_obj['values'][reference])
    return decoded[reference]

  def decode_node(node):
    # Dispatch on the single key describing the node's kind; unknown
    # (or empty) nodes decode to None.
    if 'constantValue' in node:
      return node['constantValue']
    elif 'arrayValue' in node:
      return [decode_node(x) for x in node['arrayValue']['values']]
    elif 'dictionaryValue' in node:
      return {key: decode_node(x)
              for key, x in six.iteritems(node['dictionaryValue']['values'])}
    elif 'argumentReference' in node:
      return customfunction.CustomFunction.variable(
          None, node['argumentReference'])  # pylint: disable=protected-access
    elif 'functionDefinitionValue' in node:
      return decode_function_definition(node['functionDefinitionValue'])
    elif 'functionInvocationValue' in node:
      return decode_function_invocation(node['functionInvocationValue'])
    elif 'bytesValue' in node:
      # Reuse the legacy decoder to build the Bytes wrapper object.
      return _decodeValue({'type': 'Bytes', 'value': node['bytesValue']}, {})
    elif 'integerValue' in node:
      return int(node['integerValue'])
    elif 'valueReference' in node:
      return lookup(node['valueReference'], 'reference')
    return None

  def decode_function_definition(defined):
    # The body is stored by reference; arguments are untyped Objects.
    body = lookup(defined['body'], 'function body')
    signature_args = [{'name': name, 'type': 'Object', 'optional': False}
                      for name in defined['argumentNames']]
    signature = {'args': signature_args, 'name': '', 'returns': 'Object'}
    return customfunction.CustomFunction(signature, lambda *args: body)

  def decode_function_invocation(invoked):
    # The callee is either a reference to a decoded function or a named
    # API function.
    if 'functionReference' in invoked:
      func = lookup(invoked['functionReference'], 'function')
    else:
      func = apifunction.ApiFunction.lookup(invoked['functionName'])
    args = {
        key: decode_node(x) for key, x in six.iteritems(invoked['arguments'])
    }
    return _invocation(func, args)

  return lookup(json_obj['result'], 'result value')
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.storage_policies """
import contextlib
import six
import logging
import unittest
import os
import mock
from functools import partial
from six.moves.configparser import ConfigParser
from tempfile import NamedTemporaryFile
from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE
import swift.common.storage_policy
from swift.common.storage_policy import (
StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies,
reload_storage_policies, get_policy_string, split_policy_string,
BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY,
VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache)
from swift.common.ring import RingData
from swift.common.exceptions import RingLoadError
from pyeclib.ec_iface import ECDriver
class CapturingHandler(logging.Handler):
    """A logging handler that stores every record it receives in memory."""

    def __init__(self):
        super(CapturingHandler, self).__init__()
        # Captured LogRecord instances, in arrival order; read directly by
        # capture_logging().
        self._records = []

    def emit(self, record):
        """Capture *record* instead of formatting or writing it anywhere."""
        self._records.append(record)
@contextlib.contextmanager
def capture_logging(log_name):
    """Temporarily attach a capturing handler to *log_name*'s logger.

    Yields the (live) list of captured LogRecord instances.
    """
    handler = CapturingHandler()
    target_logger = logging.getLogger(log_name)
    target_logger.addHandler(handler)
    try:
        yield handler._records
    finally:
        # Always detach, even if the body raised.
        target_logger.removeHandler(handler)
@BaseStoragePolicy.register('fake')
class FakeStoragePolicy(BaseStoragePolicy):
    """
    Test StoragePolicy class - the only user at the moment is
    test_validate_policies_type_invalid()
    """

    def __init__(self, idx, name='', is_default=False, is_deprecated=False,
                 object_ring=None):
        # Simply forwards to the base class; the registration under the
        # 'fake' policy type (decorator above) is what the tests rely on.
        super(FakeStoragePolicy, self).__init__(
            idx, name, is_default, is_deprecated, object_ring)
class TestStoragePolicies(unittest.TestCase):
def _conf(self, conf_str):
    """Parse an indented triple-quoted literal into a ConfigParser."""
    # Strip per-line leading whitespace so indented test literals are
    # valid INI input.
    conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
    conf = ConfigParser()
    conf.readfp(six.StringIO(conf_str))
    return conf
def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
    """Assert f(*args, **kwargs) raises exc_class whose str() contains message."""
    try:
        f(*args, **kwargs)
    except exc_class as err:
        err_msg = str(err)
        self.assertTrue(
            message in err_msg,
            'Error message %r did not have expected substring %r'
            % (err_msg, message))
    else:
        self.fail('%r did not raise %s' % (message, exc_class.__name__))
def test_policy_baseclass_instantiate(self):
    """The abstract base policy class must refuse direct instantiation."""
    self.assertRaisesWithMessage(TypeError,
                                 "Can't instantiate BaseStoragePolicy",
                                 BaseStoragePolicy, 1, 'one')
@patch_policies([
    StoragePolicy(0, 'zero', is_default=True),
    StoragePolicy(1, 'one'),
    StoragePolicy(2, 'two'),
    StoragePolicy(3, 'three', is_deprecated=True),
    ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
                    ec_ndata=10, ec_nparity=4),
])
def test_swift_info(self):
    """get_policy_info() must expose all policies except deprecated ones."""
    # the deprecated 'three' should not exist in expect
    expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', },
              {'aliases': 'two', 'name': 'two'},
              {'aliases': 'one', 'name': 'one'},
              {'aliases': 'ten', 'name': 'ten'}]
    swift_info = POLICIES.get_policy_info()
    self.assertEqual(sorted(expect, key=lambda k: k['name']),
                     sorted(swift_info, key=lambda k: k['name']))
@patch_policies
def test_get_policy_string(self):
    """Policy 0 (and falsy indexes) keep the bare name; others get -N."""
    self.assertEqual(get_policy_string('something', 0), 'something')
    self.assertEqual(get_policy_string('something', None), 'something')
    self.assertEqual(get_policy_string('something', ''), 'something')
    self.assertEqual(get_policy_string('something', 1),
                     'something' + '-1')
    # Unknown policy indexes are rejected.
    self.assertRaises(PolicyError, get_policy_string, 'something', 99)
@patch_policies
def test_split_policy_string(self):
    """split_policy_string maps base/index forms and rejects bad suffixes."""
    expectations = {
        'something': ('something', POLICIES[0]),
        'something-1': ('something', POLICIES[1]),
        'tmp': ('tmp', POLICIES[0]),
        'objects': ('objects', POLICIES[0]),
        'tmp-1': ('tmp', POLICIES[1]),
        'objects-1': ('objects', POLICIES[1]),
        'objects-': PolicyError,
        'objects-0': PolicyError,
        'objects--1': ('objects-', POLICIES[1]),
        'objects-+1': PolicyError,
        'objects--': PolicyError,
        'objects-foo': PolicyError,
        'objects--bar': PolicyError,
        'objects-+bar': PolicyError,
        # questionable, demonstrated as inverse of get_policy_string
        'objects+0': ('objects+0', POLICIES[0]),
        '': ('', POLICIES[0]),
        '0': ('0', POLICIES[0]),
        '-1': ('', POLICIES[1]),
    }
    for policy_string, expected in expectations.items():
        if expected == PolicyError:
            try:
                invalid = split_policy_string(policy_string)
            except PolicyError:
                continue  # good
            else:
                self.fail('The string %r returned %r '
                          'instead of raising a PolicyError' %
                          (policy_string, invalid))
        self.assertEqual(expected, split_policy_string(policy_string))
        # should be inverse of get_policy_string
        self.assertEqual(policy_string, get_policy_string(*expected))
def test_defaults(self):
    """Sanity-check the globally configured POLICIES collection."""
    self.assertGreater(len(POLICIES), 0)
    # test class functions
    default_policy = POLICIES.default
    self.assertTrue(default_policy.is_default)
    zero_policy = POLICIES.get_by_index(0)
    self.assertTrue(zero_policy.idx == 0)
    zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
    self.assertTrue(zero_policy_by_name.idx == 0)
def test_storage_policy_repr(self):
    """repr() of policies and collections must expose key attributes."""
    test_policies = [StoragePolicy(0, 'aay', True),
                     StoragePolicy(1, 'bee', False),
                     StoragePolicy(2, 'cee', False),
                     ECStoragePolicy(10, 'ten',
                                     ec_type=DEFAULT_TEST_EC_TYPE,
                                     ec_ndata=10, ec_nparity=3),
                     ECStoragePolicy(11, 'eleven',
                                     ec_type=DEFAULT_TEST_EC_TYPE,
                                     ec_ndata=10, ec_nparity=3,
                                     ec_duplication_factor=2)]
    policies = StoragePolicyCollection(test_policies)
    for policy in policies:
        policy_repr = repr(policy)
        self.assertTrue(policy.__class__.__name__ in policy_repr)
        self.assertTrue('is_default=%s' % policy.is_default in policy_repr)
        self.assertTrue('is_deprecated=%s' % policy.is_deprecated in
                        policy_repr)
        self.assertTrue(policy.name in policy_repr)
        if policy.policy_type == EC_POLICY:
            # EC policies additionally advertise their EC parameters.
            self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr)
            self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr)
            self.assertTrue('ec_nparity=%s' %
                            policy.ec_nparity in policy_repr)
            self.assertTrue('ec_segment_size=%s' %
                            policy.ec_segment_size in policy_repr)
            if policy.ec_duplication_factor > 1:
                self.assertTrue('ec_duplication_factor=%s' %
                                policy.ec_duplication_factor in
                                policy_repr)
    # The collection repr contains a header line plus one line per policy.
    collection_repr = repr(policies)
    collection_repr_lines = collection_repr.splitlines()
    self.assertTrue(
        policies.__class__.__name__ in collection_repr_lines[0])
    self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
    for policy, line in zip(policies, collection_repr_lines[1:-1]):
        self.assertTrue(repr(policy) in line)
    with patch_policies(policies):
        self.assertEqual(repr(POLICIES), collection_repr)
def test_validate_policies_defaults(self):
    """Default-policy resolution rules for a policy collection."""
    # 0 explicit default
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(2, 'two', False)]
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.default, test_policies[0])
    self.assertEqual(policies.default.name, 'zero')

    # non-zero explicit default
    test_policies = [StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(2, 'two', True)]
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.default, test_policies[2])
    self.assertEqual(policies.default.name, 'two')

    # multiple defaults
    test_policies = [StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', True),
                     StoragePolicy(2, 'two', True)]
    self.assertRaisesWithMessage(
        PolicyError, 'Duplicate default', StoragePolicyCollection,
        test_policies)

    # nothing specified
    test_policies = []
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.default, policies[0])
    self.assertEqual(policies.default.name, 'Policy-0')

    # no default specified with only policy index 0
    test_policies = [StoragePolicy(0, 'zero')]
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.default, policies[0])

    # no default specified with multiple policies
    test_policies = [StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(2, 'two', False)]
    self.assertRaisesWithMessage(
        PolicyError, 'Unable to find default policy',
        StoragePolicyCollection, test_policies)
def test_deprecate_policies(self):
    """Deprecated policies stay in the collection but a default is required."""
    # deprecation specified
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(2, 'two', False, is_deprecated=True)]
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.default, test_policies[0])
    self.assertEqual(policies.default.name, 'zero')
    self.assertEqual(len(policies), 3)

    # multiple policies requires default
    test_policies = [StoragePolicy(0, 'zero', False),
                     StoragePolicy(1, 'one', False, is_deprecated=True),
                     StoragePolicy(2, 'two', False)]
    self.assertRaisesWithMessage(
        PolicyError, 'Unable to find default policy',
        StoragePolicyCollection, test_policies)
def test_validate_policies_indexes(self):
    """Duplicate policy indexes must be rejected."""
    # duplicate indexes
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(1, 'two', False)]
    self.assertRaises(PolicyError, StoragePolicyCollection,
                      test_policies)
def test_validate_policy_params(self):
    """Single-policy constructor validation: index, reserved name, flags."""
    StoragePolicy(0, 'name')  # sanity
    # bogus indexes
    self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name')
    self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name')

    # non-zero Policy-0
    self.assertRaisesWithMessage(PolicyError, 'reserved',
                                 FakeStoragePolicy, 1, 'policy-0')
    # deprecate default
    self.assertRaisesWithMessage(
        PolicyError, 'Deprecated policy can not be default',
        FakeStoragePolicy, 1, 'Policy-1', is_default=True,
        is_deprecated=True)
    # weird names
    names = (
        '',
        'name_foo',
        'name\nfoo',
        'name foo',
        u'name \u062a',
        'name \xd8\xaa',
    )
    for name in names:
        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     FakeStoragePolicy, 1, name)
def test_validate_policies_names(self):
    """Duplicate policy names must be rejected."""
    # duplicate names
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'zero', False),
                     StoragePolicy(2, 'two', False)]
    self.assertRaises(PolicyError, StoragePolicyCollection,
                      test_policies)
def test_validate_policies_type_default(self):
    # no type specified - make sure the policy is initialized to
    # DEFAULT_POLICY_TYPE (the registered 'fake' type for this class)
    test_policy = FakeStoragePolicy(0, 'zero', True)
    self.assertEqual(test_policy.policy_type, 'fake')
def test_validate_policies_type_invalid(self):
    """An unregistered policy_type must fail validation at init time."""
    class BogusStoragePolicy(FakeStoragePolicy):
        policy_type = 'bogus'

    # unsupported policy type - initialization with FakeStoragePolicy
    self.assertRaisesWithMessage(PolicyError, 'Invalid type',
                                 BogusStoragePolicy, 1, 'one')
def test_policies_type_attribute(self):
    """Each policy class reports its expected policy_type constant."""
    test_policies = [
        StoragePolicy(0, 'zero', is_default=True),
        StoragePolicy(1, 'one'),
        StoragePolicy(2, 'two'),
        StoragePolicy(3, 'three', is_deprecated=True),
        ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=10, ec_nparity=3),
    ]
    policies = StoragePolicyCollection(test_policies)
    self.assertEqual(policies.get_by_index(0).policy_type,
                     REPL_POLICY)
    self.assertEqual(policies.get_by_index(1).policy_type,
                     REPL_POLICY)
    self.assertEqual(policies.get_by_index(2).policy_type,
                     REPL_POLICY)
    self.assertEqual(policies.get_by_index(3).policy_type,
                     REPL_POLICY)
    self.assertEqual(policies.get_by_index(10).policy_type,
                     EC_POLICY)
def test_names_are_normalized(self):
    """Name comparison is case-insensitive; original casing is preserved."""
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'ZERO', False)]
    self.assertRaises(PolicyError, StoragePolicyCollection,
                      test_policies)

    policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True),
                                        StoragePolicy(1, 'One', False)])

    pol0 = policies[0]
    pol1 = policies[1]

    for name in ('zero', 'ZERO', 'zErO', 'ZeRo'):
        self.assertEqual(pol0, policies.get_by_name(name))
        self.assertEqual(policies.get_by_name(name).name, 'zEro')
    for name in ('one', 'ONE', 'oNe', 'OnE'):
        self.assertEqual(pol1, policies.get_by_name(name))
        self.assertEqual(policies.get_by_name(name).name, 'One')
def test_multiple_names(self):
    """Policy aliases: uniqueness across policies, lookup by alias, and
    parsing of the ``aliases`` option from configuration text."""
    # checking duplicate on insert
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'one', False, aliases='zero')]
    self.assertRaises(PolicyError, StoragePolicyCollection,
                      test_policies)
    # checking correct retrival using other names
    test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'),
                     StoragePolicy(1, 'one', False, aliases='uno, tahi'),
                     StoragePolicy(2, 'two', False, aliases='dos, rua')]
    policies = StoragePolicyCollection(test_policies)
    # NOTE(review): aliases of policy 1 ('uno', 'tahi') are not
    # exercised here — only policies 0 and 2 are looked up.
    for name in ('zero', 'cero', 'kore'):
        self.assertEqual(policies.get_by_name(name), test_policies[0])
    for name in ('two', 'dos', 'rua'):
        self.assertEqual(policies.get_by_name(name), test_policies[2])
    # Testing parsing of conf files/text
    good_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, tahi
default = yes
""")
    policies = parse_storage_policies(good_conf)
    # Primary name and alias resolve to the same policy object.
    self.assertEqual(policies.get_by_name('one'),
                     policies[0])
    self.assertEqual(policies.get_by_name('one'),
                     policies.get_by_name('tahi'))
    name_repeat_conf = self._conf("""
[storage-policy:0]
name = one
aliases = one
default = yes
""")
    # Test on line below should not generate errors. Repeat of main
    # name under aliases is permitted during construction
    # but only because automated testing requires it.
    policies = parse_storage_policies(name_repeat_conf)
    extra_commas_conf = self._conf("""
[storage-policy:0]
name = one
aliases = ,,one, ,
default = yes
""")
    # Extra blank entries should be silently dropped
    policies = parse_storage_policies(extra_commas_conf)
    bad_conf = self._conf("""
[storage-policy:0]
name = one
aliases = uno, uno
default = yes
""")
    # But a non-primary name repeated within one policy is an error.
    self.assertRaisesWithMessage(PolicyError,
                                 'is already assigned to this policy',
                                 parse_storage_policies, bad_conf)
def test_multiple_names_EC(self):
    """Same alias semantics as test_multiple_names, for EC policies."""
    # checking duplicate names on insert
    test_policies_ec = [
        ECStoragePolicy(
            0, 'ec8-2',
            aliases='zeus, jupiter',
            ec_type=DEFAULT_TEST_EC_TYPE,
            ec_ndata=8, ec_nparity=2,
            object_ring=FakeRing(replicas=8),
            is_default=True),
        ECStoragePolicy(
            1, 'ec10-4',
            aliases='ec8-2',  # clashes with policy 0's primary name
            ec_type=DEFAULT_TEST_EC_TYPE,
            ec_ndata=10, ec_nparity=4,
            object_ring=FakeRing(replicas=10))]
    self.assertRaises(PolicyError, StoragePolicyCollection,
                      test_policies_ec)
    # checking correct retrival using other names
    good_test_policies_EC = [
        ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=8, ec_nparity=2,
                        object_ring=FakeRing(replicas=10),
                        is_default=True),
        ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=10, ec_nparity=4,
                        object_ring=FakeRing(replicas=14)),
        ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4, ec_nparity=2,
                        object_ring=FakeRing(replicas=6)),
        ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin',
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4, ec_nparity=2,
                        ec_duplication_factor=2,
                        object_ring=FakeRing(replicas=12)),
    ]
    ec_policies = StoragePolicyCollection(good_test_policies_EC)
    # Every alias resolves to its owning policy.
    for name in ('ec8-2', 'zeus', 'jupiter'):
        self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
    for name in ('ec10-4', 'athena', 'minerva'):
        self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])
    for name in ('ec4-2', 'poseidon', 'neptune'):
        self.assertEqual(ec_policies.get_by_name(name), ec_policies[2])
    for name in ('ec4-2-dup', 'uzuki', 'rin'):
        self.assertEqual(ec_policies.get_by_name(name), ec_policies[3])
    # Testing parsing of conf files/text
    good_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, jupiter
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
[storage-policy:1]
name = ec10-4
aliases = poseidon, neptune
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
[storage-policy:2]
name = ec4-2-dup
aliases = uzuki, rin
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
ec_duplication_factor = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
    ec_policies = parse_storage_policies(good_ec_conf)
    self.assertEqual(ec_policies.get_by_name('ec8-2'),
                     ec_policies[0])
    self.assertEqual(ec_policies.get_by_name('ec10-4'),
                     ec_policies.get_by_name('poseidon'))
    self.assertEqual(ec_policies.get_by_name('ec4-2-dup'),
                     ec_policies.get_by_name('uzuki'))
    name_repeat_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = ec8-2
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
    # Test on line below should not generate errors. Repeat of main
    # name under aliases is permitted during construction
    # but only because automated testing requires it.
    ec_policies = parse_storage_policies(name_repeat_ec_conf)
    bad_ec_conf = self._conf("""
[storage-policy:0]
name = ec8-2
aliases = zeus, zeus
policy_type = erasure_coding
ec_type = %(ec_type)s
default = yes
ec_num_data_fragments = 8
ec_num_parity_fragments = 2
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
    # A repeated non-primary alias is still rejected.
    self.assertRaisesWithMessage(PolicyError,
                                 'is already assigned to this policy',
                                 parse_storage_policies, bad_ec_conf)
def test_add_remove_names(self):
    """Alias lifecycle: add, remove, and change-primary-name, including
    the error cases (invalid/duplicate names, removing the last name).

    The steps below are order-dependent: later assertions rely on the
    aliases added or removed by earlier ones.
    """
    test_policies = [StoragePolicy(0, 'zero', True),
                     StoragePolicy(1, 'one', False),
                     StoragePolicy(2, 'two', False)]
    policies = StoragePolicyCollection(test_policies)
    # add names
    policies.add_policy_alias(1, 'tahi')
    self.assertEqual(policies.get_by_name('tahi'), test_policies[1])
    # Multiple aliases may be added in one call.
    policies.add_policy_alias(2, 'rua', 'dos')
    self.assertEqual(policies.get_by_name('rua'), test_policies[2])
    self.assertEqual(policies.get_by_name('dos'), test_policies[2])
    # Names with embedded newlines or empty names are invalid.
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 policies.add_policy_alias, 2, 'double\n')
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 policies.add_policy_alias, 2, '')
    # try to add existing name
    self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
                                 policies.add_policy_alias, 2, 'two')
    self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
                                 policies.add_policy_alias, 1, 'two')
    # remove name
    policies.remove_policy_alias('tahi')
    self.assertEqual(policies.get_by_name('tahi'), None)
    # remove only name
    self.assertRaisesWithMessage(PolicyError,
                                 'Policies must have at least one name.',
                                 policies.remove_policy_alias, 'zero')
    # remove non-existent name
    self.assertRaisesWithMessage(PolicyError,
                                 'No policy with name',
                                 policies.remove_policy_alias, 'three')
    # remove default name
    policies.remove_policy_alias('two')
    self.assertEqual(policies.get_by_name('two'), None)
    # After removing the primary name an alias becomes the name.
    self.assertEqual(policies.get_by_index(2).name, 'rua')
    # change default name to a new name
    policies.change_policy_primary_name(2, 'two')
    self.assertEqual(policies.get_by_name('two'), test_policies[2])
    self.assertEqual(policies.get_by_index(2).name, 'two')
    # change default name to an existing alias
    policies.change_policy_primary_name(2, 'dos')
    self.assertEqual(policies.get_by_index(2).name, 'dos')
    # change default name to a bad new name
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 policies.change_policy_primary_name,
                                 2, 'bad\nname')
    # change default name to a name belonging to another policy
    self.assertRaisesWithMessage(PolicyError,
                                 'Other policy',
                                 policies.change_policy_primary_name,
                                 1, 'dos')
def test_deprecated_default(self):
    """A policy may not be both deprecated and the default."""
    conf = self._conf("""
[storage-policy:1]
name = one
deprecated = yes
default = yes
""")
    self.assertRaisesWithMessage(
        PolicyError, "Deprecated policy can not be default",
        parse_storage_policies, conf)
def test_multiple_policies_with_no_policy_index_zero(self):
    """Explicit policies must include index 0; it is not auto-created."""
    conf = self._conf("""
[storage-policy:1]
name = one
default = yes
""")
    # Policy-0 will not be implicitly added if other policies are defined
    self.assertRaisesWithMessage(
        PolicyError, "must specify a storage policy section "
        "for policy index 0", parse_storage_policies, conf)
@mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES',
                   ['isa_l_rs_vand', 'isa_l_rs_cauchy'])
@mock.patch('swift.common.storage_policy.ECDriver')
def test_known_bad_ec_config(self, mock_driver):
    """isa_l_rs_vand with >4 parity is a known-bad EC config: parsing
    warns (twice) unless the policy is deprecated, in which case only
    the durability warning is emitted."""
    # cauchy with 5 parity: no warnings expected.
    good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
    with capture_logging('swift.common.storage_policy') as records:
        parse_storage_policies(good_conf)
    mock_driver.assert_called_once()
    mock_driver.reset_mock()
    self.assertFalse([(r.levelname, r.msg) for r in records])
    # vand with only 4 parity: still acceptable, no warnings.
    good_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
    with capture_logging('swift.common.storage_policy') as records:
        parse_storage_policies(good_conf)
    mock_driver.assert_called_once()
    mock_driver.reset_mock()
    self.assertFalse([(r.levelname, r.msg) for r in records])
    # vand with 5 parity: the known-bad combination.
    bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
""")
    with capture_logging('swift.common.storage_policy') as records:
        parse_storage_policies(bad_conf)
    mock_driver.assert_called_once()
    mock_driver.reset_mock()
    self.assertEqual([r.levelname for r in records],
                     ['WARNING', 'WARNING'])
    for msg in ('known to harm data durability',
                'Any data in this policy should be migrated',
                'https://bugs.launchpad.net/swift/+bug/1639691'):
        self.assertIn(msg, records[0].msg)
    self.assertIn('In a future release, this will prevent services from '
                  'starting', records[1].msg)
    # Deprecating the bad policy suppresses the second warning.
    slightly_less_bad_conf = self._conf("""
[storage-policy:0]
name = bad-policy
policy_type = erasure_coding
ec_type = isa_l_rs_vand
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
deprecated = true
[storage-policy:1]
name = good-policy
policy_type = erasure_coding
ec_type = isa_l_rs_cauchy
ec_num_data_fragments = 10
ec_num_parity_fragments = 5
default = true
""")
    with capture_logging('swift.common.storage_policy') as records:
        parse_storage_policies(slightly_less_bad_conf)
    self.assertEqual(2, mock_driver.call_count)
    mock_driver.reset_mock()
    self.assertEqual([r.levelname for r in records],
                     ['WARNING'])
    for msg in ('known to harm data durability',
                'Any data in this policy should be migrated',
                'https://bugs.launchpad.net/swift/+bug/1639691'):
        self.assertIn(msg, records[0].msg)
def test_no_default(self):
    """Default-policy resolution while parsing multi-policy configs.

    An explicit ``default = yes`` wins; multiple policies with no
    explicit default is an error; a deprecated policy may coexist with
    an explicitly-defaulted one.
    """
    orig_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
default = yes
""")
    policies = parse_storage_policies(orig_conf)
    self.assertEqual(policies.default, policies[1])
    # BUG FIX: this previously read assertTrue(policies[0].name,
    # 'Policy-0') — the second argument of assertTrue is the failure
    # *message*, so only truthiness was checked.  Assert the actual
    # configured name instead.
    self.assertEqual('zero', policies[0].name)
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = one
deprecated = yes
""")
    # multiple polices and no explicit default
    self.assertRaisesWithMessage(
        PolicyError, "Unable to find default",
        parse_storage_policies, bad_conf)
    good_conf = self._conf("""
[storage-policy:0]
name = Policy-0
default = yes
[storage-policy:1]
name = one
deprecated = yes
""")
    policies = parse_storage_policies(good_conf)
    self.assertEqual(policies.default, policies[0])
    # BUG FIX: was assertTrue(policies[1].is_deprecated, True), where
    # True was silently used as the failure message.  Assert the flag
    # directly.
    self.assertTrue(policies[1].is_deprecated)
def test_parse_storage_policies(self):
    """Exhaustive parse_storage_policies error-path coverage, then one
    good multi-section config whose parsed attributes are verified."""
    # ValueError when deprecating policy 0
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
deprecated = yes
[storage-policy:1]
name = one
deprecated = yes
""")
    self.assertRaisesWithMessage(
        PolicyError, "Unable to find policy that's not deprecated",
        parse_storage_policies, bad_conf)
    # Malformed section indexes: empty, negative, non-numeric.
    bad_conf = self._conf("""
[storage-policy:]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:-1]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:x-1]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    # NOTE(review): this [storage-policy:x] case is an exact duplicate
    # of the one above the x-1 case — probably unintended repetition.
    bad_conf = self._conf("""
[storage-policy:x]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:x:1]
name = zero
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                 parse_storage_policies, bad_conf)
    # Unknown option in a policy section.
    bad_conf = self._conf("""
[storage-policy:1]
name = zero
boo = berries
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid option',
                                 parse_storage_policies, bad_conf)
    # Invalid names: empty, reserved 'Policy-N' forms, underscores.
    bad_conf = self._conf("""
[storage-policy:0]
name =
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:3]
name = Policy-0
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:1]
name = policY-0
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:0]
name = one
[storage-policy:1]
name = ONE
""")
    # Name uniqueness is case-insensitive.
    self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
                                 parse_storage_policies, bad_conf)
    bad_conf = self._conf("""
[storage-policy:0]
name = good_stuff
""")
    self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                 parse_storage_policies, bad_conf)
    # policy_type = erasure_coding
    # missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
""")
    self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                 parse_storage_policies, bad_conf)
    # missing ec_type, but other options valid...
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
    self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                 parse_storage_policies, bad_conf)
    # ec_type specified, but invalid...
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
default = yes
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = garbage_alg
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""")
    self.assertRaisesWithMessage(PolicyError,
                                 'Wrong ec_type garbage_alg for policy '
                                 'ec10-4, should be one of "%s"' %
                                 (', '.join(VALID_EC_TYPES)),
                                 parse_storage_policies, bad_conf)
    # missing and invalid ec_num_parity_fragments
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
    self.assertRaisesWithMessage(PolicyError,
                                 'Invalid ec_num_parity_fragments',
                                 parse_storage_policies, bad_conf)
    for num_parity in ('-4', '0', 'x'):
        bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = %(num_parity)s
""" % {'ec_type': DEFAULT_TEST_EC_TYPE,
       'num_parity': num_parity})
        self.assertRaisesWithMessage(PolicyError,
                                     'Invalid ec_num_parity_fragments',
                                     parse_storage_policies, bad_conf)
    # missing and invalid ec_num_data_fragments
    bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_parity_fragments = 4
""" % {'ec_type': DEFAULT_TEST_EC_TYPE})
    self.assertRaisesWithMessage(PolicyError,
                                 'Invalid ec_num_data_fragments',
                                 parse_storage_policies, bad_conf)
    for num_data in ('-10', '0', 'x'):
        bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_type = %(ec_type)s
ec_num_data_fragments = %(num_data)s
ec_num_parity_fragments = 4
""" % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE})
        self.assertRaisesWithMessage(PolicyError,
                                     'Invalid ec_num_data_fragments',
                                     parse_storage_policies, bad_conf)
    # invalid ec_object_segment_size
    for segment_size in ('-4', '0', 'x'):
        bad_conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:1]
name = ec10-4
policy_type = erasure_coding
ec_object_segment_size = %(segment_size)s
ec_type = %(ec_type)s
ec_num_data_fragments = 10
ec_num_parity_fragments = 4
""" % {'segment_size': segment_size,
       'ec_type': DEFAULT_TEST_EC_TYPE})
        self.assertRaisesWithMessage(PolicyError,
                                     'Invalid ec_object_segment_size',
                                     parse_storage_policies, bad_conf)
    # Additional section added to ensure parser ignores other sections
    conf = self._conf("""
[some-other-section]
foo = bar
[storage-policy:0]
name = zero
[storage-policy:5]
name = one
default = yes
[storage-policy:6]
name = duplicate-sections-are-ignored
[storage-policy:6]
name = apple
""")
    policies = parse_storage_policies(conf)
    # Only the explicitly-defaulted index 5 is the default.
    self.assertEqual(True, policies.get_by_index(5).is_default)
    self.assertEqual(False, policies.get_by_index(0).is_default)
    self.assertEqual(False, policies.get_by_index(6).is_default)
    # Index 0 uses the legacy 'object' ring name; others get a suffix.
    self.assertEqual("object", policies.get_by_name("zero").ring_name)
    self.assertEqual("object-5", policies.get_by_name("one").ring_name)
    self.assertEqual("object-6", policies.get_by_name("apple").ring_name)
    self.assertEqual(0, int(policies.get_by_name('zero')))
    self.assertEqual(5, int(policies.get_by_name('one')))
    self.assertEqual(6, int(policies.get_by_name('apple')))
    # get_by_index accepts ints, numeric strings, None and '' (the
    # latter two meaning the legacy policy 0).
    self.assertEqual("zero", policies.get_by_index(0).name)
    self.assertEqual("zero", policies.get_by_index("0").name)
    self.assertEqual("one", policies.get_by_index(5).name)
    self.assertEqual("apple", policies.get_by_index(6).name)
    self.assertEqual("zero", policies.get_by_index(None).name)
    self.assertEqual("zero", policies.get_by_index('').name)
    self.assertEqual(policies.get_by_index(0), policies.legacy)
def test_reload_invalid_storage_policies(self):
    """Reloading a config with duplicate indexes must SystemExit with a
    diagnostic naming both the file problem and the duplicate index."""
    conf = self._conf("""
[storage-policy:0]
name = zero
[storage-policy:00]
name = double-zero
""")
    with NamedTemporaryFile() as f:
        conf.write(f)
        f.flush()
        with mock.patch('swift.common.storage_policy.SWIFT_CONF_FILE',
                        new=f.name):
            try:
                reload_storage_policies()
            except SystemExit as e:
                err_msg = str(e)
            else:
                self.fail('SystemExit not raised')
    parts = [
        'Invalid Storage Policy Configuration',
        'Duplicate index',
    ]
    for expected in parts:
        # IMPROVED: assertIn replaces the hand-rolled
        # assertTrue(expected in err_msg, ...) — it produces an
        # equivalent (and automatic) failure message.
        self.assertIn(expected, err_msg)
def test_storage_policy_ordering(self):
    """Policies sort by their integer index and compare against ints."""
    collection = StoragePolicyCollection([
        StoragePolicy(0, 'zero', is_default=True),
        StoragePolicy(503, 'error'),
        StoragePolicy(204, 'empty'),
        StoragePolicy(404, 'missing'),
    ])
    ordered = [int(policy) for policy in sorted(collection)]
    self.assertEqual([0, 204, 404, 503], ordered)
    p503 = collection[503]
    self.assertTrue(501 < p503 < 507)
def test_get_object_ring(self):
    """get_object_ring lazily loads, names and caches each policy's
    ring; once loaded, the cached ring is returned without touching the
    Ring class again."""
    test_policies = [StoragePolicy(0, 'aay', True),
                     StoragePolicy(1, 'bee', False),
                     StoragePolicy(2, 'cee', False)]
    policies = StoragePolicyCollection(test_policies)

    class NamedFakeRing(FakeRing):
        # Records the ring_name it was constructed with so the test can
        # verify each policy loads its own ring.
        def __init__(self, swift_dir, ring_name=None):
            self.ring_name = ring_name
            super(NamedFakeRing, self).__init__()

    with mock.patch('swift.common.storage_policy.Ring',
                    new=NamedFakeRing):
        for policy in policies:
            self.assertFalse(policy.object_ring)
            ring = policies.get_object_ring(int(policy), '/path/not/used')
            self.assertEqual(ring.ring_name, policy.ring_name)
            self.assertTrue(policy.object_ring)
            self.assertTrue(isinstance(policy.object_ring, NamedFakeRing))

    def blow_up(*args, **kwargs):
        raise Exception('kaboom!')

    # After load_ring, get_object_ring must serve the cached ring —
    # the exploding Ring replacement would fire on any reload attempt.
    with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
        for policy in policies:
            policy.load_ring('/path/not/used')
            expected = policies.get_object_ring(int(policy),
                                                '/path/not/used')
            self.assertEqual(policy.object_ring, expected)
    # bad policy index
    self.assertRaises(PolicyError, policies.get_object_ring, 99,
                      '/path/not/used')
def test_bind_ports_cache(self):
    """BindPortsCache returns the set of ports for devices on our IPs,
    reloading ring files only when their mtimes change, and tolerating
    a missing ring file."""
    test_policies = [StoragePolicy(0, 'aay', True),
                     StoragePolicy(1, 'bee', False),
                     StoragePolicy(2, 'cee', False)]
    my_ips = ['1.2.3.4', '2.3.4.5']
    other_ips = ['3.4.5.6', '4.5.6.7']
    bind_ip = my_ips[1]
    # First generation of ring device tables (per ring name); None
    # entries model deleted devices and must be skipped.
    devs_by_ring_name1 = {
        'object': [  # 'aay'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
             'port': 6006},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
             'port': 6007},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
             'port': 6008},
            None,
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6009}],
        'object-1': [  # 'bee'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
             'port': 6006},  # dupe
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
             'port': 6010},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
             'port': 6011},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6012}],
        'object-2': [  # 'cee'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
             'port': 6010},  # on our IP and a not-us IP
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
             'port': 6013},
            None,
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
             'port': 6014},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6015}],
    }
    # Second generation, served after the mtime bump below.
    devs_by_ring_name2 = {
        'object': [  # 'aay'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
             'port': 6016},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6019}],
        'object-1': [  # 'bee'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
             'port': 6016},  # dupe
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6022}],
        'object-2': [  # 'cee'
            {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
             'port': 6020},
            {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
             'port': 6025}],
    }
    ring_files = [ring_name + '.ring.gz'
                  for ring_name in sorted(devs_by_ring_name1)]

    def _fake_load(gz_path, stub_objs, metadata_only=False):
        # Serve the stub device table matching the requested ring file.
        return RingData(
            devs=stub_objs[os.path.basename(gz_path)[:-8]],
            replica2part2dev_id=[],
            part_shift=24)

    with mock.patch(
            'swift.common.storage_policy.RingData.load'
    ) as mock_ld, \
            patch_policies(test_policies), \
            mock.patch('swift.common.storage_policy.whataremyips') \
            as mock_whataremyips, \
            temptree(ring_files) as tempdir:
        mock_whataremyips.return_value = my_ips
        cache = BindPortsCache(tempdir, bind_ip)
        # whataremyips is resolved once, at construction.
        self.assertEqual([
            mock.call(bind_ip),
        ], mock_whataremyips.mock_calls)
        mock_whataremyips.reset_mock()
        mock_ld.side_effect = partial(_fake_load,
                                      stub_objs=devs_by_ring_name1)
        # First call loads every ring (metadata only).
        self.assertEqual(set([
            6006, 6008, 6011, 6010, 6014,
        ]), cache.all_bind_ports_for_node())
        self.assertEqual([
            mock.call(os.path.join(tempdir, ring_files[0]),
                      metadata_only=True),
            mock.call(os.path.join(tempdir, ring_files[1]),
                      metadata_only=True),
            mock.call(os.path.join(tempdir, ring_files[2]),
                      metadata_only=True),
        ], mock_ld.mock_calls)
        mock_ld.reset_mock()
        mock_ld.side_effect = partial(_fake_load,
                                      stub_objs=devs_by_ring_name2)
        # Unchanged mtimes: cached result, no reloads.
        self.assertEqual(set([
            6006, 6008, 6011, 6010, 6014,
        ]), cache.all_bind_ports_for_node())
        self.assertEqual([], mock_ld.mock_calls)
        # but when all the file mtimes are made different, it'll
        # reload
        for gz_file in [os.path.join(tempdir, n)
                        for n in ring_files]:
            os.utime(gz_file, (88, 88))
        self.assertEqual(set([
            6016, 6020,
        ]), cache.all_bind_ports_for_node())
        self.assertEqual([
            mock.call(os.path.join(tempdir, ring_files[0]),
                      metadata_only=True),
            mock.call(os.path.join(tempdir, ring_files[1]),
                      metadata_only=True),
            mock.call(os.path.join(tempdir, ring_files[2]),
                      metadata_only=True),
        ], mock_ld.mock_calls)
        mock_ld.reset_mock()
        # Don't do something stupid like crash if a ring file is missing.
        os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))
        self.assertEqual(set([
            6016, 6020,
        ]), cache.all_bind_ports_for_node())
        self.assertEqual([], mock_ld.mock_calls)
    # whataremyips() is only called in the constructor
    self.assertEqual([], mock_whataremyips.mock_calls)
def test_singleton_passthrough(self):
    """Indexing POLICIES by int(policy) round-trips to the same policy."""
    patched = [StoragePolicy(0, 'aay', True),
               StoragePolicy(1, 'bee', False),
               StoragePolicy(2, 'cee', False)]
    with patch_policies(patched):
        for policy in POLICIES:
            self.assertEqual(policy, POLICIES[int(policy)])
def test_quorum_size_replication(self):
    """Replication quorum for 1..5 replicas: majority rounded down."""
    for replicas, expected in ((1, 1), (2, 1), (3, 2), (4, 2), (5, 3)):
        policy = StoragePolicy(0, 'zero',
                               object_ring=FakeRing(replicas=replicas))
        self.assertEqual(expected, policy.quorum)
def test_quorum_size_erasure_coding(self):
    """EC quorum = (ndata + driver's min parity) * duplication factor."""
    cases = [
        ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=8, ec_nparity=2),
        ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
                        ec_ndata=10, ec_nparity=6),
        ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4, ec_nparity=2,
                        ec_duplication_factor=2),
    ]
    for policy in cases:
        min_parity = policy.pyeclib_driver.min_parity_fragments_needed()
        expected = ((policy.ec_ndata + min_parity)
                    * policy.ec_duplication_factor)
        self.assertEqual(expected, policy.quorum)
def test_validate_ring(self):
    """load_ring raises RingLoadError when the ring's replica count does
    not equal the policy's required fragment count (unique fragments
    times duplication factor)."""
    test_policies = [
        ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=8, ec_nparity=2,
                        is_default=True),
        ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=10, ec_nparity=4),
        ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4, ec_nparity=2),
        ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4, ec_nparity=2,
                        ec_duplication_factor=2)
    ]
    # Replica counts the fake ring data will report — each one is
    # deliberately wrong for the corresponding policy above.
    actual_load_ring_replicas = [8, 10, 7, 11]
    policies = StoragePolicyCollection(test_policies)

    class MockRingData(object):
        # Minimal RingData stand-in exposing only the replica table.
        def __init__(self, num_replica):
            self._replica2part2dev_id = [0] * num_replica

    for policy, ring_replicas in zip(policies, actual_load_ring_replicas):
        with mock.patch('swift.common.ring.ring.RingData.load',
                        return_value=MockRingData(ring_replicas)):
            necessary_replica_num = \
                policy.ec_n_unique_fragments * policy.ec_duplication_factor
            with mock.patch(
                    'swift.common.ring.ring.validate_configuration'):
                msg = 'EC ring for policy %s needs to be configured with ' \
                      'exactly %d replicas.' % \
                      (policy.name, necessary_replica_num)
                self.assertRaisesWithMessage(RingLoadError, msg,
                                             policy.load_ring, 'mock')
def test_storage_policy_get_info(self):
    """get_info(config=True) exposes full policy details; config=False
    gives the abbreviated public view (deprecated/default only when
    relevant)."""
    test_policies = [
        StoragePolicy(0, 'zero', is_default=True),
        StoragePolicy(1, 'one', is_deprecated=True,
                      aliases='tahi, uno'),
        ECStoragePolicy(10, 'ten',
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=10, ec_nparity=3),
        ECStoragePolicy(11, 'done', is_deprecated=True,
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=10, ec_nparity=3),
    ]
    policies = StoragePolicyCollection(test_policies)
    # Expected get_info output keyed by (policy index, config flag).
    expected = {
        # default replication
        (0, True): {
            'name': 'zero',
            'aliases': 'zero',
            'default': True,
            'deprecated': False,
            'policy_type': REPL_POLICY
        },
        (0, False): {
            'name': 'zero',
            'aliases': 'zero',
            'default': True,
        },
        # deprecated replication
        (1, True): {
            'name': 'one',
            'aliases': 'one, tahi, uno',
            'default': False,
            'deprecated': True,
            'policy_type': REPL_POLICY
        },
        (1, False): {
            'name': 'one',
            'aliases': 'one, tahi, uno',
            'deprecated': True,
        },
        # enabled ec
        (10, True): {
            'name': 'ten',
            'aliases': 'ten',
            'default': False,
            'deprecated': False,
            'policy_type': EC_POLICY,
            'ec_type': DEFAULT_TEST_EC_TYPE,
            'ec_num_data_fragments': 10,
            'ec_num_parity_fragments': 3,
            'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
            'ec_duplication_factor': 1,
        },
        (10, False): {
            'name': 'ten',
            'aliases': 'ten',
        },
        # deprecated ec
        (11, True): {
            'name': 'done',
            'aliases': 'done',
            'default': False,
            'deprecated': True,
            'policy_type': EC_POLICY,
            'ec_type': DEFAULT_TEST_EC_TYPE,
            'ec_num_data_fragments': 10,
            'ec_num_parity_fragments': 3,
            'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
            'ec_duplication_factor': 1,
        },
        (11, False): {
            'name': 'done',
            'aliases': 'done',
            'deprecated': True,
        },
        # enabled ec with ec_duplication
        # NOTE(review): no policy with index 12 exists in test_policies
        # above, so the two entries below are never consulted by the
        # assertion loop — dead fixture data, or a policy is missing.
        (12, True): {
            'name': 'twelve',
            'aliases': 'twelve',
            'default': False,
            'deprecated': False,
            'policy_type': EC_POLICY,
            'ec_type': DEFAULT_TEST_EC_TYPE,
            'ec_num_data_fragments': 10,
            'ec_num_parity_fragments': 3,
            'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
            'ec_duplication_factor': 2,
        },
        (12, False): {
            'name': 'twelve',
            'aliases': 'twelve',
        },
    }
    self.maxDiff = None
    for policy in policies:
        expected_info = expected[(int(policy), True)]
        self.assertEqual(policy.get_info(config=True), expected_info)
        expected_info = expected[(int(policy), False)]
        self.assertEqual(policy.get_info(config=False), expected_info)
def test_ec_fragment_size_cached(self):
    """policy.fragment_size memoizes pyeclib's get_segment_info result."""
    policy = ECStoragePolicy(
        0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE,
        ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3),
        ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True)
    # Compute the reference fragment size with an independent driver.
    reference_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE,
                                k=2, m=1)
    expected_fragment_size = reference_driver.get_segment_info(
        DEFAULT_EC_OBJECT_SEGMENT_SIZE,
        DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size']
    with mock.patch.object(
            policy.pyeclib_driver, 'get_segment_info') as mocked:
        mocked.return_value = {
            'fragment_size': expected_fragment_size}
        for _ in range(10):
            self.assertEqual(expected_fragment_size,
                             policy.fragment_size)
        # Repeated property accesses must hit the cache, not the driver.
        self.assertEqual(1, mocked.call_count)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from unittest import mock
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0.bgp import speaker as bgp_speaker
from neutronclient.tests.unit import test_cli20
class CLITestV20BGPSpeakerJSON(test_cli20.CLITestV20Base):
non_admin_status_resources = ['bgp_speaker']
def test_create_bgp_speaker_with_minimal_options(self):
    """Create a BGP speaker supplying only the mandatory arguments.

    ip_version is expected to default to 4.
    """
    cmd = bgp_speaker.CreateSpeaker(test_cli20.MyApp(sys.stdout),
                                    None)
    speaker_name = 'my-name'
    speaker_id = 'my-id'
    asnum = '1'
    self._test_create_resource(
        'bgp_speaker', cmd, speaker_name, speaker_id,
        [speaker_name, '--local-as', asnum, ],
        ['name', 'local_as', 'ip_version'],
        [speaker_name, asnum, 4])
def test_create_ipv4_bgp_speaker_with_all_params(self):
    """Create an IPv4 BGP speaker with every supported option."""
    cmd = bgp_speaker.CreateSpeaker(test_cli20.MyApp(sys.stdout),
                                    None)
    speaker_name = 'my-name'
    speaker_id = 'my-id'
    asnum = '1'
    cli_args = [speaker_name,
                '--local-as', asnum,
                '--ip-version', '4',
                '--advertise-floating-ip-host-routes', 'True',
                '--advertise-tenant-networks', 'True']
    expected_fields = ['name', 'local_as', 'ip_version',
                       'advertise_floating_ip_host_routes',
                       'advertise_tenant_networks']
    expected_values = [speaker_name, asnum, 4, 'True', 'True']
    self._test_create_resource('bgp_speaker', cmd, speaker_name,
                               speaker_id, cli_args,
                               expected_fields, expected_values)
def test_create_ipv6_bgp_speaker_with_all_params(self):
    """Create an IPv6 BGP speaker with every supported option."""
    cmd = bgp_speaker.CreateSpeaker(test_cli20.MyApp(sys.stdout),
                                    None)
    speaker_name = 'my-name'
    speaker_id = 'my-id'
    asnum = '65535'
    cli_args = [speaker_name,
                '--local-as', asnum,
                '--ip-version', '6',
                '--advertise-floating-ip-host-routes', 'True',
                '--advertise-tenant-networks', 'True']
    expected_fields = ['name', 'local_as', 'ip_version',
                       'advertise_floating_ip_host_routes',
                       'advertise_tenant_networks']
    expected_values = [speaker_name, asnum, 6, 'True', 'True']
    self._test_create_resource('bgp_speaker', cmd, speaker_name,
                               speaker_id, cli_args,
                               expected_fields, expected_values)
def test_create_bgp_speaker_with_invalid_min_local_asnum(self):
    """A local-as below MIN_AS_NUM is rejected with a range message."""
    cmd = bgp_speaker.CreateSpeaker(test_cli20.MyApp(sys.stdout),
                                    None)
    speaker_name = 'my-name'
    asnum = '0'
    exc = self.assertRaises(exceptions.CommandError,
                            self._test_create_resource,
                            'bgp_speaker', cmd, speaker_name, 'my-id',
                            [speaker_name,
                             '--local-as', asnum],
                            ['name', 'local_as'],
                            [speaker_name, asnum])
    self.assertEqual('local-as "0" should be an integer [%s:%s].' %
                     (bgp_speaker.MIN_AS_NUM, bgp_speaker.MAX_AS_NUM),
                     str(exc))
def test_create_bgp_speaker_with_invalid_max_local_asnum(self):
# Create BGP Speaker with invalid maximum local-asnum.
resource = 'bgp_speaker'
cmd = bgp_speaker.CreateSpeaker(test_cli20.MyApp(sys.stdout),
None)
name = 'my-name'
my_id = 'my-id'
local_asnum = '4294967296'
args = [name,
'--local-as', local_asnum]
position_names = ['name', 'local_as', ]
position_values = [name, local_asnum, ]
exc = self.assertRaises(exceptions.CommandError,
self._test_create_resource,
resource, cmd, name, my_id, args,
position_names, position_values)
self.assertEqual('local-as "4294967296" should be an '
'integer [%s:%s].' %
(bgp_speaker.MIN_AS_NUM, bgp_speaker.MAX_AS_NUM),
str(exc))
def test_update_bgp_speaker(self):
# Update BGP Speaker:
# myid --advertise-tenant-networks True
# --advertise-floating-ip-host-routes False
resource = 'bgp_speaker'
cmd = bgp_speaker.UpdateSpeaker(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid',
'--name', 'new-name',
'--advertise-tenant-networks', 'True',
'--advertise-floating-ip-host-routes',
'False'],
{'name': 'new-name',
'advertise_tenant_networks': 'True',
'advertise_floating_ip_host_routes':
'False'})
def test_update_bgp_speaker_exception(self):
# Update BGP Speaker: myid.
resource = 'bgp_speaker'
cmd = bgp_speaker.UpdateSpeaker(test_cli20.MyApp(sys.stdout),
None)
self.assertRaises(exceptions.CommandError,
self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_list_bgp_speaker(self):
# List all BGP Speakers.
resources = "bgp_speakers"
cmd = bgp_speaker.ListSpeakers(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
@mock.patch.object(bgp_speaker.ListSpeakers, "extend_list")
def test_list_bgp_speaker_pagination(self, mock_extend_list):
# List all BGP Speakers with pagination support.
cmd = bgp_speaker.ListSpeakers(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination("bgp_speakers",
cmd)
mock_extend_list.assert_called_once_with(test_cli20.IsA(list),
mock.ANY)
def test_list_bgp_speaker_sort(self):
# sorted list: bgp-speaker-list --sort-key name --sort-key id
# --sort-key asc --sort-key desc
resources = "bgp_speakers"
cmd = bgp_speaker.ListSpeakers(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_bgp_speaker_limit(self):
# size (1000) limited list: bgp-speaker-list -P.
resources = "bgp_speakers"
cmd = bgp_speaker.ListSpeakers(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_bgp_speaker(self):
# Show BGP Speaker: --fields id --fields name myid.
resource = 'bgp_speaker'
cmd = bgp_speaker.ShowSpeaker(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_delete_bgp_speaker(self):
# Delete BGP Speaker: bgp_speaker_id.
resource = 'bgp_speaker'
cmd = bgp_speaker.DeleteSpeaker(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def _test_add_remove_peer(self, action, cmd, args):
"""Add or Remove BGP Peer to/from a BGP Speaker."""
resource = 'bgp_speaker'
subcmd = '%s_bgp_peer' % action
body = {'bgp_peer_id': 'peerid'}
if action == 'add':
retval = {'bgp_peer': 'peerid'}
retval = self.client.serialize(retval)
expected_code = 200
else:
retval = None
expected_code = 204
self._test_update_resource_action(resource, cmd, 'myid',
subcmd, args, body, expected_code,
retval)
def test_add_peer_to_bgp_speaker(self):
# Add peer to BGP speaker: myid peer_id=peerid
cmd = bgp_speaker.AddPeerToSpeaker(test_cli20.MyApp(sys.stdout),
None)
args = ['myid', 'peerid']
self._test_add_remove_peer('add', cmd, args)
def test_remove_peer_from_bgp_speaker(self):
# Remove peer from BGP speaker: myid peer_id=peerid
cmd = bgp_speaker.RemovePeerFromSpeaker(test_cli20.MyApp(sys.stdout),
None)
args = ['myid', 'peerid']
self._test_add_remove_peer('remove', cmd, args)
def _test_add_remove_network(self, action, cmd, args):
# Add or Remove network to/from a BGP Speaker.
resource = 'bgp_speaker'
subcmd = '%s_gateway_network' % action
body = {'network_id': 'netid'}
if action == 'add':
retval = {'network': 'netid'}
retval = self.client.serialize(retval)
expected_code = 200
else:
retval = None
expected_code = 204
self._test_update_resource_action(resource, cmd, 'myid',
subcmd, args, body, expected_code,
retval)
def test_add_network_to_bgp_speaker(self):
# Add peer to BGP speaker: myid network_id=netid
cmd = bgp_speaker.AddNetworkToSpeaker(test_cli20.MyApp(sys.stdout),
None)
args = ['myid', 'netid']
self._test_add_remove_network('add', cmd, args)
def test_remove_network_from_bgp_speaker(self):
# Remove network from BGP speaker: myid network_id=netid
cmd = bgp_speaker.RemoveNetworkFromSpeaker(
test_cli20.MyApp(sys.stdout), None)
args = ['myid', 'netid']
self._test_add_remove_network('remove', cmd, args)
def test_list_routes_advertised_by_a_bgp_speaker(self):
# Retrieve advertised route list
resources = 'advertised_routes'
cmd = bgp_speaker.ListRoutesAdvertisedBySpeaker(
test_cli20.MyApp(sys.stdout), None)
bs_id = 'bgp_speaker_id1'
path = ((self.client.bgp_speaker_path + '/get_advertised_routes') %
bs_id)
self._test_list_resources(resources, cmd, base_args=[bs_id],
path=path)
|
|
import datetime
from dateutil.relativedelta import relativedelta
from six.moves.urllib.parse import urlencode
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from timepiece import utils
from timepiece.tests.base import ViewTestMixin
from timepiece.tests import factories
from timepiece.entries.models import Entry, ProjectHours
from timepiece.entries.views import Dashboard
class DashboardViewTestCase(ViewTestMixin, TestCase):
    """Tests the data that is passed to the dashboard template."""
    def setUp(self):
        # All tests are pinned to the week containing Wed 2012-11-07 so the
        # entry dates created below land deterministically in/out of the week.
        self.today = datetime.date(2012, 11, 7)
        self.this_week = utils.get_week_start(self.today)
        self.next_week = self.this_week + relativedelta(days=7)
        get_params = {'week_start': self.this_week.strftime('%Y-%m-%d')}
        self.url = reverse('dashboard') + '?' + urlencode(get_params)
        self.user = factories.User()
        # Viewing dashboard content requires the 'can_clock_in' permission.
        self.permission = Permission.objects.get(codename='can_clock_in')
        self.user.user_permissions.add(self.permission)
        self.login_user(self.user)
        # Shared defaults for every entry created via _create_entry().
        self.project = factories.Project()
        self.activity = factories.Activity()
        self.location = factories.Location()
        self.status = Entry.UNVERIFIED
    def _create_entry(self, start_time, end_time=None, user=None):
        """
        Creates an entry using default values. If end time is not given, the
        entry is considered active.
        """
        data = {
            'user': user or self.user,
            'project': self.project,
            'activity': self.activity,
            'location': self.location,
            'status': self.status,
            'start_time': start_time,
        }
        if end_time:
            data['end_time'] = end_time
        return factories.Entry(**data)
    def _create_active_entry(self):
        # Friday of the fixed test week, with no end time (=> active entry).
        start_time = datetime.datetime(2012, 11, 9, 0)
        return self._create_entry(start_time)
    def _create_entries(self):
        # Create 5 consecutive closed 4-hour entries for self.user starting
        # Monday of the test week; returns how many entries were created.
        count = 5
        start_time = datetime.datetime(2012, 11, 5, 8)
        end_time = datetime.datetime(2012, 11, 5, 12)
        for i in range(count):
            start_time = end_time + relativedelta(seconds=1)
            end_time += relativedelta(hours=4)
            self._create_entry(start_time, end_time)
        return count
    def _create_others_entries(self):
        # Create 5 active entries, each owned by a distinct other user.
        count = 5
        start_time = datetime.datetime(2012, 11, 6, 12)
        for i in range(count):
            user = factories.User()
            self._create_entry(start_time, user=user)
        return count
    def test_unauthenticated_user(self):
        """Unauthenticated users should be redirected to login view."""
        self.client.logout()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
    def test_unprivileged_user(self):
        """Unprivileged users should not see any content."""
        self.user.user_permissions.remove(self.permission)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        # TODO: better test for whether this is working.
    def test_get(self):
        """Get without param gets entries for this week."""
        response = self.client.get(reverse('dashboard'))
        self.assertEqual(response.status_code, 200)
        # Without ?week_start= the view defaults to the current real week.
        self.assertEqual(
            response.context['week_start'],
            utils.get_week_start().date())
        self.assertEqual(
            response.context['week_end'],
            utils.get_week_start().date() + relativedelta(days=6))
    def test_active_entry(self):
        """Active entry should be given if it exists."""
        active_entry = self._create_active_entry()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['active_entry'], active_entry)
    def test_no_active_entry(self):
        """Active entry should be None if it doesn't exist."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['active_entry'], None)
    def test_weeks_entries(self):
        """Week's entries list should include active entry."""
        entry_count = self._create_entries()
        active_entry = self._create_active_entry()
        entry_count += 1
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(active_entry in response.context['week_entries'])
        self.assertEqual(len(response.context['week_entries']), entry_count)
    def test_no_weeks_entries(self):
        """Week's entries list should be empty if no entries this week."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['week_entries']), 0)
    def test_other_active_entries(self):
        """Others' entries list should exclude this user's active entry."""
        entry_count = self._create_others_entries()
        active_entry = self._create_active_entry()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        others_active_entries = response.context['others_active_entries']
        self.assertFalse(active_entry in others_active_entries)
        self.assertEqual(len(others_active_entries), entry_count)
    def test_no_other_active_entries(self):
        """Others' entries list should be empty if no other active entries."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.context['others_active_entries']), 0)
class ProcessProgressTestCase(TestCase):
    """Tests for process_progress."""
    def setUp(self):
        # Fixed reference week (containing Wed 2012-11-07) so that entries
        # and ProjectHours assignments line up deterministically.
        self.today = datetime.date(2012, 11, 7)
        self.this_week = utils.get_week_start(self.today)
        self.next_week = self.this_week + relativedelta(days=7)
        # Shared defaults for every entry created via _create_entry().
        self.user = factories.User()
        self.project = factories.Project()
        self.activity = factories.Activity()
        self.location = factories.Location()
        self.status = Entry.UNVERIFIED
    def _create_entry(self, start_time, end_time=None, project=None):
        # Create an entry for self.user; an omitted end_time leaves the
        # entry active, and project defaults to self.project.
        data = {
            'user': self.user,
            'project': project or self.project,
            'activity': self.activity,
            'location': self.location,
            'status': self.status,
            'start_time': start_time,
        }
        if end_time:
            data['end_time'] = end_time
        return factories.Entry(**data)
    def _create_hours(self, hours, project=None):
        # Assign `hours` of ProjectHours for self.user on the current week.
        data = {
            'user': self.user,
            'project': project or self.project,
            'week_start': self.this_week,
            'hours': hours,
        }
        return factories.ProjectHours(**data)
    def _get_progress(self):
        # Run Dashboard.process_progress over all entries and assignments.
        entries = Entry.objects.all()
        assignments = ProjectHours.objects.all()
        view = Dashboard()
        return view.process_progress(entries, assignments)
    def _check_progress(self, progress, project, assigned, worked):
        # Assert a single progress dict matches the expected triple.
        self.assertEqual(progress['project'], project)
        self.assertEqual(progress['assigned'], assigned)
        self.assertEqual(progress['worked'], worked)
    def test_progress(self):
        """Progress when work has been done for an assigned project."""
        start_time = datetime.datetime(2012, 11, 7, 8, 0)
        end_time = datetime.datetime(2012, 11, 7, 12, 0)
        self._create_entry(start_time, end_time)
        worked_hours = 4
        assigned_hours = 5
        self._create_hours(assigned_hours)
        progress = self._get_progress()
        self.assertEqual(len(progress), 1)
        self._check_progress(
            progress[0], self.project, assigned_hours, worked_hours)
    def test_work_with_no_assignment(self):
        """Progress when work has been done on an unassigned project."""
        start_time = datetime.datetime(2012, 11, 7, 8, 0)
        end_time = datetime.datetime(2012, 11, 7, 12, 0)
        self._create_entry(start_time, end_time)
        worked_hours = 4
        progress = self._get_progress()
        self.assertEqual(len(progress), 1)
        self._check_progress(progress[0], self.project, 0, worked_hours)
    def test_assignment_with_no_work(self):
        """Progress when no work has been done on an assigned project."""
        assigned_hours = 5
        self._create_hours(assigned_hours)
        progress = self._get_progress()
        self.assertEqual(len(progress), 1)
        self._check_progress(progress[0], self.project, assigned_hours, 0)
    def test_ordering(self):
        """Progress list should be ordered by project name."""
        projects = [
            factories.Project(name='a'),
            factories.Project(name='b'),
            factories.Project(name='c'),
        ]
        # Create one 4-hour entry and one assignment per project, on
        # consecutive days, then verify alphabetical ordering by name.
        for i in range(3):
            start_time = datetime.datetime(2012, 11, 5 + i, 8, 0)
            end_time = datetime.datetime(2012, 11, 5 + i, 12, 0)
            self._create_entry(start_time, end_time, projects[i])
            self._create_hours(5 + 5 * i, projects[i])
        progress = self._get_progress()
        self.assertEqual(len(progress), 3)
        self.assertEqual(progress[0]['project'], projects[0])
        self.assertEqual(progress[1]['project'], projects[1])
        self.assertEqual(progress[2]['project'], projects[2])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# this file is part of 'RAX-AutoScaler'
#
# Copyright 2014 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
import requests.exceptions
import pyrax
import pyrax.exceptions
from raxas import common
from raxas import enums
class ScalingGroup(object):
    """Wraps one Rackspace Auto Scale group described in the config file.

    Lazily fetches (and caches) the pyrax scaling group, its state and its
    active servers, and exposes helpers to execute scaling policies and
    their associated webhooks.
    """
    def __init__(self, config, group_name):
        """Instantiate a ScalingGroup object.

        :param config: Configuration dict for this scaling group
        :param group_name: Name of the group to print in the log file
        """
        self._group_name = group_name
        self._config = self.check_config(config)
        # Lazily-populated caches; see the corresponding properties below.
        self._scaling_group = None
        self._servers_state = None
        self._active_servers = None
    @classmethod
    def check_config(cls, config):
        """Validate a group configuration dict.

        Exits the process on fatal errors; warns about pre-v0.3 config
        files that lack a 'plugins' section.

        :param config: configuration dict to validate
        :returns: the (unmodified) config dict
        """
        logger = common.get_logger()
        # These three keys are mandatory; abort if any is missing.
        if None in [
                config.get('group_id'),
                config.get('scale_up_policy'),
                config.get('scale_down_policy')]:
            common.exit_with_error('Invalid group configuration')
        # Fix: logger.warn is deprecated in favour of logger.warning, and
        # the original if/else returned config from both branches.
        if config.get('plugins') is None:
            logger.warning('DeprecationWarning: You are using a deprecated config file please update'
                           ' your configuration file to v0.3 standard.')
        return config
    @property
    def plugin_config(self):
        """Plugin configuration dict.

        Pre-v0.3 config files carried the raxmon settings at the top level;
        translate them once into the v0.3 'plugins' layout.
        """
        if self._config.get('plugins') is None:
            self._config['plugins'] = \
                {
                    'raxmon':
                    {
                        'scale_up_threshold': self._config.get('scale_up_threshold', 0.6),
                        'scale_down_threshold': self._config.get('scale_down_threshold', 0.4),
                        'check_config': self._config.get('check_config', '{}'),
                        'metric_name': self._config.get('metric_name', '1m'),
                        'check_type': self._config.get('check_type', 'agent.load_average')
                    }
                }
        return self._config.get('plugins')
    @property
    def group_uuid(self):
        """UUID of the scaling group, read from the 'group_id' config key."""
        return self.get_group_value('group_id')
    @property
    def launch_config(self):
        """Launch configuration of the group, or None if unavailable."""
        try:
            return self.scaling_group.get_launch_config()
        except AttributeError:
            # self.scaling_group is None when the API lookup failed.
            return None
    @property
    def scaling_group(self):
        """The pyrax scaling group object, fetched once and cached."""
        if self._scaling_group is None:
            logger = common.get_logger()
            autoscale_api = pyrax.autoscale
            try:
                self._scaling_group = autoscale_api.get(self.group_uuid)
            except pyrax.exc.PyraxException as error:
                logger.error('Error: Unable to get scaling group \'%s\': %s',
                             self.group_uuid, error)
                return None
        return self._scaling_group
    @property
    def state(self):
        """Cached group state dict, or None if it cannot be fetched."""
        if self._servers_state is None:
            try:
                self._servers_state = self.scaling_group.get_state()
            except AttributeError:
                # self.scaling_group is None when the API lookup failed.
                return None
        return self._servers_state
    @property
    def active_servers(self):
        """List of active server ids, or [] if the state is unavailable."""
        if self._active_servers is None:
            try:
                self._active_servers = self.state.get('active', [])
            except AttributeError:
                # self.state is None when the API lookup failed.
                return []
        return self._active_servers
    @property
    def is_master(self):
        """This property checks scaling group state and determines if this node is a master.

        The first one (or two) entries of the active-server list are
        treated as masters.

        :returns: enums.NodeStatus
        """
        logger = common.get_logger()
        node_id = common.get_machine_uuid(self)
        if node_id is None:
            logger.error('Failed to get server uuid')
            return enums.NodeStatus.Unknown
        if not self.active_servers:
            logger.error('Unknown cluster state')
            return enums.NodeStatus.Unknown
        # Equivalent to the original: first server, plus the second if any.
        masters = self.active_servers[:2]
        if node_id in masters:
            logger.info('Node is a master, continuing')
            return enums.NodeStatus.Master
        else:
            logger.info('Node is not a master, nothing to do. Exiting')
            return enums.NodeStatus.Slave
    def get_group_value(self, key):
        """This function returns value in autoscale_groups section associated with
        provided key.

        :param key: key name (matched case-insensitively, lowercased)
        :returns: value associated with key, or None (logged as an error)
        """
        logger = common.get_logger()
        value = self._config.get(key.lower())
        if value is None:
            logger.error('Error: unable to get value for key "%s" in group "%s"',
                         key, self._group_name)
        return value
    def get_webhook_values(self, policy, hook):
        """This function returns value in webhooks section of json file which is
        associated with provided key.

        :param policy: raxas.enums.ScaleDirection
        :param hook: raxas.enums.HookType
        :returns: value associated with key, or None when not configured
        """
        logger = common.get_logger()
        policy = 'scale_%s' % policy.name.lower()
        hook = hook.name.lower()
        try:
            return self._config['webhooks'][policy][hook]
        except KeyError:
            logger.error('Error: unable to get config value for '
                         '[\'%s\'][\'webhooks\'][\'%s\'][\'%s\']',
                         self._group_name, policy, hook)
            return None
    def execute_webhook(self, policy, hook):
        """This function makes webhook calls.

        :param policy: raxas.enums.ScaleDirection
        :param hook: raxas.enums.HookType
        """
        logger = common.get_logger()
        logger.info('Executing webhook: scale_%s:%s', policy.name, hook.name)
        urls = self.get_webhook_values(policy, hook)
        if urls is None:
            # Fix: get_webhook_values() returns None when the hook is not
            # configured; iterating None raised a TypeError here.
            return
        data = json.dumps(self._config)
        for url in urls:
            logger.info('Sending POST request to url: \'%s\'', url)
            try:
                # NOTE(review): 'data' is already a JSON string, so passing
                # it via 'json=' encodes it a second time. Left as-is to
                # avoid changing the wire format consumers may rely on.
                response = requests.post(url, json=data)
                logger.info('Received status code %d from url: \'%s\'', response.status_code, url)
            except requests.exceptions.RequestException as error:
                logger.error(error)
    def execute_policy(self, policy):
        """Execute the scale-up or scale-down policy for this group.

        :param policy: raxas.enums.ScaleDirection
        :returns: enums.ScaleEvent (Success, Error or NoAction)
        """
        logger = common.get_logger()
        policy_id = self.get_group_value('scale_%s_policy' % policy.name)
        # Never scale below a single active server.
        if len(self.active_servers) == 1 and policy == enums.ScaleDirection.Down:
            logger.info('Current active server count is 1, will not scale down')
            return enums.ScaleEvent.NoAction
        try:
            self.scaling_group.get_policy(policy_id).execute()
        except pyrax.exceptions.PyraxException as error:
            logger.error('Error scaling %s: %s', policy.name, error)
            return enums.ScaleEvent.Error
        return enums.ScaleEvent.Success
|
|
import mock
import pytest
from urlparse import urlparse
from addons.wiki.tests.factories import NodeWikiFactory
from api.base.settings.defaults import API_BASE
from api.base.settings import osf_settings
from api_tests import utils as test_utils
from framework.auth import core
from osf.models import Guid
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
CommentFactory,
RegistrationFactory,
PrivateLinkFactory,
)
from rest_framework import exceptions
@pytest.mark.django_db
class CommentDetailMixin(object):
    @pytest.fixture()
    def user(self):
        # The comment author (a contributor on the project).
        return AuthUserFactory()
    @pytest.fixture()
    def contributor(self):
        # A project contributor who did not write the comment.
        return AuthUserFactory()
    @pytest.fixture()
    def non_contrib(self):
        # A user with no relationship to the project.
        return AuthUserFactory()
    # Abstract fixtures: each concrete subclass must override these to
    # supply a private project with a comment, plus URL/payload helpers.
    @pytest.fixture()
    def private_project(self):
        raise NotImplementedError
    @pytest.fixture()
    def comment(self):
        raise NotImplementedError
    @pytest.fixture()
    def private_url(self):
        raise NotImplementedError
    @pytest.fixture()
    def payload(self):
        raise NotImplementedError
    # Abstract fixtures for a public project with comments.
    @pytest.fixture()
    def public_project(self):
        raise NotImplementedError
    @pytest.fixture()
    def public_comment(self):
        raise NotImplementedError
    @pytest.fixture()
    def public_comment_reply(self):
        raise NotImplementedError
    @pytest.fixture()
    def public_url(self):
        raise NotImplementedError
    @pytest.fixture()
    def public_comment_payload(self):
        raise NotImplementedError
    # Abstract fixtures for a registration with comments.
    @pytest.fixture()
    def registration(self):
        raise NotImplementedError
    @pytest.fixture()
    def registration_url(self):
        raise NotImplementedError
    @pytest.fixture()
    def registration_comment(self):
        raise NotImplementedError
    @pytest.fixture()
    def comment_url(self):
        raise NotImplementedError
    @pytest.fixture()
    def registration_comment_reply(self):
        raise NotImplementedError
    @pytest.fixture()
    def replies_url(self):
        raise NotImplementedError
@pytest.fixture()
def set_up_payload(self):
def payload(target_id, content='test', has_content=True):
payload = {
'data': {
'id': target_id,
'type': 'comments',
'attributes': {
'content': 'Updating this comment',
'deleted': False
}
}
}
if has_content:
payload['data']['attributes']['content'] = content
return payload
return payload
def test_private_node_comments_related_auth(self, app, user, contributor, non_contrib, comment, private_url):
# test_private_node_logged_in_contributor_can_view_comment
res = app.get(private_url, auth=user.auth)
assert res.status_code == 200
assert comment._id == res.json['data']['id']
assert comment.content == res.json['data']['attributes']['content']
# def test_private_node_logged_in_non_contrib_cannot_view_comment
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_private_node_logged_out_user_cannot_view_comment
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_private_node_user_with_private_and_anonymous_link_misc(self, app, private_project, comment):
# def test_private_node_user_with_private_link_can_see_comment
private_link = PrivateLinkFactory(anonymous=False)
private_link.nodes.add(private_project)
private_link.save()
res = app.get('/{}comments/{}/'.format(API_BASE, comment._id), {'view_only': private_link.key}, expect_errors=True)
assert res.status_code == 200
assert comment._id == res.json['data']['id']
assert comment.content == res.json['data']['attributes']['content']
# test_private_node_user_with_anonymous_link_cannot_see_commenter_info
private_link = PrivateLinkFactory(anonymous=True)
private_link.nodes.add(private_project)
private_link.save()
res = app.get('/{}comments/{}/'.format(API_BASE, comment._id), {'view_only': private_link.key})
assert res.status_code == 200
assert comment._id == res.json['data']['id']
assert comment.content == res.json['data']['attributes']['content']
assert 'user' not in res.json['data']['relationships']
# test_private_node_user_with_anonymous_link_cannot_see_mention_info
comment.content = 'test with [@username](userlink) and @mention'
comment.save()
res = app.get('/{}comments/{}/'.format(API_BASE, comment._id), {'view_only': private_link.key})
assert res.status_code == 200
assert comment._id == res.json['data']['id']
assert 'test with @A User and @mention' == res.json['data']['attributes']['content']
def test_public_node_comment_auth_misc(self, app, user, non_contrib, public_project, public_url, public_comment, registration_comment, comment_url):
# test_public_node_logged_in_contributor_can_view_comment
res = app.get(public_url, auth=user.auth)
assert res.status_code == 200
assert public_comment._id == res.json['data']['id']
assert public_comment.content == res.json['data']['attributes']['content']
# test_public_node_logged_in_non_contrib_can_view_comment
res = app.get(public_url, auth=non_contrib.auth)
assert res.status_code == 200
assert public_comment._id == res.json['data']['id']
assert public_comment.content == res.json['data']['attributes']['content']
# test_public_node_logged_out_user_can_view_comment
res = app.get(public_url)
assert res.status_code == 200
assert public_comment._id == res.json['data']['id']
assert public_comment.content == res.json['data']['attributes']['content']
# test_registration_logged_in_contributor_can_view_comment
res = app.get(comment_url, auth=user.auth)
assert res.status_code == 200
assert registration_comment._id == res.json['data']['id']
assert registration_comment.content == res.json['data']['attributes']['content']
# test_public_node_user_with_private_link_can_view_comment
private_link = PrivateLinkFactory(anonymous=False)
private_link.nodes.add(public_project)
private_link.save()
res = app.get('/{}comments/{}/'.format(API_BASE, public_comment._id), {'view_only': private_link.key}, expect_errors=True)
assert public_comment._id == res.json['data']['id']
assert public_comment.content == res.json['data']['attributes']['content']
def test_comment_has_multiple_links(self, app, user, public_url, public_project, public_comment, public_comment_reply, comment_url, registration):
res = app.get(public_url)
assert res.status_code == 200
# test_comment_has_user_link
url_user = res.json['data']['relationships']['user']['links']['related']['href']
expected_url = '/{}users/{}/'.format(API_BASE, user._id)
assert urlparse(url_user).path == expected_url
# test_comment_has_node_link
url_node = res.json['data']['relationships']['node']['links']['related']['href']
expected_url = '/{}nodes/{}/'.format(API_BASE, public_project._id)
assert urlparse(url_node).path == expected_url
# test_comment_has_replies_link
url_replies = res.json['data']['relationships']['replies']['links']['related']['href']
uri = test_utils.urlparse_drop_netloc(url_replies)
res_uri = app.get(uri)
assert res_uri.status_code == 200
assert res_uri.json['data'][0]['type'] == 'comments'
# test_comment_has_reports_link
url_reports = res.json['data']['relationships']['reports']['links']['related']['href']
expected_url = '/{}comments/{}/reports/'.format(API_BASE, public_comment._id)
assert urlparse(url_reports).path == expected_url
# test_registration_comment_has_node_link
res = app.get(comment_url, auth=user.auth)
url = res.json['data']['relationships']['node']['links']['related']['href']
expected_url = '/{}registrations/{}/'.format(API_BASE, registration._id)
assert res.status_code == 200
assert urlparse(url).path == expected_url
def test_private_node_comment_auth_misc(self, app, user, non_contrib, private_url, payload):
# test_private_node_only_logged_in_contributor_commenter_can_update_comment
res = app.put_json_api(private_url, payload, auth=user.auth)
assert res.status_code == 200
assert payload['data']['attributes']['content'] == res.json['data']['attributes']['content']
# test_private_node_logged_in_non_contrib_cannot_update_comment
res = app.put_json_api(private_url, payload, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_private_node_logged_out_user_cannot_update_comment
res = app.put_json_api(private_url, payload, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_public_node_comment_auth_misc(self, app, user, contributor, non_contrib, public_url, public_comment, public_comment_payload):
# test_public_node_only_contributor_commenter_can_update_comment
res = app.put_json_api(public_url, public_comment_payload, auth=user.auth)
assert res.status_code == 200
assert public_comment_payload['data']['attributes']['content'] == res.json['data']['attributes']['content']
# test_public_node_contributor_cannot_update_other_users_comment
res = app.put_json_api(public_url, public_comment_payload, auth=contributor.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_public_node_non_contrib_cannot_update_other_users_comment
res = app.put_json_api(public_url, public_comment_payload, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_public_node_logged_out_user_cannot_update_comment
res = app.put_json_api(public_url, public_comment_payload, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_update_comment_misc(self, app, user, private_url, comment, set_up_payload):
# test_update_comment_cannot_exceed_max_length
content = ('c' * (osf_settings.COMMENT_MAXLENGTH + 3))
payload = set_up_payload(comment._id, content=content)
res = app.put_json_api(private_url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert (res.json['errors'][0]['detail'] ==
'Ensure this field has no more than {} characters.'.format(str(osf_settings.COMMENT_MAXLENGTH)))
# test_update_comment_cannot_be_empty
payload = set_up_payload(comment._id, content='')
res = app.put_json_api(private_url, payload, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
def test_private_node_only_logged_in_contributor_commenter_can_delete_comment(self, app, user, private_url):
res = app.delete_json_api(private_url, auth=user.auth)
assert res.status_code == 204
def test_private_node_only_logged_in_contributor_commenter_can_delete_own_reply(self, app, user, private_project, comment):
reply_target = Guid.load(comment._id)
reply = CommentFactory(node=private_project, target=reply_target, user=user)
reply_url = '/{}comments/{}/'.format(API_BASE, reply._id)
res = app.delete_json_api(reply_url, auth=user.auth)
assert res.status_code == 204
def test_private_node_only_logged_in_contributor_commenter_can_undelete_own_reply(self, app, user, private_project, comment, set_up_payload):
reply_target = Guid.load(comment._id)
reply = CommentFactory(node=private_project, target=reply_target, user=user)
reply_url = '/{}comments/{}/'.format(API_BASE, reply._id)
reply.is_deleted = True
reply.save()
payload = set_up_payload(reply._id, has_content=False)
res = app.patch_json_api(reply_url, payload, auth=user.auth)
assert res.status_code == 200
assert not res.json['data']['attributes']['deleted']
assert res.json['data']['attributes']['content'] == reply.content
def test_private_node_cannot_delete_comment_situation(self, app, user, contributor, non_contrib, private_url, comment):
# def test_private_node_contributor_cannot_delete_other_users_comment(self):
res = app.delete_json_api(private_url, auth=contributor.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_private_node_non_contrib_cannot_delete_comment(self):
res = app.delete_json_api(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_private_node_logged_out_user_cannot_delete_comment(self):
res = app.delete_json_api(private_url, expect_errors=True)
assert res.status_code == 401
# def test_private_node_user_cannot_delete_already_deleted_comment(self):
comment.is_deleted = True
comment.save()
res = app.delete_json_api(private_url, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Comment already deleted.'
def test_private_node_only_logged_in_contributor_commenter_can_undelete_comment(self, app, user, comment, set_up_payload):
    """The comment's author can restore a comment they deleted."""
    comment.is_deleted = True
    comment.save()
    undelete_payload = set_up_payload(comment._id, has_content=False)
    res = app.patch_json_api('/{}comments/{}/'.format(API_BASE, comment._id),
                             undelete_payload, auth=user.auth)
    assert res.status_code == 200
    assert not res.json['data']['attributes']['deleted']
    assert res.json['data']['attributes']['content'] == comment.content
def test_private_node_cannot_undelete_comment_situation(self, app, user, contributor, non_contrib, comment, set_up_payload):
    """Nobody but the author may restore a deleted private-node comment."""
    comment.is_deleted = True
    comment.save()
    url = '/{}comments/{}/'.format(API_BASE, comment._id)
    undelete_payload = set_up_payload(comment._id, has_content=False)
    # Other contributors and non-contributors are forbidden.
    for other in (contributor, non_contrib):
        res = app.patch_json_api(url, undelete_payload, auth=other.auth, expect_errors=True)
        assert res.status_code == 403
    # Unauthenticated users are rejected.
    res = app.patch_json_api(url, undelete_payload, expect_errors=True)
    assert res.status_code == 401
def test_public_node_only_logged_in_contributor_commenter_can_delete_comment(self, app, user, public_url):
    """The author of a public-node comment can delete it."""
    deletion = app.delete_json_api(public_url, auth=user.auth)
    assert deletion.status_code == 204
def test_public_node_cannot_delete_comment_situations(self, app, user, contributor, non_contrib, public_url, public_comment):
    """Deletion of a public-node comment is restricted to its author."""
    # Non-author contributors and non-contributors get 403.
    for other in (contributor, non_contrib):
        res = app.delete_json_api(public_url, auth=other.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
    # Anonymous requests get 401.
    res = app.delete_json_api(public_url, expect_errors=True)
    assert res.status_code == 401
    assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
    # Deleting an already-deleted comment is a 400.
    public_comment.is_deleted = True
    public_comment.save()
    res = app.delete_json_api(public_url, auth=user.auth, expect_errors=True)
    assert res.status_code == 400
    assert res.json['errors'][0]['detail'] == 'Comment already deleted.'
def test_private_node_deleted_comment_auth_misc(self, app, user, contributor, comment, private_project):
    """Only the author sees the content of a deleted private-node comment."""
    comment.is_deleted = True
    comment.save()
    url = '/{}comments/{}/'.format(API_BASE, comment._id)
    # The author still sees the deleted comment's content.
    res = app.get(url, auth=user.auth)
    assert res.status_code == 200
    assert res.json['data']['attributes']['content'] == comment.content
    # Another contributor gets the comment, but with its content withheld.
    res = app.get(url, auth=contributor.auth)
    assert res.status_code == 200
    assert res.json['data']['attributes']['content'] is None
    # Anonymous requests are rejected outright.
    res = app.get(url, expect_errors=True)
    assert res.status_code == 401
    assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
    # View-only links — anonymous or not — never reveal deleted content.
    for anonymous in (False, True):
        view_only_link = PrivateLinkFactory(anonymous=anonymous)
        view_only_link.nodes.add(private_project)
        view_only_link.save()
        res = app.get(url, {'view_only': view_only_link.key}, expect_errors=True)
        assert res.status_code == 200
        assert res.json['data']['attributes']['content'] is None
def test_public_node_deleted_comments_auth_misc(self, app, user, contributor, non_contrib, public_project, public_comment):
    """Deleted public-node comment content is visible only to its author."""
    public_comment.is_deleted = True
    public_comment.save()
    url = '/{}comments/{}/'.format(API_BASE, public_comment._id)
    # The author still sees the content.
    res = app.get(url, auth=user.auth)
    assert res.status_code == 200
    assert res.json['data']['attributes']['content'] == public_comment.content
    # Contributors and non-contributors see the comment with content withheld.
    for other in (contributor, non_contrib):
        res = app.get(url, auth=other.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['content'] is None
    # So do anonymous viewers.
    res = app.get(url)
    assert res.status_code == 200
    assert res.json['data']['attributes']['content'] is None
    # A view-only link does not reveal deleted content either.
    view_only_link = PrivateLinkFactory(anonymous=False)
    view_only_link.nodes.add(public_project)
    view_only_link.save()
    res = app.get(url, {'view_only': view_only_link.key}, expect_errors=True)
    assert res.status_code == 200
    assert res.json['data']['attributes']['content'] is None
class TestCommentDetailView(CommentDetailMixin):
    """Comment-detail API tests where the comment target is a node."""

    # private_project_with_comments
    @pytest.fixture()
    def private_project(self, user, contributor):
        private_project = ProjectFactory.create(is_public=False, creator=user)
        private_project.add_contributor(contributor, save=True)
        return private_project

    @pytest.fixture()
    def comment(self, user, private_project):
        return CommentFactory(node=private_project, user=user)

    @pytest.fixture()
    def private_url(self, comment):
        return '/{}comments/{}/'.format(API_BASE, comment._id)

    @pytest.fixture()
    def payload(self, comment, set_up_payload):
        return set_up_payload(comment._id)

    # public_project_with_comments
    @pytest.fixture()
    def public_project(self, user, contributor):
        public_project = ProjectFactory.create(is_public=True, creator=user)
        public_project.add_contributor(contributor, save=True)
        return public_project

    @pytest.fixture()
    def public_comment(self, user, public_project):
        return CommentFactory(node=public_project, user=user)

    @pytest.fixture()
    def public_comment_reply(self, user, public_comment, public_project):
        reply_target = Guid.load(public_comment._id)
        return CommentFactory(node=public_project, target=reply_target, user=user)

    @pytest.fixture()
    def public_url(self, public_comment):
        return '/{}comments/{}/'.format(API_BASE, public_comment._id)

    @pytest.fixture()
    def public_comment_payload(self, public_comment, set_up_payload):
        return set_up_payload(public_comment._id)

    # registration_with_comments
    @pytest.fixture()
    def registration(self, user):
        return RegistrationFactory(creator=user)

    @pytest.fixture()
    def registration_url(self, registration):
        return '/{}registrations/{}/'.format(API_BASE, registration._id)

    @pytest.fixture()
    def registration_comment(self, user, registration):
        return CommentFactory(node=registration, user=user)

    @pytest.fixture()
    def comment_url(self, registration_comment):
        return '/{}comments/{}/'.format(API_BASE, registration_comment._id)

    @pytest.fixture()
    def registration_comment_reply(self, user, registration, registration_comment):
        reply_target = Guid.load(registration_comment._id)
        return CommentFactory(node=registration, target=reply_target, user=user)

    @pytest.fixture()
    def replies_url(self, registration, registration_comment):
        return '/{}registrations/{}/comments/?filter[target]={}'.format(API_BASE, registration._id, registration_comment._id)

    def test_comment_has_target_link_with_correct_type(self, app, public_url, public_project):
        """The target relationship should point at the node with type 'nodes'."""
        res = app.get(public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, public_project._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'nodes'
        assert res.status_code == 200
        assert urlparse(url).path == expected_url
        assert target_type == expected_type

    def test_public_node_non_contrib_commenter_can_update_comment(self, app, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth)
        assert res.status_code == 200
        assert payload['data']['attributes']['content'] == res.json['data']['attributes']['content']

    def test_public_node_non_contrib_commenter_cannot_update_own_comment_if_comment_level_private(self, app, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, user=non_contrib)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

    def test_public_node_non_contrib_commenter_can_delete_comment(self, app, non_contrib):
        project = ProjectFactory(is_public=True)
        comment = CommentFactory(node=project, user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = app.delete_json_api(url, auth=non_contrib.auth)
        assert res.status_code == 204

    def test_registration_comment_has_usable_replies_relationship_link(self, app, user, registration_url, registration_comment_reply):
        res = app.get(registration_url, auth=user.auth)
        assert res.status_code == 200
        comments_url = res.json['data']['relationships']['comments']['links']['related']['href']
        comments_uri = test_utils.urlparse_drop_netloc(comments_url)
        comments_res = app.get(comments_uri, auth=user.auth)
        assert comments_res.status_code == 200
        replies_url = comments_res.json['data'][0]['relationships']['replies']['links']['related']['href']
        replies_uri = test_utils.urlparse_drop_netloc(replies_url)
        replies_res = app.get(replies_uri, auth=user.auth)
        # Fix: the replies link was fetched but never verified, so the test
        # could not fail on a broken replies relationship. Assert it is usable.
        assert replies_res.status_code == 200
        node_url = comments_res.json['data'][0]['relationships']['node']['links']['related']['href']
        node_uri = test_utils.urlparse_drop_netloc(node_url)
        assert node_uri == registration_url

    def test_registration_comment_has_usable_node_relationship_link(self, app, user, registration, registration_url, registration_comment_reply):
        res = app.get(registration_url, auth=user.auth)
        assert res.status_code == 200
        comments_url = res.json['data']['relationships']['comments']['links']['related']['href']
        comments_uri = test_utils.urlparse_drop_netloc(comments_url)
        comments_res = app.get(comments_uri, auth=user.auth)
        assert comments_res.status_code == 200
        node_url = comments_res.json['data'][0]['relationships']['node']['links']['related']['href']
        node_uri = test_utils.urlparse_drop_netloc(node_url)
        node_res = app.get(node_uri, auth=user.auth)
        assert registration._id in node_res.json['data']['id']
class TestFileCommentDetailView(CommentDetailMixin):
    """Comment-detail API tests where the comment target is a file."""

    # private_project_with_comments
    @pytest.fixture()
    def private_project(self, user, contributor):
        private_project = ProjectFactory.create(is_public=False, creator=user)
        private_project.add_contributor(contributor, save=True)
        return private_project

    @pytest.fixture()
    def file(self, user, private_project):
        return test_utils.create_test_file(private_project, user)

    @pytest.fixture()
    def comment(self, user, private_project, file):
        # The mixin's generic `comment` targets the file's guid here.
        return CommentFactory(node=private_project, target=file.get_guid(), user=user)

    @pytest.fixture()
    def private_url(self, comment):
        return '/{}comments/{}/'.format(API_BASE, comment._id)

    @pytest.fixture()
    def payload(self, comment, set_up_payload):
        return set_up_payload(comment._id)

    # public_project_with_comments
    @pytest.fixture()
    def public_project(self, user, contributor):
        public_project = ProjectFactory.create(is_public=True, creator=user, comment_level='private')
        public_project.add_contributor(contributor, save=True)
        return public_project

    @pytest.fixture()
    def public_file(self, user, public_project):
        return test_utils.create_test_file(public_project, user)

    @pytest.fixture()
    def public_comment(self, user, public_project, public_file):
        return CommentFactory(node=public_project, target=public_file.get_guid(), user=user)

    @pytest.fixture()
    def public_comment_reply(self, user, public_comment, public_project):
        reply_target = Guid.load(public_comment._id)
        return CommentFactory(node=public_project, target=reply_target, user=user)

    @pytest.fixture()
    def public_url(self, public_comment):
        return '/{}comments/{}/'.format(API_BASE, public_comment._id)

    @pytest.fixture()
    def public_comment_payload(self, public_comment, set_up_payload):
        return set_up_payload(public_comment._id)

    # registration_with_comments
    @pytest.fixture()
    def registration(self, user):
        return RegistrationFactory(creator=user, comment_level='private')

    @pytest.fixture()
    def registration_file(self, user, registration):
        return test_utils.create_test_file(registration, user)

    @pytest.fixture()
    def registration_comment(self, user, registration, registration_file):
        return CommentFactory(node=registration, target=registration_file.get_guid(), user=user)

    @pytest.fixture()
    def comment_url(self, registration_comment):
        return '/{}comments/{}/'.format(API_BASE, registration_comment._id)

    @pytest.fixture()
    def registration_comment_reply(self, user, registration, registration_comment):
        reply_target = Guid.load(registration_comment._id)
        return CommentFactory(node=registration, target=reply_target, user=user)

    def test_file_comment_has_target_link_with_correct_type(self, app, public_url, public_file):
        # The target relationship should point at the file with type 'files'.
        res = app.get(public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}files/{}/'.format(API_BASE, public_file._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'files'
        assert res.status_code == 200
        assert urlparse(url).path == expected_url
        assert target_type == expected_type

    def test_public_node_non_contrib_commenter_can_update_file_comment(self, app, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True)
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth)
        assert res.status_code == 200
        assert payload['data']['attributes']['content'] == res.json['data']['attributes']['content']

    def test_public_node_non_contrib_commenter_cannot_update_own_file_comment_if_comment_level_private(self, app, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True)
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=non_contrib)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

    def test_public_node_non_contrib_commenter_can_delete_file_comment(self, app, non_contrib):
        project = ProjectFactory(is_public=True, comment_level='public')
        test_file = test_utils.create_test_file(project, project.creator)
        comment = CommentFactory(node=project, target=test_file.get_guid(), user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = app.delete_json_api(url, auth=non_contrib.auth)
        assert res.status_code == 204

    def test_comment_detail_for_deleted_file_is_not_returned(self, app, user, private_project, file, private_url):
        # Delete commented file
        osfstorage = private_project.get_addon('osfstorage')
        # NOTE(review): root_node is never used afterwards; presumably kept
        # for the side effect of materializing the addon root — confirm
        # before removing.
        root_node = osfstorage.get_root()
        file.delete()
        res = app.get(private_url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
class TestWikiCommentDetailView(CommentDetailMixin):
    """Comment-detail API tests where the comment target is a wiki page."""

    # private_project_with_comments
    @pytest.fixture()
    def private_project(self, user, contributor):
        private_project = ProjectFactory.create(is_public=False, creator=user, comment_level='private')
        private_project.add_contributor(contributor, save=True)
        return private_project

    @pytest.fixture()
    def wiki(self, user, private_project):
        # Patch out search indexing so wiki creation stays local to the test DB.
        with mock.patch('osf.models.AbstractNode.update_search'):
            return NodeWikiFactory(node=private_project, user=user)

    @pytest.fixture()
    def comment(self, user, private_project, wiki):
        return CommentFactory(node=private_project, target=Guid.load(wiki._id), user=user)

    @pytest.fixture()
    def private_url(self, comment):
        return '/{}comments/{}/'.format(API_BASE, comment._id)

    @pytest.fixture()
    def payload(self, comment, set_up_payload):
        return set_up_payload(comment._id)

    # public_project_with_comments
    @pytest.fixture()
    def public_project(self, user, contributor):
        public_project = ProjectFactory.create(is_public=True, creator=user, comment_level='private')
        public_project.add_contributor(contributor, save=True)
        return public_project

    @pytest.fixture()
    def public_wiki(self, user, public_project):
        with mock.patch('osf.models.AbstractNode.update_search'):
            return NodeWikiFactory(node=public_project, user=user)

    @pytest.fixture()
    def public_comment(self, user, public_project, public_wiki):
        return CommentFactory(node=public_project, target=Guid.load(public_wiki._id), user=user)

    @pytest.fixture()
    def public_comment_reply(self, user, public_comment, public_project):
        reply_target = Guid.load(public_comment._id)
        return CommentFactory(node=public_project, target=reply_target, user=user)

    @pytest.fixture()
    def public_url(self, public_comment):
        return '/{}comments/{}/'.format(API_BASE, public_comment._id)

    @pytest.fixture()
    def public_comment_payload(self, public_comment, set_up_payload):
        return set_up_payload(public_comment._id)

    # registration_with_comments
    @pytest.fixture()
    def registration(self, user):
        return RegistrationFactory(creator=user, comment_level='private')

    @pytest.fixture()
    def registration_wiki(self, registration, user):
        with mock.patch('osf.models.AbstractNode.update_search'):
            return NodeWikiFactory(node=registration, user=user)

    @pytest.fixture()
    def registration_comment(self, user, registration, registration_wiki):
        return CommentFactory(node=registration, target=Guid.load(registration_wiki._id), user=user)

    @pytest.fixture()
    def comment_url(self, registration_comment):
        return '/{}comments/{}/'.format(API_BASE, registration_comment._id)

    @pytest.fixture()
    def registration_comment_reply(self, user, registration, registration_comment):
        reply_target = Guid.load(registration_comment._id)
        return CommentFactory(node=registration, target=reply_target, user=user)

    def test_wiki_comment_has_target_link_with_correct_type(self, app, public_url, public_wiki):
        # The target relationship should point at the wiki with type 'wiki'.
        res = app.get(public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = public_wiki.get_absolute_url()
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'wiki'
        assert res.status_code == 200
        assert url == expected_url
        assert target_type == expected_type

    def test_public_node_non_contrib_commenter_can_update_wiki_comment(self, app, user, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True)
        test_wiki = NodeWikiFactory(node=project, user=user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth)
        assert res.status_code == 200
        assert payload['data']['attributes']['content'] == res.json['data']['attributes']['content']

    def test_public_node_non_contrib_commenter_cannot_update_own_wiki_comment_if_comment_level_private(self, app, user, non_contrib, set_up_payload):
        project = ProjectFactory(is_public=True)
        test_wiki = NodeWikiFactory(node=project, user=user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=non_contrib)
        # Tighten commenting to contributors only after the comment exists.
        project.comment_level = 'private'
        project.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = set_up_payload(comment._id)
        res = app.put_json_api(url, payload, auth=non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
        assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail

    def test_public_node_non_contrib_commenter_can_delete_wiki_comment(self, app, user, non_contrib):
        project = ProjectFactory(is_public=True, comment_level='public')
        test_wiki = NodeWikiFactory(node=project, user=user)
        comment = CommentFactory(node=project, target=Guid.load(test_wiki._id), user=non_contrib)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = app.delete_json_api(url, auth=non_contrib.auth)
        assert res.status_code == 204

    def test_comment_detail_for_deleted_wiki_is_not_returned(self, app, user, wiki, private_url, private_project):
        # Delete commented wiki page
        private_project.delete_node_wiki(wiki.page_name, core.Auth(user))
        res = app.get(private_url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import print_function
import cmd
import os.path
import pkg_resources
from shlex import shlex
import StringIO
import sys
import traceback
from trac import __version__ as VERSION
from trac.admin.api import AdminCommandError, AdminCommandManager, \
get_console_locale
from trac.config import Configuration
from trac.core import TracError
from trac.env import Environment
from trac.ticket.model import *
from trac.util import translation, warn_setuptools_issue
from trac.util.html import html
from trac.util.text import console_print, exception_to_unicode, printout, \
printerr, raw_input, to_unicode, \
getpreferredencoding
from trac.util.translation import _, ngettext, has_babel, cleandoc_
from trac.versioncontrol.api import RepositoryManager
from trac.web.chrome import default_mainnav_order, default_metanav_order
from trac.wiki.admin import WikiAdmin
from trac.wiki.formatter import MacroError
from trac.wiki.macros import WikiMacroBase
# Version string of the installed Trac distribution, shown in console banners.
TRAC_VERSION = pkg_resources.get_distribution('Trac').version

# Set in TracAdmin.__init__ to libreadline's rl_completion_suppress_append
# flag (a ctypes int) when the library can be located; stays None otherwise.
rl_completion_suppress_append = None
def find_readline_lib():
    """Return the name (and possibly the full path) of the readline library
    linked to the readline module.

    Returns `None` when the readline module binary does not embed a
    recognizable libreadline path.
    """
    import readline
    with open(readline.__file__, "rb") as f:
        data = f.read()
    import re
    # The file is read as bytes, so search with a bytes pattern: a text
    # pattern works on Python 2 (str == bytes) but raises TypeError on
    # Python 3.
    m = re.search(b'\0([^\0]*libreadline[^\0]*)\0', data)
    if m:
        lib_name = m.group(1)
        if not isinstance(lib_name, str):
            # Python 3: decode the matched bytes to a filesystem path string.
            lib_name = lib_name.decode(sys.getfilesystemencoding(), 'replace')
        return lib_name
    return None
class TracAdmin(cmd.Cmd):
    """Interactive console (REPL) and one-shot command runner for trac-admin."""
    intro = ''
    doc_header = 'Trac Admin Console %(version)s\n' \
                 'Available Commands:\n' \
                 % {'version': TRAC_VERSION}
    ruler = ''
    prompt = "Trac> "
    # Path of the Trac environment being administered (set via env_set()).
    envname = None
    # Lazily-opened Environment instance; access it through the `env` property.
    __env = None
    # Tri-state: None = upgrade status not checked yet, True/False once known.
    needs_upgrade = None
def __init__(self, envdir=None):
    """Initialize the console, optionally bound to the environment `envdir`."""
    cmd.Cmd.__init__(self)
    try:
        import readline
        delims = readline.get_completer_delims()
        # Remove characters that legitimately occur inside command arguments
        # (paths, options) from the completer's word delimiters.
        for c in '-/:()\\':
            delims = delims.replace(c, '')
        readline.set_completer_delims(delims)
        # Work around trailing space automatically inserted by libreadline
        # until Python gets fixed, see http://bugs.python.org/issue5833
        import ctypes
        lib_name = find_readline_lib()
        if lib_name is not None:
            lib = ctypes.cdll.LoadLibrary(lib_name)
            global rl_completion_suppress_append
            rl_completion_suppress_append = ctypes.c_int.in_dll(lib,
                "rl_completion_suppress_append")
    except Exception:
        # readline/ctypes support is an optional nicety; never fail startup.
        pass
    self.interactive = False
    if envdir:
        self.env_set(os.path.abspath(envdir))
def emptyline(self):
    """Do nothing on an empty line (cmd.Cmd would repeat the last command)."""
    pass
def onecmd(self, line):
    """`line` may be a `str` or an `unicode` object.

    Runs one command, mapping errors to a nonzero return value; in
    non-interactive (one-shot) mode that value is returned for use as the
    process exit status.
    """
    try:
        if isinstance(line, str):
            # Decode byte strings: interactive input arrives in the stdin
            # encoding, command-line arguments in the locale encoding.
            if self.interactive:
                encoding = sys.stdin.encoding
            else:
                encoding = getpreferredencoding()  # sys.argv
            line = to_unicode(line, encoding)
        if self.interactive:
            # Double backslashes — presumably so the shlex pass in
            # arg_tokenize does not consume them as escapes; confirm.
            line = line.replace('\\', '\\\\')
        rv = cmd.Cmd.onecmd(self, line) or 0
    except SystemExit:
        raise
    except AdminCommandError as e:
        printerr(_("Error: %(msg)s", msg=to_unicode(e)))
        if e.show_usage:
            print()
            self.do_help(e.cmd or self.arg_tokenize(line)[0])
        rv = 2
    except TracError as e:
        printerr(exception_to_unicode(e))
        rv = 2
    except Exception as e:
        printerr(exception_to_unicode(e))
        rv = 2
        # Log the full traceback only when an environment is available.
        if self.env_check():
            self.env.log.error("Exception in trac-admin command: %s",
                               exception_to_unicode(e, traceback=True))
    if not self.interactive:
        return rv
def run(self):
    """Print the banner and enter the interactive command loop."""
    self.interactive = True
    printout(_("""Welcome to trac-admin %(version)s
Interactive Trac administration console.
Copyright (C) 2003-2013 Edgewall Software
Type: '?' or 'help' for help on commands.
""", version=TRAC_VERSION))
    self.cmdloop()
##
## Environment methods
##
def env_set(self, envname, env=None):
    """Remember the environment path (and optionally an already-open env)."""
    self.envname = envname
    self.prompt = "Trac [%s]> " % envname
    if env is not None:
        self.__env = env
def env_check(self):
    """Return True when the environment is (or can be) opened, else False."""
    if self.__env:
        return True
    try:
        self._init_env()
    except Exception:
        return False
    return True
@property
def env(self):
    """The open Environment, initialized lazily on first access.

    Exits the process with status 1 when the environment cannot be opened.
    """
    try:
        if not self.__env:
            self._init_env()
        return self.__env
    except Exception as e:
        printerr(_("Failed to open environment: %(err)s",
                   err=exception_to_unicode(e, traceback=True)))
        sys.exit(1)
def _init_env(self):
    """Open the environment and activate its configured console locale."""
    env = Environment(self.envname)
    self.__env = env
    # fixup language according to env settings
    if has_babel:
        locale = get_console_locale(env)
        if locale:
            translation.activate(locale)
##
## Utility methods
##
@property
def cmd_mgr(self):
    """A fresh AdminCommandManager bound to the (lazily opened) environment."""
    return AdminCommandManager(self.env)
def arg_tokenize(self, argstr):
    """`argstr` is an `unicode` string
    ... but shlex is not unicode friendly.

    Split a command line into unicode tokens, shell-style; always returns
    at least one (possibly empty) token so callers can index [0].
    """
    # Round-trip through UTF-8 because py2 shlex only handles byte strings.
    lex = shlex(argstr.encode('utf-8'), posix=True)
    lex.whitespace_split = True
    lex.commenters = ''
    if os.name == 'nt':
        # Windows paths use backslashes; disable shlex escape processing.
        lex.escape = ''
    return [unicode(token, 'utf-8') for token in lex] or ['']
def word_complete(self, text, words):
    """Return the de-duplicated candidates from `words` starting with `text`."""
    candidates = list({word for word in words if word.startswith(text)})
    if len(candidates) == 1:
        candidates[0] += ' '  # Only one choice, skip to next arg
    return candidates
@staticmethod
def split_help_text(text):
    """Split help text into paragraphs, collapsing internal whitespace."""
    import re
    blocks = re.split(r'(?m)(?:^[ \t]*\n){1,}', text)
    return [re.sub(r'(?m)\s+', ' ', block.strip()) for block in blocks]
@classmethod
def print_doc(cls, docs, stream=None, short=False, long=False):
    """Print command help entries to `stream` (default stdout).

    `docs` is a list of `(command, args, doc)` tuples; entries with an
    empty doc are hidden.  With `short`, one aligned line per command is
    printed; otherwise the first paragraph is shown and the remaining
    paragraphs are added when `long` is set or only one entry is printed.
    """
    if stream is None:
        stream = sys.stdout
    # Entries without documentation are deliberately hidden.
    docs = [doc for doc in docs if doc[2]]
    if not docs:
        return
    if short:
        # Align the first paragraph of each doc after the longest command.
        max_len = max(len(doc[0]) for doc in docs)
        for (cmd, args, doc) in docs:
            paragraphs = cls.split_help_text(doc)
            console_print(stream, '%s %s' % (cmd.ljust(max_len),
                                             paragraphs[0]))
    else:
        import textwrap
        for (cmd, args, doc) in docs:
            paragraphs = cls.split_help_text(doc)
            console_print(stream, '%s %s\n' % (cmd, args))
            console_print(stream, ' %s\n' % paragraphs[0])
            if (long or len(docs) == 1) and len(paragraphs) > 1:
                for paragraph in paragraphs[1:]:
                    console_print(stream, textwrap.fill(paragraph, 79,
                                  initial_indent=' ', subsequent_indent=' ')
                                  + '\n')
##
## Command dispatcher
##
def complete_line(self, text, line, cmd_only=False):
    """Produce readline completion candidates for `text` within `line`.

    With `cmd_only`, only command names (not arguments) are offered.
    """
    if rl_completion_suppress_append is not None:
        # Stop libreadline from appending a space after each completion.
        rl_completion_suppress_append.value = 1
    args = self.arg_tokenize(line)
    if line and line[-1] == ' ':  # Space starts new argument
        args.append('')
    if self.env_check():
        try:
            comp = self.cmd_mgr.complete_command(args, cmd_only)
        except Exception as e:
            printerr()
            printerr(_('Completion error: %(err)s',
                       err=exception_to_unicode(e)))
            self.env.log.error("trac-admin completion error: %s",
                               exception_to_unicode(e, traceback=True))
            comp = []
        if len(args) == 1:
            # First word: also offer the console's built-in do_* commands.
            comp.extend(name[3:] for name in self.get_names()
                        if name.startswith('do_'))
        try:
            # comp may be a completion object with its own complete() method.
            return comp.complete(text)
        except AttributeError:
            # Otherwise it is a plain list of candidate words.
            return self.word_complete(text, comp)
def completenames(self, text, line, begidx, endidx):
    # Command-name position: restrict completion to command names only.
    return self.complete_line(text, line, True)
def completedefault(self, text, line, begidx, endidx):
    # Argument position: delegate full-line completion to the command manager.
    return self.complete_line(text, line)
def default(self, line):
    """Dispatch any unrecognized input to the environment's admin commands."""
    try:
        if not self.__env:
            self._init_env()
        if self.needs_upgrade is None:
            # Cache the upgrade check; it only needs to run once per session.
            self.needs_upgrade = self.__env.needs_upgrade()
    except TracError as e:
        raise AdminCommandError(to_unicode(e))
    except Exception as e:
        raise AdminCommandError(exception_to_unicode(e))
    args = self.arg_tokenize(line)
    if args[0] == 'upgrade':
        # Let `upgrade` itself run, then force a fresh check next time.
        self.needs_upgrade = None
    elif self.needs_upgrade:
        # Refuse all other commands on an out-of-date environment.
        raise TracError(_('The Trac Environment needs to be upgraded.\n\n'
                          'Run "trac-admin %(path)s upgrade"',
                          path=self.envname))
    return self.cmd_mgr.execute_command(*args)
##
## Available Commands
##
## Help
# Help entry for the built-in `help` command itself.
_help_help = [('help', '', 'Show documentation')]
@classmethod
def all_docs(cls, env=None):
    """Collect help entries: built-ins plus, when `env` is given, the
    environment-provided admin commands."""
    entries = cls._help_help + cls._help_initenv
    if env is not None:
        entries.extend(AdminCommandManager(env).get_command_help())
    return entries
def complete_help(self, text, line, begidx, endidx):
    # Strip the leading "help " so the remainder completes like a command.
    return self.complete_line(text, line[5:], True)
def do_help(self, line=None):
    """Show help for a specific command, or the full command list."""
    arg = self.arg_tokenize(line)
    if arg[0]:
        cmd_mgr = None
        # Built-in _help_* attributes take precedence over env commands.
        doc = getattr(self, "_help_" + arg[0], None)
        if doc is None and self.env_check():
            cmd_mgr = self.cmd_mgr
            doc = cmd_mgr.get_command_help(arg)
        if doc:
            self.print_doc(doc)
        else:
            printerr(_("No documentation found for '%(cmd)s'."
                       " Use 'help' to see the list of commands.",
                       cmd=' '.join(arg)))
            # Offer "did you mean" suggestions when an env is available.
            cmds = None
            if cmd_mgr:
                cmds = cmd_mgr.get_similar_commands(arg[0])
            if cmds:
                printout('')
                printout(ngettext("Did you mean this?",
                                  "Did you mean one of these?",
                                  len(cmds)))
                for cmd in cmds:
                    printout(' ' + cmd)
    else:
        # No argument: print the banner and the short list of all commands.
        printout(_("trac-admin - The Trac Administration Console "
                   "%(version)s", version=TRAC_VERSION))
        if not self.interactive:
            print()
            printout(_("Usage: trac-admin </path/to/projenv> "
                       "[command [subcommand] [option ...]]\n")
                     )
            printout(_("Invoking trac-admin without command starts "
                       "interactive mode.\n"))
        env = self.env if self.env_check() else None
        self.print_doc(self.all_docs(env), short=True)
## Quit / EOF
# Shared help entry for `quit` and its aliases `exit` and EOF (Ctrl-D).
_help_quit = [('quit', '', 'Exit the program')]
_help_exit = _help_quit
_help_EOF = _help_quit
def do_quit(self, line):
    """Terminate the console process."""
    print()
    sys.exit()

do_exit = do_quit # Alias
do_EOF = do_quit # Alias
## Initenv
# Help entry for the built-in `initenv` command; the long string is the
# full help text rendered by print_doc().
_help_initenv = [
    ('initenv', '[<projectname> <db> [<repostype> <repospath>]]',
     """Create and initialize a new environment
If no arguments are given, then the required parameters are requested
interactively.
One or more optional arguments --inherit=PATH can be used to specify
the "[inherit] file" option at environment creation time, so that only
the options not already specified in one of the global configuration
files are written to the conf/trac.ini file of the newly created
environment. Relative paths are resolved relative to the "conf"
directory of the new environment.
The optional argument --config=PATH can be used to specify a
configuration file that is used to populate the environment
configuration. The arguments <projectname>, <db> and any other
arguments passed in the invocation will override values in the
configuration file.
""")]
def do_initdb(self, line):
    """Legacy alias for `initenv`.

    Propagates do_initenv's return value so that a failed `initdb`
    surfaces as a nonzero exit status instead of being silently dropped.
    """
    return self.do_initenv(line)
    def get_initenv_args(self):
        """Interactively prompt for the `initenv` parameters.

        Returns a two-element list [project_name, db_connection_string];
        each prompt falls back to a default when the user enters nothing.
        """
        returnvals = []
        printout(_("Creating a new Trac environment at %(envname)s",
                   envname=self.envname))
        printout(_("""
Trac will first ask a few questions about your environment
in order to initialize and prepare the project database.
Please enter the name of your project.
This name will be used in page titles and descriptions.
"""))
        dp = 'My Project'
        returnvals.append(raw_input(_("Project Name [%(default)s]> ",
                                      default=dp)).strip() or dp)
        printout(_("""
Please specify the connection string for the database to use.
By default, a local SQLite database is created in the environment
directory. It is also possible to use an existing MySQL or
PostgreSQL database (check the Trac documentation for the exact
connection string syntax).
"""))
        ddb = 'sqlite:db/trac.db'
        prompt = _("Database connection string [%(default)s]> ", default=ddb)
        returnvals.append(raw_input(prompt).strip() or ddb)
        print()
        return returnvals
    def do_initenv(self, line):
        """Create and initialize a new Trac environment at self.envname.

        Returns 2 on any validation or creation error, None on success.
        Accepts 0, 2 or 4 positional arguments plus optional --inherit=PATH
        (repeatable) and --config=PATH flags; with no arguments the
        parameters are requested interactively.
        """
        def initenv_error(msg):
            # Uniform error reporting helper for every failure path below.
            printerr(_("Initenv for '%(env)s' failed.", env=self.envname),
                     "\n%s" % msg)
        if self.env_check():
            initenv_error(_("Does an environment already exist?"))
            return 2
        if os.path.exists(self.envname) and os.listdir(self.envname):
            initenv_error(_("Directory exists and is not empty."))
            return 2
        if not os.path.exists(os.path.dirname(self.envname)):
            initenv_error(_("Base directory '%(env)s' does not exist. Please "
                            "create it manually and retry.",
                            env=os.path.dirname(self.envname)))
            return 2
        arg = self.arg_tokenize(line)
        inherit_paths = []
        config_file_path = None
        # Extract the --inherit/--config flags in place; everything left in
        # `arg` afterwards is positional.
        i = 0
        while i < len(arg):
            item = arg[i]
            if item.startswith('--inherit='):
                inherit_paths.append(arg.pop(i)[10:])
            elif item.startswith('--config='):
                config_file_path = arg.pop(i)[9:]
            else:
                i += 1
        config = None
        if config_file_path:
            if not os.path.exists(config_file_path):
                initenv_error(_("The file specified in the --config argument "
                                "does not exist: %(path)s.",
                                path=config_file_path))
                return 2
            try:
                config = Configuration(config_file_path)
            except TracError as e:
                initenv_error(e)
                return 2
        arg = arg or [''] # Reset to usual empty in case we popped the only one
        project_name = None
        db_str = None
        repository_type = None
        repository_dir = None
        if len(arg) == 1 and not arg[0]:
            # No positional arguments: fall back to the interactive prompts.
            project_name, db_str = self.get_initenv_args()
        elif len(arg) == 2:
            project_name, db_str = arg
        elif len(arg) == 4:
            project_name, db_str, repository_type, repository_dir = arg
        else:
            initenv_error('Wrong number of arguments: %d' % len(arg))
            return 2
        try:
            printout(_("Creating and Initializing Project"))
            options = []
            # Seed options from the --config file first, so the explicit
            # arguments below override values from the file.
            if config:
                for section in config.sections(defaults=False):
                    options.extend((section, option, value)
                                   for option, value
                                   in config.options(section))
            options.extend([
                ('project', 'name', project_name),
                ('trac', 'database', db_str),
            ])
            def add_nav_order_options(section, default):
                # Assign 1-based float ordering to the default nav entries.
                for i, name in enumerate(default, 1):
                    options.append((section, name + '.order', float(i)))
            add_nav_order_options('mainnav', default_mainnav_order)
            add_nav_order_options('metanav', default_metanav_order)
            if repository_dir:
                options.extend([
                    ('repositories', '.type', repository_type),
                    ('repositories', '.dir', repository_dir),
                ])
            if inherit_paths:
                options.append(('inherit', 'file',
                                ",\n ".join(inherit_paths)))
            try:
                self.__env = Environment(self.envname, create=True,
                                         options=options)
            except Exception as e:
                initenv_error(_('Failed to create environment.'))
                printerr(e)
                traceback.print_exc()
                sys.exit(1)
            # Add a few default wiki pages
            printout(_(" Installing default wiki pages"))
            pages_dir = pkg_resources.resource_filename('trac.wiki',
                                                        'default-pages')
            WikiAdmin(self.__env).load_pages(pages_dir)
            if repository_dir:
                try:
                    repos = RepositoryManager(self.__env).get_repository('')
                    if repos:
                        printout(_(" Indexing default repository"))
                        repos.sync(self._resync_feedback)
                except TracError as e:
                    # Indexing failure is non-fatal: the environment is
                    # usable, only the repository needs reconfiguration.
                    printerr(_("""
---------------------------------------------------------------------
Warning: couldn't index the default repository.
This can happen for a variety of reasons: wrong repository type,
no appropriate third party library for this repository type,
no actual repository at the specified repository path...
You can nevertheless start using your Trac environment, but
you'll need to check again your trac.ini file and the [trac]
repository_type and repository_path settings.
"""))
        except Exception as e:
            initenv_error(to_unicode(e))
            traceback.print_exc()
            return 2
        printout(_("""
---------------------------------------------------------------------
Project environment for '%(project_name)s' created.
You may now configure the environment by editing the file:
%(config_path)s
If you'd like to take this new project environment for a test drive,
try running the Trac standalone web server `tracd`:
tracd --port 8000 %(project_path)s
Then point your browser to http://localhost:8000/%(project_dir)s.
There you can also browse the documentation for your installed
version of Trac, including information on further setup (such as
deploying Trac to a real web server).
The latest documentation can also always be found on the project
website:
http://trac.edgewall.org/
Congratulations!
""", project_name=project_name, project_path=self.envname,
           project_dir=os.path.basename(self.envname),
           config_path=os.path.join(self.envname, 'conf', 'trac.ini')))
def _resync_feedback(self, rev):
sys.stdout.write(' [%s]\r' % rev)
sys.stdout.flush()
class TracAdminHelpMacro(WikiMacroBase):
    # Wiki macro rendering trac-admin command help inside a <pre> block.
    _domain = 'messages'
    _description = cleandoc_(
    """Display help for trac-admin commands.
    Examples:
    {{{
    [[TracAdminHelp]] # all commands
    [[TracAdminHelp(wiki)]] # all wiki commands
    [[TracAdminHelp(wiki export)]] # the "wiki export" command
    [[TracAdminHelp(upgrade)]] # the upgrade command
    }}}
    """)
    def expand_macro(self, formatter, name, content):
        if content:
            arg = content.strip().split()
            # Mirror do_help's lookup order: built-in _help_* attribute
            # first, then the registered admin command providers.
            doc = getattr(TracAdmin, "_help_" + arg[0], None)
            if doc is None:
                cmd_mgr = AdminCommandManager(self.env)
                doc = cmd_mgr.get_command_help(arg)
            if not doc:
                raise MacroError(_('Unknown trac-admin command '
                                   '"%(command)s"', command=content))
        else:
            # No argument: render the documentation for every command.
            doc = TracAdmin.all_docs(self.env)
        buf = StringIO.StringIO()
        TracAdmin.print_doc(doc, buf, long=True)
        return html.PRE(buf.getvalue().decode('utf-8'), class_='wiki')
def _quote_args(args):
    """Quote each argument so the console tokenizer keeps it as one token."""
    def _quote_one(token):
        # Purely alphanumeric tokens need no quoting at all.
        if token.isalnum():
            return token
        # Wrap in single quotes, splicing any embedded single quote through
        # a double-quoted section.
        pieces = ["'%s'" % part for part in token.split("'")]
        return '"\'"'.join(pieces)
    return [_quote_one(a) for a in args]
def _run(args):
    """Dispatch the trac-admin command line.

    Returns the command's exit status (propagated from onecmd) or None
    after an interactive session; `args` defaults to sys.argv[1:].
    """
    if args is None:
        args = sys.argv[1:]
    warn_setuptools_issue()
    if sys.flags.optimize != 0:
        printerr(_("Python with optimizations is not supported."))
        return 2
    admin = TracAdmin()
    if args:
        if args[0] in ('-h', '--help', 'help'):
            return admin.onecmd(' '.join(_quote_args(['help'] + args[1:])))
        elif args[0] in ('-v','--version'):
            printout(os.path.basename(sys.argv[0]), TRAC_VERSION)
        else:
            # First argument is the environment path.
            env_path = os.path.abspath(args[0])
            try:
                unicode(env_path, 'ascii')
            except UnicodeDecodeError:
                printerr(_("Non-ascii environment path '%(path)s' not "
                           "supported.", path=to_unicode(env_path)))
                return 2
            admin.env_set(env_path)
            if len(args) > 1:
                # Remaining arguments form a single one-shot command.
                return admin.onecmd(' '.join(_quote_args(args[1:])))
            else:
                # No command: enter the interactive loop; Ctrl-C exits
                # cleanly through do_quit.
                while True:
                    try:
                        admin.run()
                    except KeyboardInterrupt:
                        admin.do_quit('')
    else:
        return admin.onecmd("help")
def run(args=None):
    """Main entry point.

    Activates console-locale translations for the duration of the run and
    always deactivates them again, even if _run raises.
    """
    translation.activate(get_console_locale())
    try:
        return _run(args)
    finally:
        translation.deactivate()
if __name__ == '__main__':
    # Verify the installed Trac distribution matches this script's version
    # before executing any command.
    pkg_resources.require('Trac==%s' % VERSION)
    sys.exit(run())
|
|
#
# test_multibytecodec.py
# Unit test for multibytecodec itself
#
from test import support
from test.support import TESTFN
import unittest, io, codecs, sys, os
import _multibytecodec
# Every CJK multibyte codec exercised by these tests, grouped by the C
# extension module (_codecs_*) that implements it.
ALL_CJKENCODINGS = [
    # _codecs_cn
    'gb2312', 'gbk', 'gb18030', 'hz',
    # _codecs_hk
    'big5hkscs',
    # _codecs_jp
    'cp932', 'shift_jis', 'euc_jp', 'euc_jisx0213', 'shift_jisx0213',
    'euc_jis_2004', 'shift_jis_2004',
    # _codecs_kr
    'cp949', 'euc_kr', 'johab',
    # _codecs_tw
    'big5', 'cp950',
    # _codecs_iso2022
    'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_2004',
    'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr',
]
class Test_MultibyteCodec(unittest.TestCase):
def test_nullcoding(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual(b''.decode(enc), '')
self.assertEqual(str(b'', enc), '')
self.assertEqual(''.encode(enc), b'')
def test_str_decode(self):
for enc in ALL_CJKENCODINGS:
self.assertEqual('abcd'.encode(enc), b'abcd')
def test_errorcallback_longindex(self):
dec = codecs.getdecoder('euc-kr')
myreplace = lambda exc: ('', sys.maxsize+1)
codecs.register_error('test.cjktest', myreplace)
self.assertRaises(IndexError, dec,
b'apple\x92ham\x93spam', 'test.cjktest')
def test_codingspec(self):
try:
for enc in ALL_CJKENCODINGS:
code = '# coding: {}\n'.format(enc)
exec(code)
finally:
support.unlink(TESTFN)
def test_init_segfault(self):
# bug #3305: this used to segfault
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamReader, None)
self.assertRaises(AttributeError,
_multibytecodec.MultibyteStreamWriter, None)
def test_decode_unicode(self):
# Trying to decode an unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
self.assertRaises(TypeError, codecs.getdecoder(enc), "")
class Test_IncrementalEncoder(unittest.TestCase):

    def test_stateless(self):
        # cp949 has no shift states, so reset() never affects the output.
        enc = codecs.getincrementalencoder('cp949')()
        self.assertEqual(enc.encode('\ud30c\uc774\uc36c \ub9c8\uc744'),
                         b'\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb')
        self.assertIsNone(enc.reset())
        self.assertEqual(enc.encode('\u2606\u223c\u2606', True),
                         b'\xa1\xd9\xa1\xad\xa1\xd9')
        self.assertIsNone(enc.reset())
        self.assertEqual(enc.encode('', True), b'')
        self.assertEqual(enc.encode('', False), b'')
        self.assertIsNone(enc.reset())

    def test_stateful(self):
        # The jisx0213 encoder buffers a few codepoints awaiting a possible
        # combining character:
        #   U+00E6        => A9DC
        #   U+00E6 U+0300 => ABC4
        #   U+0300        => ABDC
        enc = codecs.getincrementalencoder('jisx0213')()
        self.assertEqual(enc.encode('\u00e6\u0300'), b'\xab\xc4')
        self.assertEqual(enc.encode('\u00e6'), b'')
        self.assertEqual(enc.encode('\u0300'), b'\xab\xc4')
        self.assertEqual(enc.encode('\u00e6', True), b'\xa9\xdc')
        self.assertIsNone(enc.reset())
        self.assertEqual(enc.encode('\u0300'), b'\xab\xdc')
        self.assertEqual(enc.encode('\u00e6'), b'')
        self.assertEqual(enc.encode('', True), b'\xa9\xdc')
        self.assertEqual(enc.encode('', True), b'')

    def test_stateful_keep_buffer(self):
        # An encode error must not discard the pending buffered character.
        enc = codecs.getincrementalencoder('jisx0213')()
        self.assertEqual(enc.encode('\u00e6'), b'')
        self.assertRaises(UnicodeEncodeError, enc.encode, '\u0123')
        self.assertEqual(enc.encode('\u0300\u00e6'), b'\xab\xc4')
        self.assertRaises(UnicodeEncodeError, enc.encode, '\u0123')
        self.assertIsNone(enc.reset())
        self.assertEqual(enc.encode('\u0300'), b'\xab\xdc')
        self.assertEqual(enc.encode('\u00e6'), b'')
        self.assertRaises(UnicodeEncodeError, enc.encode, '\u0123')
        self.assertEqual(enc.encode('', True), b'\xa9\xdc')

    def test_issue5640(self):
        # The backslashreplace handler must work through the incremental API.
        enc = codecs.getincrementalencoder('shift-jis')('backslashreplace')
        self.assertEqual(enc.encode('\xff'), b'\\xff')
        self.assertEqual(enc.encode('\n'), b'\n')
class Test_IncrementalDecoder(unittest.TestCase):
def test_dbcs(self):
# cp949 decoder is simple with only 1 or 2 bytes sequences.
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0\xcc\xbd'),
'\ud30c\uc774')
self.assertEqual(decoder.decode(b'\xe3 \xb8\xb6\xc0\xbb'),
'\uc36c \ub9c8\uc744')
self.assertEqual(decoder.decode(b''), '')
def test_dbcs_keep_buffer(self):
decoder = codecs.getincrementaldecoder('cp949')()
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c')
self.assertRaises(UnicodeDecodeError, decoder.decode,
b'\xcc\xbd', True)
self.assertEqual(decoder.decode(b'\xcc'), '\uc774')
def test_iso2022(self):
decoder = codecs.getincrementaldecoder('iso2022-jp')()
ESC = b'\x1b'
self.assertEqual(decoder.decode(ESC + b'('), '')
self.assertEqual(decoder.decode(b'B', True), '')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
self.assertEqual(decoder.decode(b'@$@'), '\u4e16')
self.assertEqual(decoder.decode(b'$', True), '\u4e16')
self.assertEqual(decoder.reset(), None)
self.assertEqual(decoder.decode(b'@$'), '@$')
self.assertEqual(decoder.decode(ESC + b'$'), '')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True)
self.assertEqual(decoder.decode(b'B@$'), '\u4e16')
def test_decode_unicode(self):
# Trying to decode an unicode string should raise a TypeError
for enc in ALL_CJKENCODINGS:
decoder = codecs.getincrementaldecoder(enc)()
self.assertRaises(TypeError, decoder.decode, "")
class Test_StreamReader(unittest.TestCase):
    def test_bug1728403(self):
        # A truncated multibyte sequence at EOF must raise UnicodeDecodeError
        # from StreamReader.read() rather than hang or crash.
        try:
            raw = open(TESTFN, 'wb')
            try:
                raw.write(b'\xa1')
            finally:
                raw.close()
            reader = codecs.open(TESTFN, encoding='cp949')
            try:
                self.assertRaises(UnicodeDecodeError, reader.read, 2)
            finally:
                reader.close()
        finally:
            support.unlink(TESTFN)
class Test_StreamWriter(unittest.TestCase):
    # The branch below was only ever taken on narrow (UTF-16) builds, where
    # a non-BMP character is a surrogate pair of length 2; on wide builds
    # len('\U00012345') == 1 and only test_streamwriter_strwrite is defined.
    if len('\U00012345') == 2: # UCS2
        def test_gb18030(self):
            # gb18030 can buffer a lone high surrogate until its partner
            # arrives; writing the pair in two calls must not duplicate it.
            s= io.BytesIO()
            c = codecs.getwriter('gb18030')(s)
            c.write('123')
            self.assertEqual(s.getvalue(), b'123')
            c.write('\U00012345')
            self.assertEqual(s.getvalue(), b'123\x907\x959')
            c.write('\U00012345'[0])
            self.assertEqual(s.getvalue(), b'123\x907\x959')
            c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
            self.assertEqual(s.getvalue(),
                b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            c.write('\U00012345'[0])
            self.assertEqual(s.getvalue(),
                b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            # Resetting with an unpaired surrogate still buffered must fail.
            self.assertRaises(UnicodeError, c.reset)
            self.assertEqual(s.getvalue(),
                b'123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        def test_utf_8(self):
            s= io.BytesIO()
            c = codecs.getwriter('utf-8')(s)
            c.write('123')
            self.assertEqual(s.getvalue(), b'123')
            c.write('\U00012345')
            self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
            # Python utf-8 codec can't buffer surrogate pairs yet.
            if 0:
                c.write('\U00012345'[0])
                self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85')
                c.write('\U00012345'[1] + '\U00012345' + '\uac00\u00ac')
                self.assertEqual(s.getvalue(),
                    b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                    b'\xea\xb0\x80\xc2\xac')
                c.write('\U00012345'[0])
                self.assertEqual(s.getvalue(),
                    b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                    b'\xea\xb0\x80\xc2\xac')
                c.reset()
                self.assertEqual(s.getvalue(),
                    b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                    b'\xea\xb0\x80\xc2\xac\xed\xa0\x88')
                c.write('\U00012345'[1])
                self.assertEqual(s.getvalue(),
                    b'123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                    b'\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
    else: # UCS4
        pass
    def test_streamwriter_strwrite(self):
        # Plain ASCII must pass through any stream writer unchanged.
        s = io.BytesIO()
        wr = codecs.getwriter('gb18030')(s)
        wr.write('abcd')
        self.assertEqual(s.getvalue(), b'abcd')
class Test_ISO2022(unittest.TestCase):

    def test_g2(self):
        # A G2 designation (ESC . A) followed by SS2 (ESC N) selects a
        # Latin-1 character for exactly one position.
        encoded = b'\x1b(B:hu4:unit\x1b.A\x1bNi de famille'
        decoded = ':hu4:unit\xe9 de famille'
        self.assertEqual(encoded.decode('iso2022-jp-2'), decoded)

    def test_iso2022_jp_g0(self):
        # Output must stay 7-bit and never emit a raw SO (0x0e) byte.
        self.assertNotIn(b'\x0e', '\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
        for codec_name in ('iso-2022-jp-2004', 'iso-2022-jp-3'):
            encoded = '\u3406'.encode(codec_name)
            self.assertFalse(any(byte > 0x80 for byte in encoded))

    def test_bug1572832(self):
        # Encoding every non-BMP code point used to segfault any ISO 2022
        # codec; 'ignore' keeps the loop purely about not crashing.
        for codepoint in range(0x10000, 0x110000):
            chr(codepoint).encode('iso_2022_jp', 'ignore')
class TestStateful(unittest.TestCase):
    """Codecs with shift states must emit their reset sequence exactly once,
    and only when the stream is finalized."""
    text = '\u4E16\u4E16'
    encoding = 'iso-2022-jp'
    expected = b'\x1b$B@$@$'
    reset = b'\x1b(B'
    expected_reset = expected + reset

    def test_encode(self):
        # One-shot encode appends the reset sequence automatically.
        self.assertEqual(self.text.encode(self.encoding), self.expected_reset)

    def test_incrementalencoder(self):
        enc = codecs.getincrementalencoder(self.encoding)()
        chunks = [enc.encode(ch) for ch in self.text]
        self.assertEqual(b''.join(chunks), self.expected)
        # Finalizing flushes the reset sequence, and only the first time.
        self.assertEqual(enc.encode('', final=True), self.reset)
        self.assertEqual(enc.encode('', final=True), b'')

    def test_incrementalencoder_final(self):
        enc = codecs.getincrementalencoder(self.encoding)()
        last = len(self.text) - 1
        chunks = [enc.encode(ch, pos == last)
                  for pos, ch in enumerate(self.text)]
        self.assertEqual(b''.join(chunks), self.expected_reset)
        self.assertEqual(enc.encode('', final=True), b'')
class TestHZStateful(TestStateful):
    """Same contract as TestStateful, against the HZ codec, whose shift
    sequences are ~{ (enter GB2312) and ~} (return to ASCII)."""
    encoding = 'hz'
    text = '\u804a\u804a'
    expected = b'~{ADAD'
    reset = b'~}'
    expected_reset = expected + reset
def test_main():
    # Run every TestCase defined in this module via the test.support helper.
    support.run_unittest(__name__)
# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
|
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
# `None` doubles as the marker sentinel inside the active formatting
# elements list; equality checks against Marker detect scope boundaries.
Marker = None
# Maps an elementInScope() variant name to a pair
# (scope-terminating element set, invert flag); the flag tells
# elementInScope whether set membership is XOR-inverted (only "select").
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
class Node(object):
    """Abstract base for items in the tree built by a TreeBuilder."""

    def __init__(self, name):
        """Node representing an item in the tree.

        name - the tag name associated with the node
        parent - the parent of the current node (None for the document node)
        value - the value of the current node (applies to text nodes and
            comments)
        attributes - a dict holding name, value pairs for attributes
        childNodes - a list of child nodes; must include all elements but
            not necessarily other node types
        _flags - a list of miscellaneous flags that can be set on the node
        """
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        rendered = ['%s="%s"' % (attr_name, attr_value)
                    for attr_name, attr_value in self.attributes.items()]
        if rendered:
            return "<%s %s>" % (self.name, " ".join(rendered))
        return "<%s>" % self.name

    def __repr__(self):
        return "<%s>" % self.name

    def appendChild(self, node):
        """Insert node as a child of the current node."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before the
        start of node insertBefore or at the end of the node's text."""
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of the current node, before refNode in
        the list of child nodes. Raises ValueError if refNode is not a
        child of the current node."""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all the children of the current node to newParent.

        Needed so that trees that don't store text as nodes move the text
        in the correct way.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy of the current node, i.e. a node with the
        same name and attributes but with no parent or child nodes."""
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements enforcing the spec's "Noah's Ark"
    clause: at most three equal elements may follow the last marker."""

    def append(self, node):
        matches = 0
        if node != Marker:
            # Scan backwards, stopping at the most recent marker; once a
            # third equal element is seen, drop it to make room.
            for candidate in self[::-1]:
                if candidate == Marker:
                    break
                if self.nodesEqual(candidate, node):
                    matches += 1
                if matches == 3:
                    self.remove(candidate)
                    break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        # Two nodes count as equal when both the (namespace, name) tuple
        # and the attribute dict match.
        if not node1.nameTuple == node2.nameTuple:
            return False
        if not node1.attributes == node2.attributes:
            return False
        return True
class TreeBuilder(object):
    """Base treebuilder implementation
    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """
    # Document class
    documentClass = None
    # The class to use for creating a node
    elementClass = None
    # The class to use for creating comments
    commentClass = None
    # The class to use for creating doctypes
    doctypeClass = None
    # Fragment class
    fragmentClass = None
    def __init__(self, namespaceHTMLElements):
        # When namespacing is enabled every HTML element is created in the
        # XHTML namespace; otherwise namespace is left as None.
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()
    def reset(self):
        # Clear all per-parse state so the builder can be reused.
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()
        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None
        self.insertFromTable = False
        self.document = self.documentClass()
    def elementInScope(self, target, variant=None):
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")
        listElements, invert = listElementsMap[variant]
        # Walk the stack of open elements from the innermost outwards.
        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                return False
        assert False # We should never reach this point
    def reconstructActiveFormattingElements(self):
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.
        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return
        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return
        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]
        while True:
            # Step 7
            i += 1
            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode() # Mainly to get a new copy of the attributes
            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})
            # Step 10
            self.activeFormattingElements[i] = element
            # Step 11
            if element == self.activeFormattingElements[-1]:
                break
    def clearActiveFormattingElements(self):
        # Pop entries up to and including the most recent marker.
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()
    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""
        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False
    def insertRoot(self, token):
        # Create the root element and attach it to the document node.
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)
    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)
    def insertComment(self, token, parent=None):
        # Default parent is the current (innermost) open element.
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))
    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element
    def _getInsertFromTable(self):
        return self._insertFromTable
    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal
    # Property so that assigning insertFromTable also swaps insertElement.
    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
    def insertElementNormal(self, token):
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element
    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element
    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]
        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)
    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore
    def generateImpliedEndTags(self, exclude=None):
        # Recursively pop elements whose end tags are implied, stopping at
        # `exclude` or at any element outside the implied-end-tag set.
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
            and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)
    def getDocument(self):
        "Return the final tree"
        return self.document
    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment
    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        node - the node from which to start serializing"""
        raise NotImplementedError
|
|
"""
Tools for visualizing dependencies between Terms.
"""
from __future__ import unicode_literals
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from six import iteritems
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
    """Raised when graph display is requested but IPython is unavailable."""
def delimit(delimiters, content):
    """
    Surround `content` with the first and last characters of `delimiters`.

    Parameters
    ----------
    delimiters : str
        A length-2 string whose two characters wrap the content.
    content : str
        The text to wrap.

    Returns
    -------
    str

    Raises
    ------
    ValueError
        If `delimiters` is not exactly two characters long.

    Examples
    --------
    >>> delimit('[]', "foo")
    '[foo]'
    >>> delimit('""', "foo")
    '"foo"'
    """
    # Fix: the first doctest previously showed the unquoted output `[foo]`,
    # which fails under doctest since the function returns a str.
    if len(delimiters) != 2:
        raise ValueError(
            "`delimiters` must be of length 2. Got %r" % delimiters
        )
    return ''.join([delimiters[0], content, delimiters[1]])
# Convenience partials: quote("x") -> '"x"', bracket("x") -> '[x]'.
quote = partial(delimit, '""')
bracket = partial(delimit, '[]')
def begin_graph(f, name, **attrs):
    """Open a `strict digraph` block named `name` with graph-level attrs."""
    header = "strict digraph %s {" % name
    writeln(f, header)
    writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
    """Open a `subgraph cluster_<name>` block, defaulting its label to name."""
    if "label" not in attrs:
        attrs["label"] = quote(name)
    header = "subgraph cluster_%s {" % name
    writeln(f, header)
    writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
    """Close the innermost graph or subgraph block."""
    writeln(f, '}')
@contextmanager
def graph(f, name, **attrs):
    # Context manager: writes the digraph header on entry and the closing
    # brace on exit.  NOTE(review): the footer is skipped if the body
    # raises (no try/finally) — presumably fine since a failed render's
    # buffer is discarded; confirm before reusing elsewhere.
    begin_graph(f, name, **attrs)
    yield
    end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
    # Context manager: writes a cluster subgraph header on entry and the
    # closing brace on exit (clusters close with the same '}' as graphs).
    begin_cluster(f, name, **attrs)
    yield
    end_graph(f)
def roots(g):
    """Return the set of nodes in graph `g` with indegree 0."""
    return {node for node, degree in iteritems(g.in_degree()) if degree == 0}
def _render(g, out, format_, include_asset_exists=False):
    """
    Draw `g` as a graph to `out`, in format `format`.
    Parameters
    ----------
    g : zipline.pipeline.graph.TermGraph
        Graph to render.
    out : file-like object
    format_ : str {'png', 'svg'}
        Output format.
    include_asset_exists : bool
        Whether to filter out `AssetExists()` nodes.
    """
    graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
    cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
    # Atomic terms form the input cluster; declared outputs form the
    # output cluster; everything else is an intermediate node.
    in_nodes = list(node for node in g if node.atomic)
    out_nodes = list(g.outputs.values())
    # Build the DOT source in an in-memory buffer.
    f = BytesIO()
    with graph(f, "G", **graph_attrs):
        # Write outputs cluster.
        with cluster(f, 'Output', labelloc='b', **cluster_attrs):
            for term in out_nodes:
                add_term_node(f, term)
        # Write inputs cluster.
        with cluster(f, 'Input', **cluster_attrs):
            for term in in_nodes:
                if term is AssetExists() and not include_asset_exists:
                    continue
                add_term_node(f, term)
        # Write intermediate results.
        for term in topological_sort(g):
            if term in in_nodes or term in out_nodes:
                continue
            add_term_node(f, term)
        # Write edges
        for source, dest in g.edges():
            if source is AssetExists() and not include_asset_exists:
                continue
            add_edge(f, id(source), id(dest))
    # Pipe the DOT source through graphviz's `dot` for layout/rendering.
    cmd = ['dot', '-T', format_]
    try:
        proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise RuntimeError(
                "Couldn't find `dot` graph layout program. "
                "Make sure Graphviz is installed and `dot` is on your path."
            )
        else:
            raise
    f.seek(0)
    proc_stdout, proc_stderr = proc.communicate(f.read())
    # Anything on stderr is treated as a rendering failure.
    if proc_stderr:
        raise RuntimeError(
            "Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
        )
    out.write(proc_stdout)
def display_graph(g, format='svg', include_asset_exists=False):
    """
    Display a TermGraph interactively from within IPython.

    Supported formats are 'svg', 'jpeg' and 'png'.

    Raises
    ------
    NoIPython
        If IPython is not installed.
    """
    try:
        import IPython.display as display
    except ImportError:
        raise NoIPython("IPython is not installed.  Can't display graph.")
    if format == 'svg':
        display_cls = display.SVG
    elif format in ("jpeg", "png"):
        display_cls = partial(display.Image, format=format, embed=True)
    out = BytesIO()
    _render(g, out, format, include_asset_exists=include_asset_exists)
    return display_cls(data=out.getvalue())
def writeln(f, s):
    """Write `s` to binary stream `f` as UTF-8, terminated by a newline."""
    line = s + '\n'
    f.write(line.encode('utf-8'))
def fmt(obj):
    """Render an object as a double-quoted graphviz label.

    Terms use short_repr() when available, falling back to the class name;
    any other object is interpolated directly.
    """
    if isinstance(obj, Term):
        if hasattr(obj, 'short_repr'):
            label = obj.short_repr()
        else:
            label = type(obj).__name__
    else:
        label = obj
    return '"%s"' % label
def add_term_node(f, term):
    """Declare a graphviz node for `term`, keyed by its id()."""
    node_attrs = attrs_for_node(term)
    declare_node(f, id(term), node_attrs)
def declare_node(f, name, attributes):
    """Emit a graphviz node declaration line for `name`."""
    line = "{0} {1};".format(name, format_attrs(attributes))
    writeln(f, line)
def add_edge(f, source, dest):
    """Emit a directed graphviz edge from `source` to `dest`."""
    edge = "{0} -> {1};".format(source, dest)
    writeln(f, edge)
def attrs_for_node(term, **overrides):
    """Graphviz attributes for a term node; the fill color encodes the
    term's kind (BoundColumn / Factor / Filter / Classifier)."""
    attrs = dict(
        shape='box',
        colorscheme='pastel19',
        style='filled',
        label=fmt(term),
    )
    # NOTE: BoundColumn is checked independently of the Factor/Filter/
    # Classifier chain, so a BoundColumn that is also one of those gets the
    # chain's color (original behavior, preserved).
    if isinstance(term, BoundColumn):
        attrs['fillcolor'] = '1'
    if isinstance(term, Factor):
        attrs['fillcolor'] = '2'
    elif isinstance(term, Filter):
        attrs['fillcolor'] = '3'
    elif isinstance(term, Classifier):
        attrs['fillcolor'] = '4'
    attrs.update(**overrides or {})
    return attrs
def format_attrs(attrs):
    """
    Format key, value pairs from attrs into graphviz attrs format

    Example
    -------
    >>> format_attrs({'key1': 'value1', 'key2': 'value2'})
    '[key1=value1, key2=value2]'
    """
    if not attrs:
        return ''
    # Use dict.items() directly: ``iteritems`` is a Python-2 compatibility
    # shim and is unnecessary on Python 3.
    entries = ['='.join((key, value)) for key, value in attrs.items()]
    return '[' + ', '.join(entries) + ']'
|
|
import numpy
import numpy.ma
import cpgReport
from CGATReport.odict import OrderedDict as odict
##########################################################################
class Annotations(cpgReport.cpgTracker):
    """Base class for trackers getting info from the annotations tables.

    Derived Trackers should define the two attributes :attr:`mSelect` and
    :attr:`mColumns`.
    """
    pattern = "(.*)_annotations$"
    mTable = "annotations"
    mSelect = None
    mColumns = None
    mWhere = "1"

    def __call__(self, track, slice=None):
        """Return one row of annotation counts as an ordered dict keyed by mColumns."""
        where = self.mWhere
        select = self.mSelect
        table = self.mTable
        if slice == "all" or slice is None:
            data = self.getFirstRow(
                "%(select)s FROM %(track)s_%(table)s WHERE %(where)s" % locals())
        else:
            # BUG FIX: the original used "is_%slices" - a bare %s against the
            # locals() mapping formats the entire dict into the SQL.  The
            # intended column name is is_<slice>.
            data = self.getFirstRow(
                "%(select)s FROM %(track)s_%(table)s WHERE %(where)s AND is_%(slice)s" % locals())
        return odict(list(zip(self.mColumns, data)))
##########################################################################
class AllAnnotations(Annotations):
    """Annotations of all transcript models."""
    # Output keys; zipped positionally with the result columns of mSelect.
    mColumns = ["cds",
                "utr",
                "upstream",
                "downstream",
                "intronic",
                "intergenic",
                "flank",
                "ambiguous"]
    # Counts of intervals per annotation category (one sum per is_* flag).
    mSelect = """SELECT
    sum(is_cds) AS cds,
    sum(is_utr) AS utr,
    sum(is_upstream) AS upstream,
    sum(is_downstream) AS downstream,
    sum(is_intronic) AS intronic,
    sum(is_intergenic) AS intergenic,
    sum(is_flank) AS flank,
    sum(is_ambiguous) AS ambiguous"""
##########################################################################
class AnnotationsBases(Annotations):
    """Annotations as bases."""
    # NOTE(review): labels are zipped positionally with the result row, so the
    # mismatch with the SQL aliases (CDS vs cds, UTRPromotor vs utr) is
    # cosmetic only - confirm the intended display labels.
    mColumns = ["total", "CDS", "UTRPromotor", "intronic", "intergenic"]
    mSelect = """SELECT
    sum( exons_sum) AS total,
    sum( nover_CDS ) AS cds,
    sum( nover_UTR + nover_UTR3 + nover_UTR5 + nover_flank + nover_5flank + nover_3flank) AS utr,
    sum( nover_intronic) AS intronic,
    sum( nover_intergenic) AS intergenic """
##########################################################################
class AnnotationsAssociated(cpgReport.cpgTracker):
    """simple join between a data table and table defining slices.
    :attr:`mTable`
        table to join with
    :attr:`mColumns`
        columns to output
    """
    mPattern = "_annotations$"
    mTable = None
    mColumns = None
    mWhere = "1"
    # SQL statement templates, interpolated with locals() in getStatement().
    mSelectAll = "SELECT %(columns)s FROM %(track)s_%(table)s AS t WHERE %(where)s"
    mSelectSubset = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND %(where)s"
    # NOTE(review): mSelectSlice is not referenced by getStatement() below -
    # presumably used by subclasses elsewhere; confirm before removing.
    mSelectSlice = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_%(slice)s AS s WHERE s.gene_id = t.gene_id AND %(where)s"
    mSelectMixture = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(subset)s AS s, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND s.gene_id = t.gene_id AND %(where)s"
    def getStatement(self, track, slice=None):
        # Build the SQL statement for track/slice; raises NotImplementedError
        # when the subclass did not define mTable/mColumns.
        columns = self.mColumns
        table = self.mTable
        where = self.mWhere
        if not table or not columns:
            raise NotImplementedError
        if slice and "." in slice:
            # A "slice.subset" value selects the mixture statement.
            slice, subset = slice.split(".")
            return self.mSelectMixture % locals()
        elif slice == "all" or slice is None:
            return self.mSelectAll % locals()
        else:
            return self.mSelectSubset % locals()
##########################################################################
class RepeatOverlap(AnnotationsAssociated):
    """Overlap with repeats."""
    mPattern = "_repeats$"
    # NOTE(review): "with" is an SQL keyword; "as with" may require quoting
    # depending on the database backend - confirm it parses.
    mColumns = "SUM(CASE WHEN nover>0 THEN 1 ELSE 0 END) as with, SUM(CASE WHEN nover=0 THEN 1 ELSE 0 END) AS without"
    mTable = "repeats"
    def __call__(self, track, slice=None):
        # Return counts of intervals with/without repeat overlap.
        statement = self.getStatement(track, slice)
        if not statement:
            return []
        return odict(list(zip(("with", "without"), self.getFirstRow(statement))))
##########################################################################
##########################################################################
##########################################################################
class TSSOverlap(cpgReport.cpgTracker):
    '''number of TSS that an interval overlaps.'''
    mPattern = "_tss$"
    mAnnotations = "annotations"
    mTable = "tss"
    mColumn = "d.is_overlap"
    mWhere = "d.is_overlap < 5 "

    def __call__(self, track, slice=None):
        """Return a histogram (count per overlap value) as an ordered dict."""
        annotations = self.mAnnotations
        table = self.mTable
        column, where = self.mColumn, self.mWhere
        if not slice or slice == "all":
            data = self.getValues(
                """SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
        else:
            data = self.getValues( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
            WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
        # ROBUSTNESS FIX: an empty result set previously crashed in max() /
        # numpy.histogram; return an empty histogram instead.
        if not data:
            return odict()
        hist, bins = numpy.histogram(
            data, bins=numpy.arange(0, max(data) + 1, 1))
        return odict(list(zip(list(map(str, bins[:-1])), hist)))
##########################################################################
class TSSClosest(cpgReport.cpgTracker):
    """for each interval, return the distance to the closest TSS."""
    mXLabel = "distance / bases"
    mPattern = "_tss$"
    # Subclasses override mColumn/mWhere to select other distance columns.
    mColumn = "d.closest_dist"
    mWhere = "1"
    mAnnotations = "annotations"
    mTable = "tss"
    def __call__(self, track, slice=None):
        # Fetch the configured distance column, optionally restricted to
        # intervals annotated with the requested slice.
        annotations = self.mAnnotations
        table = self.mTable
        column, where = self.mColumn, self.mWhere
        if not slice or slice == "all":
            data = self.get(
                """SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
        else:
            data = self.get( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
            WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
        return data
##########################################################################
class TSSClosestUpstream(TSSClosest):
    """for each interval, return peakval and the distance to the closest upstream TSS."""
    # Restrict to strictly positive upstream distances.
    mColumn = "d.dist5"
    mWhere = "d.dist5 > 0"
##########################################################################
class TSSClosestDownstream(TSSClosest):
    """for each interval, return peakval and the distance to the closest downstream TSS."""
    # Restrict to strictly positive downstream distances.
    mColumn = "d.dist3"
    mWhere = "d.dist3 > 0"
##########################################################################
class TSSProfile(cpgReport.cpgTracker):
    """Get profile around TSS"""
    mPattern = "_tss$"
    def __call__(self, track, slice=None):
        # NOTE(review): the %(track)s placeholders are not interpolated here;
        # presumably the tracker base class substitutes caller locals when
        # executing the statement - confirm, otherwise these queries fail.
        statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tss where closest_dist=dist5 """
        statement2 = """SELECT closest_dist as d from %(track)s_tss where closest_dist=dist3 """
        data1 = self.getValues(statement1)
        data2 = self.getValues(statement2)
        # Negative distances = upstream of the TSS, positive = downstream.
        return {"Genomic_distance": data1 + data2}
##########################################################################
class TTSProfile(cpgReport.cpgTracker):
    """Get profile around TTS"""
    mPattern = "_tts$"
    def __call__(self, track, slice=None):
        # NOTE(review): the %(track)s placeholders are not interpolated here;
        # presumably the tracker base class substitutes caller locals when
        # executing the statement - confirm, otherwise these queries fail.
        statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tts where closest_dist=dist5 """
        statement2 = """SELECT closest_dist as d from %(track)s_tts where closest_dist=dist3 """
        data1 = self.getValues(statement1)
        data2 = self.getValues(statement2)
        # Negative distances = upstream of the TTS, positive = downstream.
        return {"Genomic_distance": data1 + data2}
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vmpools
short_description: Module to manage VM pools in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage VM pools in oVirt/RHV."
options:
name:
description:
- "Name of the VM pool to manage."
required: true
state:
description:
- "Should the VM pool be present/absent."
- "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
choices: ['present', 'absent']
default: present
template:
description:
- "Name of the template, which will be used to create VM pool."
description:
description:
- "Description of the VM pool."
cluster:
description:
- "Name of the cluster, where VM pool should be created."
type:
description:
- "Type of the VM pool. Either manual or automatic."
- "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
The virtual machine reverts to the original base image after the administrator returns it to the pool."
- "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
is returned to the virtual machine pool."
- "Default value is set by engine."
choices: ['manual', 'automatic']
vm_per_user:
description:
- "Maximum number of VMs a single user can attach to from this pool."
- "Default value is set by engine."
prestarted:
description:
- "Number of pre-started VMs defines the number of VMs in run state, that are waiting
to be attached to Users."
- "Default value is set by engine."
vm_count:
description:
- "Number of VMs in the pool."
- "Default value is set by engine."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create VM pool from template
- ovirt_vmpools:
cluster: mycluster
name: myvmpool
template: rhel7
vm_count: 2
prestarted: 2
vm_per_user: 1
# Remove vmpool, note that all VMs in pool will be stopped and removed:
- ovirt_vmpools:
state: absent
name: myvmpool
'''
RETURN = '''
id:
description: ID of the VM pool which is managed
returned: On success if VM pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm_pool:
description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
returned: On success if VM pool is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
wait,
)
class VmPoolsModule(BaseModule):
    """Build and compare oVirt VM pool entities from module parameters."""

    def build_entity(self):
        """Construct an ``otypes.VmPool`` from the Ansible module parameters."""
        params = self._module.params
        cluster_name = params['cluster']
        template_name = params['template']
        pool_type = params['type']
        return otypes.VmPool(
            name=params['name'],
            description=params['description'],
            comment=params['comment'],
            cluster=otypes.Cluster(name=cluster_name) if cluster_name else None,
            template=otypes.Template(name=template_name) if template_name else None,
            max_user_vms=params['vm_per_user'],
            prestarted_vms=params['prestarted'],
            size=params['vm_count'],
            type=otypes.VmPoolType(pool_type) if pool_type else None,
        )

    def update_check(self, entity):
        """Return True when the existing entity already matches the requested parameters."""
        params = self._module.params
        return (
            equal(params.get('cluster'), get_link_name(self._connection, entity.cluster))
            and equal(params.get('description'), entity.description)
            and equal(params.get('comment'), entity.comment)
            and equal(params.get('vm_per_user'), entity.max_user_vms)
            and equal(params.get('prestarted'), entity.prestarted_vms)
            and equal(params.get('vm_count'), entity.size)
        )
def main():
    """Module entry point: ensure the oVirt/RHV VM pool is present or absent."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        template=dict(default=None),
        cluster=dict(default=None),
        description=dict(default=None),
        comment=dict(default=None),
        vm_per_user=dict(default=None, type='int'),
        prestarted=dict(default=None, type='int'),
        vm_count=dict(default=None, type='int'),
        type=dict(default=None, choices=['automatic', 'manual']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # BUG FIX: ``connection`` (and ``auth``) were only assigned inside the
    # ``try`` block but unconditionally referenced in ``finally``; a failure
    # in ``create_connection`` raised a NameError that masked the real error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        vm_pools_service = connection.system_service().vm_pools_service()
        vm_pools_module = VmPoolsModule(
            connection=connection,
            module=module,
            service=vm_pools_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = vm_pools_module.create()

            # Wait for all VM pool VMs to be created:
            if module.params['wait']:
                vms_service = connection.system_service().vms_service()
                for vm in vms_service.list(search='pool=%s' % module.params['name']):
                    wait(
                        service=vms_service.service(vm.id),
                        condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
                        timeout=module.params['timeout'],
                    )
        elif state == 'absent':
            ret = vm_pools_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Close only when the connection was actually established.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Run the module when executed directly by Ansible.
if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.longrunning import operations_pb2
from google.oauth2 import service_account # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
    # Report the installed distribution's version in the client user-agent.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google.api_core.operations_v1",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Fall back to an unversioned client-info when not installed as a package.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class OperationsTransport(abc.ABC):
    """Abstract transport class for Operations."""

    AUTH_SCOPES = ()

    DEFAULT_HOST: str = "longrunning.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.  All four operation RPCs share the
        # same retry/timeout policy, so the wrapper configuration is built in
        # one place instead of being repeated per method (DRY).
        def _wrap(method):
            return gapic_v1.method.wrap_method(
                method,
                default_retry=retries.Retry(
                    initial=0.5,
                    maximum=10.0,
                    multiplier=2.0,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=10.0,
                ),
                default_timeout=10.0,
                client_info=client_info,
            )

        self._wrapped_methods = {
            method: _wrap(method)
            for method in (
                self.list_operations,
                self.get_operation,
                self.delete_operation,
                self.cancel_operation,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest],
        Union[
            operations_pb2.ListOperationsResponse,
            Awaitable[operations_pb2.ListOperationsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_operation(
        self,
    ) -> Callable[
        [operations_pb2.GetOperationRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def delete_operation(
        self,
    ) -> Callable[
        [operations_pb2.DeleteOperationRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_operation(
        self,
    ) -> Callable[
        [operations_pb2.CancelOperationRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()
__all__ = ("OperationsTransport",)
|
|
#!/usr/bin/env python
"""
Copyright 2009 Richard Quirk
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
import contextlib
import sys
import unittest
import cmakelint.main
import cmakelint.__version__
import os
# stderr suppression from https://stackoverflow.com/a/1810086
@contextlib.contextmanager
def nostderr():
    """Temporarily replace sys.stderr with a stream that discards all output."""
    class _DiscardingStream(object):
        def write(self, _):
            pass
        def flush(self):
            pass
    previous = sys.stderr
    sys.stderr = _DiscardingStream()
    try:
        yield
    finally:
        # Always restore the real stderr, even if the body raised.
        sys.stderr = previous
class ErrorCollector(object):
    """Callable sink that records lint messages whose category is printable."""

    def __init__(self):
        self._errors = []

    def __call__(self, unused_filename, unused_line, category, message):
        # Only keep messages that the lint state would actually print.
        if not cmakelint.main.ShouldPrintError(category):
            return
        self._errors.append(message)

    def Results(self):
        # A single (or no) message is returned bare; multiple as a list.
        collected = self._errors
        return collected if len(collected) > 1 else ''.join(collected)
class CMakeLintTestBase(unittest.TestCase):
    """Shared helpers that run one lint check and compare collected errors."""
    def doTestLint(self, code, expected_message):
        # Lint a single line as file 'foo.cmake'.
        errors = ErrorCollector()
        clean_lines = cmakelint.main.CleansedLines([code])
        cmakelint.main.ProcessLine('foo.cmake', 0, clean_lines, errors)
        self.assertEqual(expected_message, errors.Results())
    def doTestMultiLineLint(self, code, expected_message):
        # Lint every line of a multi-line snippet.
        errors = ErrorCollector()
        clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
        for i in clean_lines.LineNumbers():
            cmakelint.main.ProcessLine('foo.cmake', i, clean_lines, errors)
        self.assertEqual(expected_message, errors.Results())
    def doTestCheckRepeatLogic(self, code, expected_message):
        # Run only the repeated-logic check (e.g. else(foo)/endmacro(foo)).
        errors = ErrorCollector()
        clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
        for i in clean_lines.LineNumbers():
            cmakelint.main.CheckRepeatLogic('foo.cmake', i, clean_lines, errors)
        self.assertEqual(expected_message, errors.Results())
    def doTestCheckFileName(self, filename, expected_message):
        # Run only the file-name convention check.
        errors = ErrorCollector()
        cmakelint.main.CheckFileName(filename, errors)
        self.assertEqual(expected_message, errors.Results())
    def doTestCheckFindPackage(self, filename, code, expected_message):
        # Run the Find-module checks, including the end-of-file package state.
        errors = ErrorCollector()
        clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
        for i in clean_lines.LineNumbers():
            cmakelint.main.CheckFindPackage(filename, i, clean_lines, errors)
        cmakelint.main._package_state.Done(filename, errors)
        self.assertEqual(expected_message, errors.Results())
    def doTestGetArgument(self, expected_arg, code):
        # Check GetCommandArgument on the first command of the snippet.
        clean_lines = cmakelint.main.CleansedLines(code.split('\n'))
        self.assertEqual(expected_arg, cmakelint.main.GetCommandArgument(0, clean_lines))
class CMakeLintTest(CMakeLintTestBase):
def setUp(self):
cmakelint.main._lint_state.filters = []
def testLineLength(self):
self.doTestLint(
'# '+('o'*80),
'Lines should be <= 80 characters long')
def testUpperAndLowerCase(self):
self.doTestMultiLineLint(
'''project()\nCMAKE_MINIMUM_REQUIRED()\n''',
'Do not mix upper and lower case commands')
def testContainsCommand(self):
self.assertTrue(cmakelint.main.ContainsCommand('project()'))
self.assertTrue(cmakelint.main.ContainsCommand('project('))
self.assertTrue(cmakelint.main.ContainsCommand('project ( '))
self.assertFalse(cmakelint.main.ContainsCommand('VERSION'))
def testGetCommand(self):
self.assertEqual('project', cmakelint.main.GetCommand('project()'))
self.assertEqual('project', cmakelint.main.GetCommand('project('))
self.assertEqual('project', cmakelint.main.GetCommand('project ( '))
self.assertEqual('', cmakelint.main.GetCommand('VERSION'))
def testIsCommandUpperCase(self):
self.assertTrue(cmakelint.main.IsCommandUpperCase('PROJECT'))
self.assertTrue(cmakelint.main.IsCommandUpperCase('CMAKE_MINIMUM_REQUIRED'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('cmake_minimum_required'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('project'))
self.assertFalse(cmakelint.main.IsCommandUpperCase('PrOjEct'))
def testIsCommandMixedCase(self):
self.assertTrue(cmakelint.main.IsCommandMixedCase('PrOjEct'))
self.assertFalse(cmakelint.main.IsCommandMixedCase('project'))
self.assertFalse(cmakelint.main.IsCommandMixedCase('CMAKE_MINIMUM_REQUIRED'))
self.assertTrue(cmakelint.main.IsCommandMixedCase('CMAKE_MINIMUM_required'))
def testCleanComment(self):
self.assertEqual(('', False), cmakelint.main.CleanComments('# Comment to zap'))
self.assertEqual(
('project()', False),
cmakelint.main.CleanComments('project() # Comment to zap'))
def testCleanCommentQuotes(self):
self.assertEqual(
('CHECK_C_SOURCE_COMPILES("', True),
cmakelint.main.CleanComments('CHECK_C_SOURCE_COMPILES("'))
self.assertEqual(
('', True),
cmakelint.main.CleanComments(' some line in a comment ', True))
self.assertEqual(
('")', False),
cmakelint.main.CleanComments(' end of comment") ', True))
def testCommandSpaces(self):
self.doTestMultiLineLint(
"""project ()""",
"Extra spaces between 'project' and its ()")
def testTabs(self):
self.doTestLint('\tfoo()', 'Tab found; please use spaces')
def testTrailingSpaces(self):
self.doTestLint('# test ', 'Line ends in whitespace')
self.doTestMultiLineLint(' foo() \n foo()\n', 'Line ends in whitespace')
self.doTestLint(' set(var value)', '')
def testCommandSpaceBalance(self):
self.doTestMultiLineLint(
"""project( Foo)""",
'Mismatching spaces inside () after command')
self.doTestMultiLineLint(
"""project(Foo )""",
'Mismatching spaces inside () after command')
def testCommandNotEnded(self):
self.doTestMultiLineLint(
"""project(
Foo
#
#""",
'Unable to find the end of this command')
def testRepeatLogicExpression(self):
self.doTestCheckRepeatLogic('else(foo)',
'Expression repeated inside else; '
'better to use only else()')
self.doTestCheckRepeatLogic('ELSEIF(NOT ${VAR})', '')
self.doTestCheckRepeatLogic('ENDMACRO( my_macro foo bar baz)',
'Expression repeated inside endmacro; '
'better to use only ENDMACRO()')
def testFindTool(self):
self.doTestCheckFileName('path/to/FindFooBar.cmake',
'Find modules should use uppercase names; '
'consider using FindFOOBAR.cmake')
self.doTestCheckFileName('CMakeLists.txt', '')
self.doTestCheckFileName('cmakeLists.txt',
'File should be called CMakeLists.txt')
def testIsFindPackage(self):
self.assertTrue(cmakelint.main.IsFindPackage('path/to/FindFOO.cmake'))
self.assertFalse(cmakelint.main.IsFindPackage('path/to/FeatureFOO.cmake'))
def testCheckFindPackage(self):
self.doTestCheckFindPackage(
'FindFoo.cmake',
'',
['Package should include FindPackageHandleStandardArgs',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS'])
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)''',
'Package should use FIND_PACKAGE_HANDLE_STANDARD_ARGS')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'Package should include FindPackageHandleStandardArgs')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK DEFAULT_MSG)''',
'Weird variable passed to std args, should be FOO not KK')
self.doTestCheckFindPackage(
'FindFoo.cmake',
'''INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(FOO DEFAULT_MSG)''',
'')
def testGetCommandArgument(self):
self.doTestGetArgument('KK',
'''SET(
KK)''')
self.doTestGetArgument('KK', 'Set( KK)')
self.doTestGetArgument('KK', 'FIND_PACKAGE_HANDLE_STANDARD_ARGS(KK BLEUGH)')
def testIsValidFile(self):
self.assertTrue(cmakelint.main.IsValidFile('CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('cmakelists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('/foo/bar/baz/CMakeLists.txt'))
self.assertTrue(cmakelint.main.IsValidFile('Findkk.cmake'))
self.assertFalse(cmakelint.main.IsValidFile('foobar.h.in'))
def testFilterControl(self):
self.doTestMultiLineLint(('# lint_cmake: -whitespace/eol\n'
' foo() \n'
' foo()\n'), '')
def testBadPragma(self):
self.doTestMultiLineLint(('# lint_cmake: I am badly formed\n'
'if(TRUE)\n'
'endif()\n'),
'Filter should start with - or +')
def testBadPragma2(self):
self.doTestMultiLineLint(('# lint_cmake: -unknown thing\n'
'if(TRUE)\n'
'endif()\n'),
'Filter not allowed: -unknown thing')
def testWhitespaceIssue16(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar\n'
' )\n'
'endif()\n'),
'')
def testWhitespaceIssue16NonRegression(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar)\n'
'endif()\n'),
'')
def testWhitespaceIssue16FalseNegative(self):
self.doTestMultiLineLint(('if(${CONDITION})\n'
' set(VAR\n'
' foo\n'
' bar )\n'
'endif()\n'),
'Mismatching spaces inside () after command')
def testNoEnd(self):
self.doTestMultiLineLint('file(APPEND ${OUT} "#endif${nl}")\n', '')
def testBackslashComment(self):
self.doTestMultiLineLint( r'file(APPEND ${OUT} " \"") # comment\n', '')
def testFalsePositiveSourceCompiles(self):
self.doTestMultiLineLint((
'CHECK_C_SOURCE_COMPILES("\n'
'#include\n'
'void foo(void) {}\n'
'int main()\n'
'{\n'
'pthread_once_t once_control = PTHREAD_ONCE_INIT;\n'
'pthread_once(&once_control, foo);\n'
'return 0;\n'
'}"\n'
'HAVE_PTHREAD_ONCE_INIT\n'
')\n'), '')
def testIndent(self):
try:
cmakelint.main._lint_state.spaces = 2
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)', '')
self.doTestLint(' four_indent(test)', '')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 2 spaces')
self.doTestLint(' three_indent(test)',
'Weird indentation; use 2 spaces')
cmakelint.main._lint_state.spaces = 3
self.doTestLint('no_indent(test)', '')
self.doTestLint(' two_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' four_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' one_indent(test)',
'Weird indentation; use 3 spaces')
self.doTestLint(' three_indent(test)', '')
finally:
cmakelint.main._lint_state.spaces = 2
def testParseArgs(self):
old_usage = cmakelint.main._USAGE
old_version = cmakelint.__version__.VERSION
old_cats = cmakelint.main._ERROR_CATEGORIES
old_spaces = cmakelint.main._lint_state.spaces
try:
cmakelint.main._USAGE = ""
cmakelint.main._ERROR_CATEGORIES = ""
cmakelint.main._VERSION = ""
with nostderr():
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, [])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--help'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--bogus-option'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--filter='])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--filter=foo'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--filter=+x,b,-c', 'foo.cmake'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--spaces=c', 'foo.cmake'])
self.assertRaises(SystemExit, cmakelint.main.ParseArgs, ['--version'])
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(['--filter=-whitespace', 'foo.cmake']))
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(['foo.cmake']))
filt = '-,+whitespace'
cmakelint.main._lint_state.filters = []
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(['-', '+whitespace'], cmakelint.main._lint_state.filters)
self.assertEqual(3, cmakelint.main._lint_state.spaces)
cmakelint.main._lint_state.filters = []
filt = '-,+whitespace/eol, +whitespace/tabs'
self.assertEqual(['foo.cmake'], cmakelint.main.ParseArgs(['--config=None', '--spaces=3', '--filter='+filt, 'foo.cmake']))
self.assertEqual(['-', '+whitespace/eol', '+whitespace/tabs'], cmakelint.main._lint_state.filters)
cmakelint.main._lint_state.filters = []
cmakelint.main.ParseArgs(['--config=./foo/bar', 'foo.cmake'])
self.assertEqual('./foo/bar', cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['--config=None', 'foo.cmake'])
self.assertEqual(None, cmakelint.main._lint_state.config)
cmakelint.main.ParseArgs(['foo.cmake'])
self.assertEqual(os.environ['HOME']+os.path.sep+'.cmakelintrc', cmakelint.main._lint_state.config)
finally:
cmakelint.main._USAGE = old_usage
cmakelint.main._ERROR_CATEGORIES = old_cats
cmakelint.main._VERSION = old_version
cmakelint.main._lint_state.filters = []
cmakelint.main._lint_state.spaces = old_spaces
    def testParseOptionsFile(self):
        """ParseOptionFile should apply filter/spaces/linelength/quiet settings.

        Mutates the module-level _lint_state of cmakelint.main, so the
        original values are saved up front and restored in the finally block.
        """
        old_usage = cmakelint.main._USAGE
        old_cats = cmakelint.main._ERROR_CATEGORIES
        old_spaces = cmakelint.main._lint_state.spaces
        try:
            cmakelint.main._USAGE = ""
            cmakelint.main._ERROR_CATEGORIES = ""
            # Comments in the option file are skipped; filter and spaces apply.
            cmakelint.main.ParseOptionFile("""
# skip comment
filter=-,+whitespace
spaces= 3
""".split('\n'), ignore_space=False)
            self.assertEqual(['-', '+whitespace'], cmakelint.main._lint_state.filters)
            # Command-line filters are appended after the config-file ones.
            cmakelint.main.ParseArgs(['--filter=+syntax','foo.cmake'])
            self.assertEqual(['-', '+whitespace', '+syntax'], cmakelint.main._lint_state.filters)
            self.assertEqual(3, cmakelint.main._lint_state.spaces)
            cmakelint.main._lint_state.spaces = 2
            # With ignore_space=True the spaces setting must not change...
            cmakelint.main.ParseOptionFile("""
# skip comment
spaces= 4
""".split('\n'), ignore_space=True)
            self.assertEqual(2, cmakelint.main._lint_state.spaces)
            # ...but linelength is still honored.
            cmakelint.main.ParseOptionFile("""
# skip comment
linelength= 90
""".split('\n'), ignore_space=True)
            self.assertEqual(90, cmakelint.main._lint_state.linelength)
            cmakelint.main.ParseOptionFile("""
# skip comment
""".split('\n'), ignore_space=False)
            self.assertEqual(2, cmakelint.main._lint_state.spaces)
            # A bare "quiet" line enables quiet mode.
            cmakelint.main.ParseOptionFile("""
quiet
""".split('\n'), ignore_space=False)
            self.assertTrue(cmakelint.main._lint_state.quiet)
            cmakelint.main._lint_state.quiet = True
            # A commented-out "quiet" must not reset an already-set flag.
            cmakelint.main.ParseOptionFile("""
# quiet
""".split('\n'), ignore_space=False)
            self.assertTrue(cmakelint.main._lint_state.quiet)
        finally:
            # Restore the shared module state for other tests.
            cmakelint.main._USAGE = old_usage
            cmakelint.main._ERROR_CATEGORIES = old_cats
            cmakelint.main._lint_state.spaces = old_spaces
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
|
import http.server
import os
import re
import config as cfg
from sequencePlaylist import SequencePlaylist
if not cfg.NO_PI:
from neopixelSequencePlayer import NeopixelSequencePlayer
from sequence import Sequence
from sequencePlayerWindow import SequencePlayerWindow
if cfg.REQUIRES_AUTH or cfg.USE_ADMIN_AUTH:
import base64
class StoppableHTTPServer(http.server.HTTPServer):
    """HTTPServer whose serve loop exits cleanly on Ctrl-C.

    On shutdown it also stops the global PLAYER so the LED playback
    thread does not outlive the web server.
    """
    def run(self):
        """Serve requests until KeyboardInterrupt, then close and stop playback."""
        try:
            self.serve_forever()
        except KeyboardInterrupt:
            pass
        finally:
            # Clean-up server (close socket, etc.)
            print("Stopping server")
            self.server_close()
            PLAYER.stop()
class Handler(http.server.SimpleHTTPRequestHandler):
    """HTTP request handler for the LED sequence-player web UI.

    Serves static assets and the main HTML page, plus a small GET/POST API
    to list, start and stop sequences/playlists on the global PLAYER.
    """

    # File suffixes served directly from disk as static assets.
    STATIC_SUFFIXES = (".js", ".css", ".ico", ".png", ".jpg")

    def __init__(self, request, client_address, server):
        # BUGFIX: initialise the auth flags BEFORE calling the base
        # constructor. SimpleHTTPRequestHandler.__init__ handles the request
        # immediately, so the original code set these attributes only after
        # the request had already been processed.
        self.isuser = False    # True once valid user or admin credentials seen
        self.isadmin = False   # True when admin auth passed (or isn't required)
        super(Handler, self).__init__(request, client_address, server)

    def _write_buttons(self, directory):
        # Emit one bootstrap-styled button per file in *directory*,
        # highlighting the entry that is currently playing.
        for file in os.listdir(directory):
            btnclass = "default"
            if PLAYER.currentplaylist is not None and PLAYER.currentplaylist.name == file:
                btnclass = "primary"
            self.wfile.write(bytearray(
                ('<button class="btn btn-' + btnclass + '">' + file + '</button>').encode()
            ))

    def _write_names(self, directory):
        # Plain-text listing: one file name per line.
        for file in os.listdir(directory):
            self.wfile.write(bytearray((file + "\n").encode()))

    def do_GET(self):
        """Serve static files, the GET API endpoints and the main HTML page."""
        try:
            if not self.check_auth():
                return
            # BUGFIX: the original condition was written as
            # `.endswith(".png" or self.path.endswith(".jpg"))`, which
            # evaluates to `.endswith(".png")` — ".jpg" was never matched.
            # str.endswith accepts a tuple of suffixes.
            if self.path.endswith(self.STATIC_SUFFIXES):
                f = self.send_head()
                if f:
                    self.copyfile(f, self.wfile)
                    f.close()
            else:
                if self.path == "/auth" and not self.isuser:
                    # Force a browser login prompt.
                    self.send_response(401)
                    self.send_header(
                        'WWW-Authenticate',
                        'Basic realm="Authenticate"')
                    self.end_headers()
                    return
                else:
                    self.send_response(200)
                    self.send_header('Content-type', "text/html")
                    self.end_headers()
                if self.path == "/get/sequences/html/":
                    self._write_buttons(cfg.SEQUENCE_DIR)
                elif self.path == "/get/playlists/html/":
                    self._write_buttons(cfg.PLAYLIST_DIR)
                elif self.path == "/get/sequences/":
                    self._write_names(cfg.SEQUENCE_DIR)
                elif self.path == "/get/playlists/":
                    self._write_names(cfg.PLAYLIST_DIR)
                elif self.path == "/get/current/":
                    if PLAYER.currentplaylist is not None:
                        self.wfile.write(bytearray(PLAYER.currentplaylist.name.encode()))
                    else:
                        self.wfile.write(b"")
                elif self.path == "/stop/":
                    self.wfile.write(b"")
                    PLAYER.stopcurrentplaylist()
                    PLAYER.clear()
                elif str(self.path).startswith("/set/sequence/"):
                    # Path suffix after "/set/sequence/" is the sequence name.
                    name = str(self.path)[14:]
                    self.wfile.write(b"")
                    path = os.path.join(cfg.SEQUENCE_DIR, name)
                    try:
                        PLAYER.runsequence(Sequence.parsefile(path))
                    except FileNotFoundError:
                        print("Sequence '{0}' does not exist!".format(path))
                elif str(self.path).startswith("/set/playlist/"):
                    name = str(self.path)[14:]
                    self.wfile.write(b"")
                    path = os.path.join(cfg.PLAYLIST_DIR, name)
                    try:
                        PLAYER.runplaylist(SequencePlaylist.parsefile(path))
                    except FileNotFoundError:
                        print("Playlist '{0}' does not exist!".format(path))
                else:
                    # BUGFIX: close the HTML template handle (it was leaked
                    # by the original `open(...).read()`).
                    with open(cfg.HTML_FILE, "r") as html_file:
                        f = html_file.read()
                    self.wfile.write(
                        self.parsefile(f).encode('utf-8')
                    )
        except Exception as ex:
            print("ERROR: {0}".format(ex))
            self.send_response(500)
            self.end_headers()
            if cfg.VERBOSE_LOGGING:
                raise ex

    def do_POST(self):
        """Accept a posted sequence/playlist definition and start playing it."""
        try:
            # POST endpoints change the player state, so admin auth is
            # required when USE_ADMIN_AUTH is configured.
            if not self.check_auth(True):
                return
            self.send_response(200)
            self.send_header('Content-type', "text/html")
            self.end_headers()
            self.wfile.write(b"")
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            post_string = post_data.decode("utf-8")
            if self.path == "/set/sequence/":
                seq = Sequence.parsestring(post_string)
                PLAYER.runsequence(seq)
            elif self.path == "/set/playlist/":
                playlist = SequencePlaylist.parsestring(post_string)
                PLAYER.runplaylist(playlist)
        except Exception as ex:
            print("ERROR: {0}".format(ex))
            self.send_response(500)
            self.end_headers()
            if cfg.VERBOSE_LOGGING:
                raise ex

    def log_message(self, format, *args):
        # Suppress the default per-request log lines unless verbose logging
        # is enabled in the configuration.
        if cfg.VERBOSE_LOGGING:
            http.server.SimpleHTTPRequestHandler.log_message(self, format, *args)

    def check_auth(self, adminonly=False):
        """Validate HTTP Basic credentials against the configured accounts.

        Returns True when the request may proceed. When auth is required and
        fails, a 401 with a WWW-Authenticate challenge has already been sent.
        Always updates self.isuser / self.isadmin for template rendering.
        """
        # BUGFIX: the module-level `import base64` only happens when
        # REQUIRES_AUTH or USE_ADMIN_AUTH is set, yet this method decodes
        # the header whenever a client SENDS one — which raised NameError
        # (and thus a 500) on unauthenticated deployments. A local import
        # makes decoding safe regardless of configuration.
        import base64
        success = True
        admincorrect = False
        usercorrect = False
        authheader = self.headers['Authorization']
        if authheader and authheader.startswith('Basic'):
            credentials = authheader.split(' ')[1]
            decoded = base64.b64decode(bytes(credentials, 'utf8')).decode('utf-8')
            user, password = decoded.split(":")
            admincorrect = \
                user == cfg.AUTH_ADMIN_USER and \
                password == cfg.AUTH_ADMIN_PASS
            usercorrect = \
                user == cfg.AUTH_USER and \
                password == cfg.AUTH_PASS
        # Parentheses make the original `or`/`and` precedence explicit.
        if cfg.REQUIRES_AUTH or (adminonly and cfg.USE_ADMIN_AUTH):
            success = (adminonly and admincorrect) or \
                      (not adminonly and (admincorrect or usercorrect))
            if not success:
                if cfg.VERBOSE_LOGGING:
                    print("Showing login prompt to user")
                message = "The christmastree requires login" if not adminonly \
                    else "For this part you need to login as admin"
                self.send_response(401)
                self.send_header(
                    'WWW-Authenticate',
                    'Basic realm="' + message + '"')
                self.end_headers()
        self.isuser = usercorrect or admincorrect
        self.isadmin = not cfg.USE_ADMIN_AUTH or admincorrect
        if success and cfg.VERBOSE_LOGGING:
            print("is user: {0}, is admin: {1}".format(self.isuser, self.isadmin))
        return success

    def parsefile(self, file):
        """Filter the #admin ... #end blocks in the HTML template.

        Non-admin users get the whole block removed; admins keep the content
        with the marker tokens stripped out.
        """
        result = file
        if not self.isadmin:
            regex = re.compile(r"(\#admin.*?\#end)", re.IGNORECASE | re.DOTALL)
            result = re.sub(regex, '', result)
        else:
            result = result.replace("#admin", "")
            result = result.replace("#end", "")
        return result
# Ensure the on-disk storage directories exist before handling requests.
if not os.path.exists(cfg.SEQUENCE_DIR):
    os.makedirs(cfg.SEQUENCE_DIR)
if not os.path.exists(cfg.PLAYLIST_DIR):
    os.makedirs(cfg.PLAYLIST_DIR)
# Pick the playback backend: a desktop preview window off-Pi, real
# NeoPixel hardware on the Pi. PLAYER is the global used by Handler
# and StoppableHTTPServer.
if cfg.NO_PI:
    PLAYER = SequencePlayerWindow()
else:
    PLAYER = NeopixelSequencePlayer()
# Optionally start playing something immediately; a configured startup
# playlist takes precedence over a startup sequence.
if cfg.STARTUP_PLAYLIST:
    if cfg.VERBOSE_LOGGING:
        print("Going to run the startup playlist")
    path = os.path.join(cfg.PLAYLIST_DIR, cfg.STARTUP_PLAYLIST)
    try:
        PLAYER.runplaylist(SequencePlaylist.parsefile(path))
    except FileNotFoundError:
        print("Startup playlist '{0}' does not exist!".format(path))
elif cfg.STARTUP_SEQUENCE:
    if cfg.VERBOSE_LOGGING:
        print("Going to run the startup sequence")
    path = os.path.join(cfg.SEQUENCE_DIR, cfg.STARTUP_SEQUENCE)
    try:
        PLAYER.runsequence(Sequence.parsefile(path))
    except FileNotFoundError:
        print("Startup sequence '{0}' does not exist!".format(path))
# Start the web server when run as a script.
if __name__ == '__main__':
    print('Server listening on port {0}...'.format(cfg.PORT))
    HTTPD = StoppableHTTPServer(('', cfg.PORT), Handler)
    HTTPD.run()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################
# WARNING!
#
# Do not edit this file directly. This file should be generated by
# running the command "tox -e genopts" any time a config option
# has been added, changed, or removed.
###################################################################
import itertools
from keystoneauth1 import loading
from cinder import objects # noqa
objects.register_all()
from cinder.api import common as cinder_api_common
from cinder.api.middleware import auth as cinder_api_middleware_auth
import cinder.api.openstack
from cinder.api.views import versions as cinder_api_views_versions
from cinder.backup import api as cinder_backup_api
from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver
from cinder.backup import driver as cinder_backup_driver
from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph
from cinder.backup.drivers import gcs as cinder_backup_drivers_gcs
from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs
from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs
from cinder.backup.drivers import posix as cinder_backup_drivers_posix
from cinder.backup.drivers import s3 as cinder_backup_drivers_s3
from cinder.backup.drivers import swift as cinder_backup_drivers_swift
from cinder.backup import manager as cinder_backup_manager
from cinder.cmd import backup as cinder_cmd_backup
from cinder.cmd import volume as cinder_cmd_volume
from cinder.common import config as cinder_common_config
import cinder.compute
from cinder.compute import nova as cinder_compute_nova
from cinder import context as cinder_context
from cinder import coordination as cinder_coordination
from cinder.db import api as cinder_db_api
from cinder.image import glance as cinder_image_glance
from cinder.image import image_utils as cinder_image_imageutils
from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr
from cinder.message import api as cinder_message_api
from cinder import quota as cinder_quota
from cinder.scheduler import driver as cinder_scheduler_driver
from cinder.scheduler import host_manager as cinder_scheduler_hostmanager
from cinder.scheduler import manager as cinder_scheduler_manager
from cinder.scheduler import scheduler_options as \
cinder_scheduler_scheduleroptions
from cinder.scheduler.weights import capacity as \
cinder_scheduler_weights_capacity
from cinder.scheduler.weights import volume_number as \
cinder_scheduler_weights_volumenumber
from cinder import service as cinder_service
from cinder import service_auth as cinder_serviceauth
from cinder import ssh_utils as cinder_sshutils
from cinder.transfer import api as cinder_transfer_api
from cinder.volume import api as cinder_volume_api
from cinder.volume import driver as cinder_volume_driver
from cinder.volume.drivers.ceph import rbd_iscsi as \
cinder_volume_drivers_ceph_rbdiscsi
from cinder.volume.drivers.datera import datera_iscsi as \
cinder_volume_drivers_datera_dateraiscsi
from cinder.volume.drivers.dell_emc.powerflex import driver as \
cinder_volume_drivers_dell_emc_powerflex_driver
from cinder.volume.drivers.dell_emc.powermax import common as \
cinder_volume_drivers_dell_emc_powermax_common
from cinder.volume.drivers.dell_emc.powerstore import driver as \
cinder_volume_drivers_dell_emc_powerstore_driver
from cinder.volume.drivers.dell_emc.powervault import common as \
cinder_volume_drivers_dell_emc_powervault_common
from cinder.volume.drivers.dell_emc.sc import storagecenter_common as \
cinder_volume_drivers_dell_emc_sc_storagecentercommon
from cinder.volume.drivers.dell_emc.unity import driver as \
cinder_volume_drivers_dell_emc_unity_driver
from cinder.volume.drivers.dell_emc.vnx import common as \
cinder_volume_drivers_dell_emc_vnx_common
from cinder.volume.drivers.dell_emc import xtremio as \
cinder_volume_drivers_dell_emc_xtremio
from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common as \
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon
from cinder.volume.drivers.fusionstorage import dsware as \
cinder_volume_drivers_fusionstorage_dsware
from cinder.volume.drivers.hitachi import hbsd_common as \
cinder_volume_drivers_hitachi_hbsdcommon
from cinder.volume.drivers.hitachi import hbsd_rest as \
cinder_volume_drivers_hitachi_hbsdrest
from cinder.volume.drivers.hitachi import hbsd_rest_fc as \
cinder_volume_drivers_hitachi_hbsdrestfc
from cinder.volume.drivers.hpe import hpe_3par_common as \
cinder_volume_drivers_hpe_hpe3parcommon
from cinder.volume.drivers.hpe import nimble as \
cinder_volume_drivers_hpe_nimble
from cinder.volume.drivers.huawei import common as \
cinder_volume_drivers_huawei_common
from cinder.volume.drivers.ibm import flashsystem_common as \
cinder_volume_drivers_ibm_flashsystemcommon
from cinder.volume.drivers.ibm import flashsystem_iscsi as \
cinder_volume_drivers_ibm_flashsystemiscsi
from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs
from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as \
cinder_volume_drivers_ibm_ibm_storage_ds8kproxy
from cinder.volume.drivers.ibm.ibm_storage import ibm_storage as \
cinder_volume_drivers_ibm_ibm_storage_ibmstorage
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \
cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi
from cinder.volume.drivers import infinidat as cinder_volume_drivers_infinidat
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli
from cinder.volume.drivers.inspur.as13000 import as13000_driver as \
cinder_volume_drivers_inspur_as13000_as13000driver
from cinder.volume.drivers.inspur.instorage import instorage_common as \
cinder_volume_drivers_inspur_instorage_instoragecommon
from cinder.volume.drivers.inspur.instorage import instorage_iscsi as \
cinder_volume_drivers_inspur_instorage_instorageiscsi
from cinder.volume.drivers.kaminario import kaminario_common as \
cinder_volume_drivers_kaminario_kaminariocommon
from cinder.volume.drivers.kioxia import kumoscale as \
cinder_volume_drivers_kioxia_kumoscale
from cinder.volume.drivers.lenovo import lenovo_common as \
cinder_volume_drivers_lenovo_lenovocommon
from cinder.volume.drivers import lightos as cinder_volume_drivers_lightos
from cinder.volume.drivers import linstordrv as \
cinder_volume_drivers_linstordrv
from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm
from cinder.volume.drivers.macrosan import driver as \
cinder_volume_drivers_macrosan_driver
from cinder.volume.drivers.nec.v import nec_v_rest as \
cinder_volume_drivers_nec_v_necvrest
from cinder.volume.drivers.netapp import options as \
cinder_volume_drivers_netapp_options
from cinder.volume.drivers.nexenta import options as \
cinder_volume_drivers_nexenta_options
from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs
from cinder.volume.drivers.open_e import options as \
cinder_volume_drivers_open_e_options
from cinder.volume.drivers.prophetstor import options as \
cinder_volume_drivers_prophetstor_options
from cinder.volume.drivers import pure as cinder_volume_drivers_pure
from cinder.volume.drivers import qnap as cinder_volume_drivers_qnap
from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte
from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd
from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs
from cinder.volume.drivers.san.hp import hpmsa_common as \
cinder_volume_drivers_san_hp_hpmsacommon
from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san
from cinder.volume.drivers.sandstone import sds_driver as \
cinder_volume_drivers_sandstone_sdsdriver
from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire
from cinder.volume.drivers import storpool as cinder_volume_drivers_storpool
from cinder.volume.drivers.stx import common as \
cinder_volume_drivers_stx_common
from cinder.volume.drivers.synology import synology_common as \
cinder_volume_drivers_synology_synologycommon
from cinder.volume.drivers.toyou.acs5000 import acs5000_common as \
cinder_volume_drivers_toyou_acs5000_acs5000common
from cinder.volume.drivers.veritas_access import veritas_iscsi as \
cinder_volume_drivers_veritas_access_veritasiscsi
from cinder.volume.drivers.vmware import vmdk as \
cinder_volume_drivers_vmware_vmdk
from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage
from cinder.volume.drivers.windows import iscsi as \
cinder_volume_drivers_windows_iscsi
from cinder.volume.drivers.windows import smbfs as \
cinder_volume_drivers_windows_smbfs
from cinder.volume.drivers.zadara import zadara as \
cinder_volume_drivers_zadara_zadara
from cinder.volume import manager as cinder_volume_manager
from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf
from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \
cinder_zonemanager_drivers_brocade_brcdfabricopts
from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \
cinder_zonemanager_drivers_brocade_brcdfczonedriver
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \
cinder_zonemanager_drivers_cisco_ciscofabricopts
from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \
cinder_zonemanager_drivers_cisco_ciscofczonedriver
from cinder.zonemanager import fc_zone_manager as \
cinder_zonemanager_fczonemanager
def list_opts():
    """Return the (group, option-list) pairs for oslo-config-generator.

    This module is machine-generated (see the header warning); the body is
    left untouched and only documentation is added here.
    """
    return [
        ('backend',
         itertools.chain(
             [cinder_cmd_volume.host_opt],
         )),
        ('brcd_fabric_example',
         itertools.chain(
             cinder_zonemanager_drivers_brocade_brcdfabricopts.
             brcd_zone_opts,
         )),
        ('cisco_fabric_example',
         itertools.chain(
             cinder_zonemanager_drivers_cisco_ciscofabricopts.
             cisco_zone_opts,
         )),
        ('coordination',
         itertools.chain(
             cinder_coordination.coordination_opts,
         )),
        ('DEFAULT',
         itertools.chain(
             cinder_api_common.api_common_opts,
             [cinder_api_middleware_auth.use_forwarded_for_opt],
             cinder.api.openstack.openstack_api_opts,
             cinder_api_views_versions.versions_opts,
             cinder_backup_api.backup_opts,
             cinder_backup_chunkeddriver.backup_opts,
             cinder_backup_driver.backup_opts,
             cinder_backup_drivers_ceph.service_opts,
             cinder_backup_drivers_gcs.gcsbackup_service_opts,
             cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts,
             cinder_backup_drivers_nfs.nfsbackup_service_opts,
             cinder_backup_drivers_posix.posixbackup_service_opts,
             cinder_backup_drivers_s3.s3backup_service_opts,
             cinder_backup_drivers_swift.swiftbackup_service_opts,
             cinder_backup_manager.backup_manager_opts,
             cinder_cmd_backup.backup_cmd_opts,
             [cinder_cmd_volume.cluster_opt],
             cinder_common_config.api_opts,
             cinder_common_config.core_opts,
             cinder_common_config.auth_opts,
             cinder_common_config.backup_opts,
             cinder_common_config.image_opts,
             cinder_common_config.global_opts,
             cinder_common_config.compression_opts,
             cinder.compute.compute_opts,
             cinder_context.context_opts,
             cinder_db_api.db_opts,
             cinder_db_api.backup_opts,
             cinder_image_glance.image_opts,
             cinder_image_glance.glance_core_properties_opts,
             cinder_image_imageutils.image_opts,
             cinder_message_api.messages_opts,
             cinder_quota.quota_opts,
             cinder_scheduler_driver.scheduler_driver_opts,
             cinder_scheduler_hostmanager.host_manager_opts,
             cinder_scheduler_manager.scheduler_manager_opts,
             [cinder_scheduler_scheduleroptions.
                 scheduler_json_config_location_opt],
             cinder_scheduler_weights_capacity.capacity_weight_opts,
             cinder_scheduler_weights_volumenumber.
             volume_number_weight_opts,
             cinder_service.service_opts,
             cinder_sshutils.ssh_opts,
             cinder_transfer_api.volume_transfer_opts,
             [cinder_volume_api.allow_force_upload_opt],
             [cinder_volume_api.volume_host_opt],
             [cinder_volume_api.volume_same_az_opt],
             [cinder_volume_api.az_cache_time_opt],
             cinder_volume_driver.volume_opts,
             cinder_volume_driver.iser_opts,
             cinder_volume_driver.nvmet_opts,
             cinder_volume_driver.scst_opts,
             cinder_volume_driver.backup_opts,
             cinder_volume_driver.image_opts,
             cinder_volume_drivers_datera_dateraiscsi.d_opts,
             cinder_volume_drivers_fusionstorage_dsware.volume_opts,
             cinder_volume_drivers_infortrend_raidcmd_cli_commoncli.
             infortrend_opts,
             cinder_volume_drivers_inspur_as13000_as13000driver.
             inspur_as13000_opts,
             cinder_volume_drivers_inspur_instorage_instoragecommon.
             instorage_mcs_opts,
             cinder_volume_drivers_inspur_instorage_instorageiscsi.
             instorage_mcs_iscsi_opts,
             cinder_volume_drivers_kioxia_kumoscale.KUMOSCALE_OPTS,
             cinder_volume_drivers_open_e_options.jdss_connection_opts,
             cinder_volume_drivers_open_e_options.jdss_iscsi_opts,
             cinder_volume_drivers_open_e_options.jdss_volume_opts,
             cinder_volume_drivers_sandstone_sdsdriver.sds_opts,
             cinder_volume_drivers_toyou_acs5000_acs5000common.
             acs5000c_opts,
             cinder_volume_drivers_veritas_access_veritasiscsi.VA_VOL_OPTS,
             cinder_volume_manager.volume_manager_opts,
             cinder_wsgi_eventletserver.socket_opts,
         )),
        ('fc-zone-manager',
         itertools.chain(
             cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts,
             cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts,
             cinder_zonemanager_fczonemanager.zone_manager_opts,
         )),
        ('key_manager',
         itertools.chain(
             cinder_keymgr_confkeymgr.key_mgr_opts,
         )),
        ('service_user',
         itertools.chain(
             cinder_serviceauth.service_user_opts,
             loading.get_auth_plugin_conf_options('v3password'),
             loading.get_session_conf_options(),
         )),
        ('backend_defaults',
         itertools.chain(
             cinder_volume_driver.volume_opts,
             cinder_volume_driver.iser_opts,
             cinder_volume_driver.nvmet_opts,
             cinder_volume_driver.scst_opts,
             cinder_volume_driver.image_opts,
             cinder_volume_driver.fqdn_opts,
             cinder_volume_drivers_ceph_rbdiscsi.RBD_ISCSI_OPTS,
             cinder_volume_drivers_dell_emc_powerflex_driver.
             powerflex_opts,
             cinder_volume_drivers_dell_emc_powermax_common.powermax_opts,
             cinder_volume_drivers_dell_emc_powerstore_driver.
             POWERSTORE_OPTS,
             cinder_volume_drivers_dell_emc_powervault_common.common_opts,
             cinder_volume_drivers_dell_emc_powervault_common.iscsi_opts,
             cinder_volume_drivers_dell_emc_sc_storagecentercommon.
             common_opts,
             cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS,
             cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS,
             cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS,
             cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon.
             FJ_ETERNUS_DX_OPT_opts,
             cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
             cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
             cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS,
             cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
             cinder_volume_drivers_hpe_nimble.nimble_opts,
             cinder_volume_drivers_huawei_common.huawei_opts,
             cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts,
             cinder_volume_drivers_ibm_flashsystemiscsi.
             flashsystem_iscsi_opts,
             cinder_volume_drivers_ibm_gpfs.gpfs_opts,
             cinder_volume_drivers_ibm_gpfs.gpfs_remote_ssh_opts,
             cinder_volume_drivers_ibm_ibm_storage_ds8kproxy.ds8k_opts,
             cinder_volume_drivers_ibm_ibm_storage_ibmstorage.driver_opts,
             cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon.
             storwize_svc_opts,
             cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc.
             storwize_svc_fc_opts,
             cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi.
             storwize_svc_iscsi_opts,
             cinder_volume_drivers_infinidat.infinidat_opts,
             cinder_volume_drivers_kaminario_kaminariocommon.
             kaminario_opts,
             cinder_volume_drivers_lenovo_lenovocommon.common_opts,
             cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts,
             cinder_volume_drivers_lightos.lightos_opts,
             cinder_volume_drivers_linstordrv.linstor_opts,
             cinder_volume_drivers_lvm.volume_opts,
             cinder_volume_drivers_macrosan_driver.config.macrosan_opts,
             cinder_volume_drivers_nec_v_necvrest.COMMON_VOLUME_OPTS,
             cinder_volume_drivers_nec_v_necvrest.REST_VOLUME_OPTS,
             cinder_volume_drivers_nec_v_necvrest.FC_VOLUME_OPTS,
             cinder_volume_drivers_netapp_options.netapp_proxy_opts,
             cinder_volume_drivers_netapp_options.netapp_connection_opts,
             cinder_volume_drivers_netapp_options.netapp_transport_opts,
             cinder_volume_drivers_netapp_options.netapp_basicauth_opts,
             cinder_volume_drivers_netapp_options.netapp_cluster_opts,
             cinder_volume_drivers_netapp_options.netapp_provisioning_opts,
             cinder_volume_drivers_netapp_options.netapp_img_cache_opts,
             cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts,
             cinder_volume_drivers_netapp_options.netapp_san_opts,
             cinder_volume_drivers_netapp_options.netapp_replication_opts,
             cinder_volume_drivers_netapp_options.netapp_support_opts,
             cinder_volume_drivers_netapp_options.netapp_migration_opts,
             cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS,
             cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS,
             cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS,
             cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS,
             cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS,
             cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS,
             cinder_volume_drivers_nfs.nfs_opts,
             cinder_volume_drivers_prophetstor_options.DPL_OPTS,
             cinder_volume_drivers_pure.PURE_OPTS,
             cinder_volume_drivers_qnap.qnap_opts,
             cinder_volume_drivers_quobyte.volume_opts,
             cinder_volume_drivers_rbd.RBD_OPTS,
             cinder_volume_drivers_remotefs.nas_opts,
             cinder_volume_drivers_remotefs.volume_opts,
             cinder_volume_drivers_san_hp_hpmsacommon.common_opts,
             cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts,
             cinder_volume_drivers_san_san.san_opts,
             cinder_volume_drivers_solidfire.sf_opts,
             cinder_volume_drivers_storpool.storpool_opts,
             cinder_volume_drivers_stx_common.common_opts,
             cinder_volume_drivers_stx_common.iscsi_opts,
             cinder_volume_drivers_synology_synologycommon.cinder_opts,
             cinder_volume_drivers_vmware_vmdk.vmdk_opts,
             cinder_volume_drivers_vzstorage.vzstorage_opts,
             cinder_volume_drivers_windows_iscsi.windows_opts,
             cinder_volume_drivers_windows_smbfs.volume_opts,
             cinder_volume_drivers_zadara_zadara.common.zadara_opts,
             cinder_volume_manager.volume_backend_opts,
             cinder_volume_targets_spdknvmf.spdk_opts,
         )),
        ('nova',
         itertools.chain(
             cinder_compute_nova.nova_opts,
             cinder_compute_nova.nova_session_opts,
             cinder_compute_nova.nova_auth_opts,
         )),
    ]
|
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from PIL import Image
from six.moves import range
# Pickled dataset produced by an earlier preprocessing step.
# NOTE(review): pickle.load can execute arbitrary code from the file --
# only load pickles you generated yourself.
pickle_file = 'bengaliOCR.pickle'
with open(pickle_file, 'rb') as f:
  save = pickle.load(f)
  train_dataset = save['train_dataset']
  train_labels = save['train_labels']
  valid_dataset = save['valid_dataset']
  valid_labels = save['valid_labels']
  test_dataset = save['test_dataset']
  test_labels = save['test_labels']
  del save  # hint to help gc free up memory
  print('Training set', train_dataset.shape, train_labels.shape)
  print('Validation set', valid_dataset.shape, valid_labels.shape)
  print('Test set', test_dataset.shape, test_labels.shape)
# Geometry of the inputs and the label space used throughout this script.
image_size = 50
num_labels = 50
num_channels = 1 # grayscale
# convnets in tensorflow require 4d tensor inputs [image_no, height, width, num_channels]. The following
# function reformats the input dataset to form a 4d tensor, and reformats the labels as one-hot encodings
def reformat(dataset, labels):
  """Reshape images into a 4-D float32 tensor and one-hot encode labels.

  Uses the module-level image_size, num_labels and num_channels.
  Returns the (dataset, labels) pair in the new layout.
  """
  images = dataset.astype(np.float32).reshape(
    -1, image_size, image_size, num_channels)
  one_hot = (labels[:, None] == np.arange(num_labels)).astype(np.float32)
  return images, one_hot
# Convert all three splits to the 4-D / one-hot layout and report shapes.
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
  """Percentage of rows whose predicted argmax matches the label argmax."""
  hits = np.argmax(predictions, 1) == np.argmax(labels, 1)
  return 100.0 * np.sum(hits) / predictions.shape[0]
# Convolutional neural network using 2 convolutional layers, an inception module and then 2 fully-connected
# layers. The inception module consists of 1x1 convolutions that reduce the dimensions of the input to
# the module, followed by separate, parallel 3x3 and 5x5 convolutions. The input is also max-pooled with
# kernel size 3 and stride 1, followed by another 1x1 convolution. ReLU's are added before each of these paths
# are concatenated into one large output block. See README for details.
# Note that I used the Adam optimizer, because SGD would (mysteriously) not work, getting stuck at a loss
# of ~3.9. I also slightly reduced the batch size to 50 to allow faster training. This model yielded a
# 94.2% accuracy rate on the test set, an increase of 0.7% compared to the previous model without inception.
def deeper_inception_conv_net():
batch_size = 50
patch_size1 = 3
patch_size2 = 5
depth = 16
depth1 = 32
depth2 = 16
depth3 = 8
concat_depth = 48
num_hidden = 64
num_hidden2 = 32
keep_prob = 0.5
decay_step = 2000
base = 0.9
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
layer1_weights = tf.Variable(tf.truncated_normal(
[patch_size1, patch_size1, num_channels, depth], stddev=0.3))
layer1_biases = tf.Variable(tf.zeros([depth]))
layer2_weights = tf.Variable(tf.truncated_normal(
[patch_size2, patch_size2, depth, depth1], stddev=0.05))
layer2_biases = tf.Variable(tf.constant(0.0, shape=[depth1]))
layer3_weights = tf.Variable(tf.truncated_normal(
[((image_size + 3) // 4) * ((image_size + 3) // 4) * concat_depth, num_hidden], stddev=0.05))
layer3_biases = tf.Variable(tf.constant(0.0, shape=[num_hidden]))
layer4_weights = tf.Variable(tf.truncated_normal(
[num_hidden, num_hidden2], stddev=0.01))
layer4_biases = tf.Variable(tf.constant(0.0, shape=[num_hidden2]))
layer5_weights = tf.Variable(tf.truncated_normal(
[num_hidden2, num_labels], stddev=0.01))
layer5_biases = tf.Variable(tf.constant(0.0, shape=[num_labels]))
inception1x1_weights = tf.Variable(tf.truncated_normal(
[1, 1, depth1, depth2], stddev=0.25))
inception1x1_biases = tf.Variable(tf.constant(0.0, shape=[depth2]))
inception3x3_weights = tf.Variable(tf.truncated_normal(
[patch_size1, patch_size1, depth2, depth3], stddev=0.05))
inception3x3_biases = tf.Variable(tf.constant(0.0, shape=[depth3]))
inception5x5_weights = tf.Variable(tf.truncated_normal(
[patch_size2, patch_size2, depth2, depth3], stddev=0.08))
inception5x5_biases = tf.Variable(tf.constant(0.0, shape=[depth3]))
inception1x1_post_mxpool_wts = tf.Variable(tf.truncated_normal(
[1, 1, depth1, depth2], stddev=0.04))
post_maxpool_biases = tf.Variable(tf.constant(0.0, shape=[depth2]))
global_step = tf.Variable(0, trainable = False) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(0.005, global_step, decay_step, base)
# Model.
def model(data, useDropout):
    """Build the inception-style conv-net graph over `data` and return logits.

    Args:
      data: NHWC image batch tensor — assumes (batch, image_size, image_size,
        num_channels), matching the enclosing placeholders. TODO confirm.
      useDropout: 1 applies dropout after the flattened inception output
        (training path); any other value skips it (eval path).

    Returns:
      Logits tensor of shape (batch, num_labels); softmax is applied by callers.
    """
    # Conv layer 1 -> 2x2 stride-2 max-pool (downsamples) -> ReLU with bias.
    conv = tf.nn.conv2d(data, layer1_weights, [1, 1, 1, 1], padding='SAME')
    max_pooled = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(max_pooled + layer1_biases)
    # Conv layer 2; NOTE the pool here has stride 1, so it does NOT downsample.
    conv = tf.nn.conv2d(hidden, layer2_weights, [1, 1, 1, 1], padding='SAME')
    max_pooled = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 1, 1, 1], padding='SAME')
    hidden = tf.nn.relu(max_pooled + layer2_biases)
    # Inception block: 1x1 bottleneck feeding parallel 3x3 and 5x5 branches.
    inception1x1_conv = tf.nn.conv2d(hidden, inception1x1_weights, [1, 1, 1, 1], padding='SAME')
    inception1x1_relu = tf.nn.relu(inception1x1_conv + inception1x1_biases)
    inception3x3_conv = tf.nn.conv2d(inception1x1_relu, inception3x3_weights, [1, 1, 1, 1], padding='SAME')
    inception3x3_relu = tf.nn.relu(inception3x3_conv + inception3x3_biases)
    inception5x5_conv = tf.nn.conv2d(inception1x1_relu, inception5x5_weights, [1, 1, 1, 1], padding='SAME')
    inception5x5_relu = tf.nn.relu(inception5x5_conv + inception5x5_biases)
    # Fourth branch: 3x3 max-pool (stride 1) followed by a 1x1 projection.
    inception3x3_maxpool = tf.nn.max_pool(hidden, [1, 3, 3, 1], [1, 1, 1, 1], padding='SAME')
    inception1x1_post_maxpool = tf.nn.conv2d(inception3x3_maxpool, inception1x1_post_mxpool_wts, [1, 1, 1, 1], padding='SAME')
    inception1x1_post_maxpool = tf.nn.relu(inception1x1_post_maxpool + post_maxpool_biases)
    # Concatenate the four branches along the depth axis (old-style tf.concat
    # signature: axis first), then downsample once more before the FC stack.
    concat_filter = tf.concat(3, [inception1x1_relu, inception3x3_relu, inception5x5_relu, inception1x1_post_maxpool])
    concat_maxpooled = tf.nn.max_pool(concat_filter, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    shape = concat_maxpooled.get_shape().as_list()
    # Flatten to (batch, features) for the fully connected layers.
    reshape = tf.reshape(concat_maxpooled, [shape[0], shape[1] * shape[2] * shape[3]])
    if useDropout == 1:
        dropout_layer2 = tf.nn.dropout(tf.nn.relu(reshape), keep_prob)
    else:
        dropout_layer2 = tf.nn.relu(reshape)
    hidden = tf.nn.relu(tf.matmul(dropout_layer2, layer3_weights) + layer3_biases)
    hidden = tf.nn.relu(tf.matmul(hidden, layer4_weights) + layer4_biases)
    return tf.matmul(hidden, layer5_weights) + layer5_biases
# Training computation.
logits = model(tf_train_dataset, 1)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.AdamOptimizer(0.001).minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(model(tf_train_dataset, 0))
valid_prediction = tf.nn.softmax(model(tf_valid_dataset, 0))
test_prediction = tf.nn.softmax(model(tf_test_dataset, 0))
num_steps = 30001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 50 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
#print(tf.Print(layer1_weights, [layer1_weights]).eval())
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
deeper_inception_conv_net()
|
|
# coding: utf-8
from __future__ import unicode_literals
import itertools
import re
import random
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
js_to_json,
orderedSet,
parse_duration,
parse_iso8601,
sanitized_Request,
urlencode_postdata,
)
class TwitchBaseIE(InfoExtractor):
    """Shared plumbing for the twitch.tv extractors: API JSON download with
    Twitch headers and error handling, login, and 'Source' format preference.
    """
    _VALID_URL_BASE = r'https?://(?:www\.)?twitch\.tv'
    _API_BASE = 'https://api.twitch.tv'
    _USHER_BASE = 'https://usher.ttvnw.net'
    _LOGIN_URL = 'http://www.twitch.tv/login'
    _NETRC_MACHINE = 'twitch'

    def _handle_error(self, response):
        """Raise ExtractorError when the API response dict carries an error."""
        if not isinstance(response, dict):
            return
        error = response.get('error')
        if error:
            raise ExtractorError(
                '%s returned error: %s - %s' % (self.IE_NAME, error, response.get('message')),
                expected=True)

    def _download_json(self, url, video_id, note='Downloading JSON metadata'):
        """Download JSON with Twitch-specific headers, forwarding the
        api_token cookie (when present) and raising on API-level errors."""
        headers = {
            'Referer': 'http://api.twitch.tv/crossdomain/receiver.html?v=2',
            'X-Requested-With': 'XMLHttpRequest',
        }
        for cookie in self._downloader.cookiejar:
            if cookie.name == 'api_token':
                headers['Twitch-Api-Token'] = cookie.value
        request = sanitized_Request(url, headers=headers)
        response = super(TwitchBaseIE, self)._download_json(request, video_id, note)
        self._handle_error(response)
        return response

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Log in with configured credentials; no-op when none are given."""
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_page, handle = self._download_webpage_handle(
            self._LOGIN_URL, None, 'Downloading login page')
        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'username': username,
            'password': password,
        })
        # The login page may have been reached via redirects; resolve the
        # form action relative to the final URL, not _LOGIN_URL.
        redirect_url = handle.geturl()
        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=redirect_url, group='url')
        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(redirect_url, post_url)
        request = sanitized_Request(
            post_url, urlencode_postdata(login_form))
        request.add_header('Referer', redirect_url)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
        error_message = self._search_regex(
            r'<div[^>]+class="subwindow_notice"[^>]*>([^<]+)</div>',
            response, 'error message', default=None)
        if error_message:
            raise ExtractorError(
                'Unable to login. Twitch said: %s' % error_message, expected=True)
        if '>Reset your password<' in response:
            self.report_warning('Twitch asks you to reset your password, go to https://secure.twitch.tv/reset/submit')

    def _prefer_source(self, formats):
        """Give the 'Source' quality top preference, then sort formats."""
        try:
            source = next(f for f in formats if f['format_id'] == 'Source')
            source['preference'] = 10
        except StopIteration:
            pass  # No Source stream present
        self._sort_formats(formats)
class TwitchItemBaseIE(TwitchBaseIE):
    """Base for single-item extractors (video/chapter/vod). Subclasses set
    _ITEM_TYPE (human-readable) and _ITEM_SHORTCUT (API id prefix)."""

    def _download_info(self, item, item_id):
        """Fetch and normalize the kraken metadata for one item."""
        return self._extract_info(self._download_json(
            '%s/kraken/videos/%s%s' % (self._API_BASE, item, item_id), item_id,
            'Downloading %s info JSON' % self._ITEM_TYPE))

    def _extract_media(self, item_id):
        """Build a playlist of per-chunk entries, one entry per fragment with
        one format per available quality."""
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        response = self._download_json(
            '%s/api/videos/%s%s' % (self._API_BASE, self._ITEM_SHORTCUT, item_id), item_id,
            'Downloading %s playlist JSON' % self._ITEM_TYPE)
        entries = []
        # chunks maps quality name -> list of fragments; zip walks the
        # fragment lists in lockstep so each entry has all qualities.
        chunks = response['chunks']
        qualities = list(chunks.keys())
        for num, fragment in enumerate(zip(*chunks.values()), start=1):
            formats = []
            for fmt_num, fragment_fmt in enumerate(fragment):
                format_id = qualities[fmt_num]
                fmt = {
                    'url': fragment_fmt['url'],
                    'format_id': format_id,
                    'quality': 1 if format_id == 'live' else 0,
                }
                # Quality names like '240p' encode the height.
                m = re.search(r'^(?P<height>\d+)[Pp]', format_id)
                if m:
                    fmt['height'] = int(m.group('height'))
                formats.append(fmt)
            self._sort_formats(formats)
            entry = dict(info)
            entry['id'] = '%s_%d' % (entry['id'], num)
            entry['title'] = '%s part %d' % (entry['title'], num)
            entry['formats'] = formats
            entries.append(entry)
        return self.playlist_result(entries, info['id'], info['title'])

    def _extract_info(self, info):
        """Map a kraken video JSON object to youtube-dl's info dict fields."""
        return {
            'id': info['_id'],
            'title': info.get('title') or 'Untitled Broadcast',
            'description': info.get('description'),
            'duration': int_or_none(info.get('length')),
            'thumbnail': info.get('preview'),
            'uploader': info.get('channel', {}).get('display_name'),
            'uploader_id': info.get('channel', {}).get('name'),
            'timestamp': parse_iso8601(info.get('recorded_at')),
            'view_count': int_or_none(info.get('views')),
        }

    def _real_extract(self, url):
        return self._extract_media(self._match_id(url))
class TwitchVideoIE(TwitchItemBaseIE):
    """Legacy twitch.tv /b/ (broadcast) item extractor."""
    IE_NAME = 'twitch:video'
    _VALID_URL = r'%s/[^/]+/b/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'video'
    _ITEM_SHORTCUT = 'a'
    _TEST = {
        'url': 'http://www.twitch.tv/riotgames/b/577357806',
        'info_dict': {
            'id': 'a577357806',
            'title': 'Worlds Semifinals - Star Horn Royal Club vs. OMG',
        },
        'playlist_mincount': 12,
        'skip': 'HTTP Error 404: Not Found',
    }
class TwitchChapterIE(TwitchItemBaseIE):
    """Legacy twitch.tv /c/ (chapter) item extractor."""
    IE_NAME = 'twitch:chapter'
    _VALID_URL = r'%s/[^/]+/c/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'chapter'
    _ITEM_SHORTCUT = 'c'
    _TESTS = [{
        'url': 'http://www.twitch.tv/acracingleague/c/5285812',
        'info_dict': {
            'id': 'c5285812',
            'title': 'ACRL Off Season - Sports Cars @ Nordschleife',
        },
        'playlist_mincount': 3,
        'skip': 'HTTP Error 404: Not Found',
    }, {
        'url': 'http://www.twitch.tv/tsm_theoddone/c/2349361',
        'only_matching': True,
    }]
class TwitchVodIE(TwitchItemBaseIE):
    """twitch.tv /v/ (VOD) extractor: fetches an access token and extracts
    m3u8 formats from the usher service; honors the ?t= start-time param."""
    IE_NAME = 'twitch:vod'
    _VALID_URL = r'%s/[^/]+/v/(?P<id>\d+)' % TwitchBaseIE._VALID_URL_BASE
    _ITEM_TYPE = 'vod'
    _ITEM_SHORTCUT = 'v'
    _TESTS = [{
        'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',
        'info_dict': {
            'id': 'v6528877',
            'ext': 'mp4',
            'title': 'LCK Summer Split - Week 6 Day 1',
            # Raw string: '\.' in a non-raw literal is an invalid escape
            # sequence (DeprecationWarning, future SyntaxError).
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 17208,
            'timestamp': 1435131709,
            'upload_date': '20150624',
            'uploader': 'Riot Games',
            'uploader_id': 'riotgames',
            'view_count': int,
            'start_time': 310,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # Untitled broadcast (title is None)
        'url': 'http://www.twitch.tv/belkao_o/v/11230755',
        'info_dict': {
            'id': 'v11230755',
            'ext': 'mp4',
            'title': 'Untitled Broadcast',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1638,
            'timestamp': 1439746708,
            'upload_date': '20150816',
            'uploader': 'BelkAO_o',
            'uploader_id': 'belkao_o',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        item_id = self._match_id(url)
        info = self._download_info(self._ITEM_SHORTCUT, item_id)
        # Usher requires a signed per-VOD access token.
        access_token = self._download_json(
            '%s/api/vods/%s/access_token' % (self._API_BASE, item_id), item_id,
            'Downloading %s access token' % self._ITEM_TYPE)
        formats = self._extract_m3u8_formats(
            '%s/vod/%s?%s' % (
                self._USHER_BASE, item_id,
                compat_urllib_parse_urlencode({
                    'allow_source': 'true',
                    'allow_audio_only': 'true',
                    'allow_spectre': 'true',
                    'player': 'twitchweb',
                    'nauth': access_token['token'],
                    'nauthsig': access_token['sig'],
                })),
            item_id, 'mp4', entry_protocol='m3u8_native')
        self._prefer_source(formats)
        info['formats'] = formats
        # Pass through a ?t=5m10s-style start time when given.
        parsed_url = compat_urllib_parse_urlparse(url)
        query = compat_parse_qs(parsed_url.query)
        if 't' in query:
            info['start_time'] = parse_duration(query['t'][0])
        return info
class TwitchPlaylistBaseIE(TwitchBaseIE):
    """Base for channel-playlist extractors; pages through kraken videos."""
    _PLAYLIST_URL = '%s/kraken/channels/%%s/videos/?offset=%%d&limit=%%d' % TwitchBaseIE._API_BASE
    _PAGE_LIMIT = 100

    def _extract_playlist(self, channel_id):
        """Collect all video URLs of a channel into a playlist result."""
        info = self._download_json(
            '%s/kraken/channels/%s' % (self._API_BASE, channel_id),
            channel_id, 'Downloading channel info JSON')
        channel_name = info.get('display_name') or info.get('name')
        entries = []
        offset = 0
        limit = self._PAGE_LIMIT
        broken_paging_detected = False
        counter_override = None
        for counter in itertools.count(1):
            response = self._download_json(
                self._PLAYLIST_URL % (channel_id, offset, limit),
                channel_id,
                'Downloading %s videos JSON page %s'
                % (self._PLAYLIST_TYPE, counter_override or counter))
            page_entries = self._extract_playlist_page(response)
            if not page_entries:
                break
            total = int_or_none(response.get('_total'))
            # Since the beginning of March 2016 twitch's paging mechanism
            # is completely broken on the twitch side. It simply ignores
            # a limit and returns the whole offset number of videos.
            # Working around by just requesting all videos at once.
            # Upd: pagination bug was fixed by twitch on 15.03.2016.
            if not broken_paging_detected and total and len(page_entries) > limit:
                self.report_warning(
                    'Twitch pagination is broken on twitch side, requesting all videos at once',
                    channel_id)
                broken_paging_detected = True
                offset = total
                counter_override = '(all at once)'
                continue
            entries.extend(page_entries)
            # Stop once everything is fetched; with broken paging a single
            # extra request already returned the full list.
            if broken_paging_detected or total and len(page_entries) >= total:
                break
            offset += limit
        # orderedSet de-duplicates while keeping first-seen order.
        return self.playlist_result(
            [self.url_result(entry) for entry in orderedSet(entries)],
            channel_id, channel_name)

    def _extract_playlist_page(self, response):
        """Return the video URLs of one kraken response page."""
        videos = response.get('videos')
        return [video['url'] for video in videos] if videos else []

    def _real_extract(self, url):
        return self._extract_playlist(self._match_id(url))
class TwitchProfileIE(TwitchPlaylistBaseIE):
    """Channel profile page: playlist of all videos of a channel."""
    IE_NAME = 'twitch:profile'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_TYPE = 'profile'
    _TEST = {
        'url': 'http://www.twitch.tv/vanillatv/profile',
        'info_dict': {
            'id': 'vanillatv',
            'title': 'VanillaTV',
        },
        'playlist_mincount': 412,
    }
class TwitchPastBroadcastsIE(TwitchPlaylistBaseIE):
    """Past broadcasts of a channel; same paging, broadcasts=true filter."""
    IE_NAME = 'twitch:past_broadcasts'
    _VALID_URL = r'%s/(?P<id>[^/]+)/profile/past_broadcasts/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _PLAYLIST_URL = TwitchPlaylistBaseIE._PLAYLIST_URL + '&broadcasts=true'
    _PLAYLIST_TYPE = 'past broadcasts'
    _TEST = {
        'url': 'http://www.twitch.tv/spamfish/profile/past_broadcasts',
        'info_dict': {
            'id': 'spamfish',
            'title': 'Spamfish',
        },
        'playlist_mincount': 54,
    }
class TwitchStreamIE(TwitchBaseIE):
    """Live stream extractor; falls back to the profile playlist offline."""
    IE_NAME = 'twitch:stream'
    _VALID_URL = r'%s/(?P<id>[^/#?]+)/?(?:\#.*)?$' % TwitchBaseIE._VALID_URL_BASE
    _TESTS = [{
        'url': 'http://www.twitch.tv/shroomztv',
        'info_dict': {
            'id': '12772022048',
            'display_id': 'shroomztv',
            'ext': 'mp4',
            'title': 're:^ShroomzTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'H1Z1 - lonewolfing with ShroomzTV | A3 Battle Royale later - @ShroomzTV',
            'is_live': True,
            'timestamp': 1421928037,
            'upload_date': '20150122',
            'uploader': 'ShroomzTV',
            'uploader_id': 'shroomztv',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.twitch.tv/miracle_doto#profile-0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        stream = self._download_json(
            '%s/kraken/streams/%s' % (self._API_BASE, channel_id), channel_id,
            'Downloading stream JSON').get('stream')
        # Fallback on profile extraction if stream is offline
        if not stream:
            return self.url_result(
                'http://www.twitch.tv/%s/profile' % channel_id,
                'TwitchProfile', channel_id)
        # Channel name may be typed if different case than the original channel name
        # (e.g. http://www.twitch.tv/TWITCHPLAYSPOKEMON) that will lead to constructing
        # an invalid m3u8 URL. Working around by use of original channel name from stream
        # JSON and fallback to lowercase if it's not available.
        channel_id = stream.get('channel', {}).get('name') or channel_id.lower()
        access_token = self._download_json(
            '%s/api/channels/%s/access_token' % (self._API_BASE, channel_id), channel_id,
            'Downloading channel access token')
        # 'p' is a random cache-buster, as sent by the web player.
        query = {
            'allow_source': 'true',
            'allow_audio_only': 'true',
            'p': random.randint(1000000, 10000000),
            'player': 'twitchweb',
            'segment_preference': '4',
            'sig': access_token['sig'].encode('utf-8'),
            'token': access_token['token'].encode('utf-8'),
        }
        formats = self._extract_m3u8_formats(
            '%s/api/channel/hls/%s.m3u8?%s'
            % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
            channel_id, 'mp4')
        self._prefer_source(formats)
        view_count = stream.get('viewers')
        timestamp = parse_iso8601(stream.get('created_at'))
        channel = stream['channel']
        title = self._live_title(channel.get('display_name') or channel.get('name'))
        description = channel.get('status')
        # Preview keys look like '320x240.jpg'; derive thumbnail dimensions.
        thumbnails = []
        for thumbnail_key, thumbnail_url in stream['preview'].items():
            m = re.search(r'(?P<width>\d+)x(?P<height>\d+)\.jpg$', thumbnail_key)
            if not m:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'width': int(m.group('width')),
                'height': int(m.group('height')),
            })
        return {
            'id': compat_str(stream['_id']),
            'display_id': channel_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'uploader': channel.get('display_name'),
            'uploader_id': channel.get('name'),
            'timestamp': timestamp,
            'view_count': view_count,
            'formats': formats,
            'is_live': True,
        }
class TwitchClipsIE(InfoExtractor):
    """clips.twitch.tv extractor: parses the embedded clipInfo JS object."""
    IE_NAME = 'twitch:clips'
    _VALID_URL = r'https?://clips\.twitch\.tv/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TEST = {
        'url': 'https://clips.twitch.tv/ea/AggressiveCobraPoooound',
        'md5': '761769e1eafce0ffebfb4089cb3847cd',
        'info_dict': {
            'id': 'AggressiveCobraPoooound',
            'ext': 'mp4',
            'title': 'EA Play 2016 Live from the Novo Theatre',
            # Raw string: '\.' in a non-raw literal is an invalid escape
            # sequence (DeprecationWarning, future SyntaxError).
            'thumbnail': r're:^https?://.*\.jpg',
            'creator': 'EA',
            'uploader': 'stereotype_',
            'uploader_id': 'stereotype_',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The clip metadata lives in an inline JS assignment, not an API.
        clip = self._parse_json(
            self._search_regex(
                r'(?s)clipInfo\s*=\s*({.+?});', webpage, 'clip info'),
            video_id, transform_source=js_to_json)
        video_url = clip['clip_video_url']
        title = clip['channel_title']
        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'creator': clip.get('broadcaster_display_name') or clip.get('broadcaster_login'),
            'uploader': clip.get('curator_login'),
            'uploader_id': clip.get('curator_display_name'),
        }
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for index searcher."""
import os
import re
import unittest
import mock
from dfdewey.utils.image_processor import FileEntryScanner
from dfdewey.utils.index_searcher import IndexSearcher
# Fixtures shared by the tests below.
TEST_CASE = 'testcase'
TEST_IMAGE = 'test.dd'
# Placeholder hash (the MD5 of empty input) standing in for a real image hash.
TEST_IMAGE_HASH = 'd41d8cd98f00b204e9800998ecf8427e'
TEST_IMAGE_ID = 'd41d8cd98f00b204e9800998ecf8427e'
class IndexSearcherTest(unittest.TestCase):
  """Tests for index searcher."""

  def _get_index_searcher(self):
    """Get a test index searcher.

    Patches out the PostgreSQL connection/query so construction does no I/O.

    Returns:
      Test index searcher.
    """
    with mock.patch('psycopg2.connect'), mock.patch(
        'dfdewey.datastore.postgresql.PostgresqlDataStore.query_single_row'
    ) as mock_query_single_row:
      mock_query_single_row.return_value = (TEST_IMAGE_HASH,)
      index_searcher = IndexSearcher(TEST_CASE, TEST_IMAGE_ID, TEST_IMAGE)
      index_searcher.config = None

    return index_searcher

  # NOTE: mock decorators apply bottom-up, so the parameter order below is
  # the reverse of the decorator order.
  @mock.patch('dfdewey.datastore.postgresql.PostgresqlDataStore.query')
  def test_get_case_images(self, mock_query):
    """Test get case images method."""
    mock_query.return_value = [(
        'hash1',
        'image1.dd',
    ), (
        'hash2',
        'image2.dd',
    )]
    with mock.patch('psycopg2.connect'):
      # 'all' requests every image in the case.
      index_searcher = IndexSearcher(TEST_CASE, None, 'all')
    self.assertEqual(index_searcher.images['hash1'], 'image1.dd')
    self.assertEqual(index_searcher.images['hash2'], 'image2.dd')

  @mock.patch('dfdewey.datastore.postgresql.PostgresqlDataStore.query')
  def test_get_filenames_from_inode(self, mock_query):
    """Test get filenames from inode method."""
    index_searcher = self._get_index_searcher()
    # Second row is an alternate data stream of the same file.
    mock_query.return_value = [('test.txt',), ('test.txt:ads',)]
    filenames = index_searcher._get_filenames_from_inode(42, '/p1')
    self.assertEqual(len(filenames), 2)
    self.assertEqual(filenames[0], 'test.txt')
    self.assertEqual(filenames[1], 'test.txt:ads')

  @mock.patch('dfdewey.utils.index_searcher.IndexSearcher._get_inodes')
  @mock.patch(
      'dfdewey.utils.index_searcher.IndexSearcher._get_filenames_from_inode')
  @mock.patch(
      'dfdewey.datastore.postgresql.PostgresqlDataStore.switch_database')
  def test_get_filenames_from_offset(
      self, mock_switch_database, mock_get_filenames_from_inode,
      mock_get_inodes):
    """Test get filenames from offset method."""
    index_searcher = self._get_index_searcher()
    current_path = os.path.abspath(os.path.dirname(__file__))
    image_path = os.path.join(current_path, '..', '..', 'test_data', 'test.dd')
    # Test offset not within a file
    filenames = index_searcher._get_filenames_from_offset(
        image_path, TEST_IMAGE_HASH, 1048579)
    mock_switch_database.assert_called_once_with(
        db_name=''.join(('fs', TEST_IMAGE_HASH)))
    self.assertIsInstance(index_searcher.scanner, FileEntryScanner)
    mock_get_inodes.assert_called_once_with(0, '/p1')
    self.assertEqual(filenames, [])

    # Test offset within a file
    mock_get_inodes.reset_mock()
    mock_get_inodes.return_value = [(0,)]
    mock_get_filenames_from_inode.return_value = ['adams.txt']
    filenames = index_searcher._get_filenames_from_offset(
        image_path, TEST_IMAGE_HASH, 1133936)
    mock_get_inodes.assert_called_once_with(20, '/p1')
    mock_get_filenames_from_inode.assert_called_once_with(67, '/p1')
    # Filenames are annotated with the resolved inode number.
    self.assertEqual(filenames, ['adams.txt (67)'])

    # Test volume image
    mock_get_inodes.reset_mock()
    mock_get_inodes.return_value = [(2,)]
    mock_get_filenames_from_inode.reset_mock()
    mock_get_filenames_from_inode.return_value = []
    image_path = os.path.join(
        current_path, '..', '..', 'test_data', 'test_volume.dd')
    filenames = index_searcher._get_filenames_from_offset(
        image_path, TEST_IMAGE_HASH, 334216)
    mock_get_inodes.assert_called_once_with(326, '/')
    mock_get_filenames_from_inode.assert_called_once_with(2, '/')
    self.assertEqual(filenames, [' (2)'])

    # Test missing image
    index_searcher.scanner = None
    filenames = index_searcher._get_filenames_from_offset(
        'test.dd', TEST_IMAGE_HASH, 1048579)
    self.assertEqual(filenames, [])

  def test_highlight_hit(self):
    """Test highlight hit method."""
    index_searcher = self._get_index_searcher()
    data = 'test1 test2 test3'
    hit_positions = re.finditer('test3', data)
    wrapped_data = ['test1', 'test2', 'test3']
    result = index_searcher._highlight_hit(wrapped_data, hit_positions)
    # Hits are wrapped in ANSI bold-red / reset escape codes.
    self.assertEqual(
        result, ['test1', 'test2', '\u001b[31m\u001b[1mtest3\u001b[0m'])

    # A hit spanning several wrapped segments highlights each partial match.
    hit_positions = re.finditer('st1 test2 te', data)
    wrapped_data = ['test1', 'test2', 'test3']
    result = index_searcher._highlight_hit(wrapped_data, hit_positions)
    self.assertEqual(
        result, [
            'te\u001b[31m\u001b[1mst1\u001b[0m',
            '\u001b[31m\u001b[1mtest2\u001b[0m',
            '\u001b[31m\u001b[1mte\u001b[0mst3'
        ])

  @mock.patch('logging.Logger.info')
  @mock.patch('dfdewey.datastore.opensearch.OpenSearchDataStore.search')
  def test_list_search(self, mock_search, mock_output):
    """Test list search."""
    index_searcher = self._get_index_searcher()
    index_searcher.images = {TEST_IMAGE_HASH: TEST_IMAGE}
    current_path = os.path.abspath(os.path.dirname(__file__))
    query_list = os.path.join(
        current_path, '..', '..', 'test_data', 'wordlist.txt')
    mock_search.return_value = {'hits': {'total': {'value': 1}}}
    index_searcher.list_search(query_list)
    # One search per word in the list fixture.
    self.assertEqual(mock_search.call_count, 8)
    mock_output.assert_called_once()
    self.assertEqual(mock_output.call_args.args[1], TEST_IMAGE)
    self.assertEqual(mock_output.call_args.args[2], TEST_IMAGE_HASH)
    self.assertEqual(mock_output.call_args.args[3], query_list)

    # Test no results
    mock_output.reset_mock()
    mock_search.return_value = {'hits': {'total': {'value': 0}}}
    index_searcher.list_search(query_list)
    mock_output.assert_called_once()
    self.assertEqual(mock_output.call_args.args[4], 'No results.')

  @mock.patch('logging.Logger.info')
  @mock.patch('dfdewey.datastore.postgresql.PostgresqlDataStore')
  @mock.patch('dfdewey.datastore.opensearch.OpenSearchDataStore.search')
  def test_search(self, mock_search, mock_postgresql, mock_output):
    """Test search method."""
    index_searcher = self._get_index_searcher()
    current_path = os.path.abspath(os.path.dirname(__file__))
    image_path = os.path.join(current_path, '..', '..', 'test_data', 'test.dd')
    index_searcher.images = {TEST_IMAGE_HASH: image_path}
    index_searcher.postgresql = mock_postgresql
    mock_search.return_value = {
        'took': 2,
        'hits': {
            'total': {
                'value': 1
            },
            'hits': [{
                '_source': {
                    'offset': 12889600,
                    'file_offset': 'GZIP-0',
                    'data': 'test'
                }
            }]
        }
    }
    # Test with highlighting
    index_searcher.search('test', True)
    mock_search.assert_called_once()
    output_calls = mock_output.mock_calls
    self.assertEqual(output_calls[0].args[1], image_path)
    self.assertEqual(output_calls[0].args[2], TEST_IMAGE_HASH)
    self.assertEqual(output_calls[0].args[3], 'test')
    self.assertEqual(output_calls[1].args[1], 1)
    self.assertEqual(output_calls[1].args[2], 2)
    # The result table is checked at fixed column offsets; brittle by design
    # to pin the exact rendered layout.
    table_output = output_calls[1].args[3]
    self.assertEqual(table_output[76:84], '12889600')
    self.assertEqual(table_output[106:123], '\u001b[31m\u001b[1mtest\u001b[0m')
    self.assertEqual(table_output[124:130], 'GZIP-0')

    # Test without highlighting
    mock_search.reset_mock()
    mock_output.reset_mock()
    index_searcher.search('test')
    mock_search.assert_called_once()
    output_calls = mock_output.mock_calls
    self.assertEqual(output_calls[0].args[1], image_path)
    self.assertEqual(output_calls[0].args[2], TEST_IMAGE_HASH)
    self.assertEqual(output_calls[0].args[3], 'test')
    self.assertEqual(output_calls[1].args[1], 1)
    self.assertEqual(output_calls[1].args[2], 2)
    table_output = output_calls[1].args[3]
    self.assertEqual(table_output[76:84], '12889600')
    self.assertEqual(table_output[106:110], 'test')
    self.assertEqual(table_output[111:117], 'GZIP-0')

  def test_wrap_filenames(self):
    """Test wrap filenames method."""
    index_searcher = self._get_index_searcher()
    filenames = ['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa']
    filenames = index_searcher._wrap_filenames(filenames, width=20)
    expected_filenames = [
        'aaaaaaaaaaaaaaaaaaaa\naaaaaaaaaaaaaaaaaaaa\naaaaaaaaaaaaaaaaaaaa'
    ]
    self.assertEqual(filenames, expected_filenames)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
|
"""Utility functions for working with great_expectations within jupyter notebooks or jupyter lab.
"""
import logging
import sys
from datetime import datetime
import pandas as pd
import tzlocal
from IPython.core.display import HTML, display
from packaging import version
# Show full, untruncated dataframes in notebook output.
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
if version.parse(pd.__version__) <= version.parse("1.0.0"):
    # support for negative integers was deprecated in version 1.0.1
    pd.set_option("display.max_colwidth", -1)
else:
    pd.set_option("display.max_colwidth", None)
from great_expectations.render.renderer import (
ExpectationSuiteColumnSectionRenderer,
ProfilingResultsColumnSectionRenderer,
ValidationResultsColumnSectionRenderer,
)
from great_expectations.render.view import DefaultJinjaSectionView
def set_data_source(context, data_source_type=None):
    """Pick a data source from the project config and report via HTML.

    Args:
        context: a great_expectations DataContext.
        data_source_type: optional "type" value to filter datasources by;
            when None, all configured datasources are considered.

    Returns:
        The selected datasource name, or None when there are zero or more
        than one candidates (the HTML output tells the user what to do).

    TODO: Needs a docstring and tests.
    """
    data_source_name = None

    if not data_source_type:
        configured_datasources = [
            datasource for datasource in context.list_datasources()
        ]

        if len(configured_datasources) == 0:
            display(
                HTML(
                    """
<p>
No data sources found in the great_expectations.yml of your project.
</p>

<p>
If you did not create the data source during init, here is how to add it now: <a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">How To Add a Data Source</a>
</p>
"""
                )
            )
        elif len(configured_datasources) > 1:
            # NOTE(review): the template uses positional index {1:s} with two
            # format args; arg 0 (data_source_type, None on this branch) is
            # deliberately unused — confirm before "simplifying".
            display(
                HTML(
                    """
<p>
Found more than one data source in the great_expectations.yml of your project:
<b>{1:s}</b>
</p>

<p>
Uncomment the next cell and set data_source_name to one of these names.
</p>
""".format(
                        data_source_type,
                        ",".join(
                            [
                                datasource["name"]
                                for datasource in configured_datasources
                            ]
                        ),
                    )
                )
            )
        else:
            data_source_name = configured_datasources[0]["name"]
            display(
                HTML(
                    "Will be using this data source from your project's great_expectations.yml: <b>{:s}</b>".format(
                        data_source_name
                    )
                )
            )

    else:
        # Filter to datasources of the requested type only.
        configured_datasources = [
            datasource["name"]
            for datasource in context.list_datasources()
            if datasource["type"] == data_source_type
        ]
        if len(configured_datasources) == 0:
            display(
                HTML(
                    """
<p>
No {:s} data sources found in the great_expectations.yml of your project.
</p>

<p>
If you did not create the data source during init, here is how to add it now: <a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">How To Add a Data Source</a>
</p>
""".format(
                        data_source_type
                    )
                )
            )
        elif len(configured_datasources) > 1:
            display(
                HTML(
                    """
<p>
Found more than one {:s} data source in the great_expectations.yml of your project:
<b>{:s}</b>
</p>

<p>
Uncomment the next cell and set data_source_name to one of these names.
</p>
""".format(
                        data_source_type, ",".join(configured_datasources)
                    )
                )
            )
        else:
            data_source_name = configured_datasources[0]
            display(
                HTML(
                    "Will be using this {:s} data source from your project's great_expectations.yml: <b>{:s}</b>".format(
                        data_source_type, data_source_name
                    )
                )
            )

    return data_source_name
def setup_notebook_logging(logger=None, log_level=logging.INFO):
    """Set up the provided logger for the GE default logging configuration.

    Adds a stdout StreamHandler that formats timestamps in the local
    timezone (ISO-8601 with offset).

    Args:
        logger - the logger to configure; defaults to the
            "great_expectations" logger.
        log_level - level set on the logger (handler itself is DEBUG).
    """

    def posix2local(timestamp, tz=tzlocal.get_localzone()):
        """Seconds since the epoch -> local time as an aware datetime object."""
        return datetime.fromtimestamp(timestamp, tz)

    class Formatter(logging.Formatter):
        # Override the record-time converter so %(asctime)s is tz-aware.
        def converter(self, timestamp):
            return posix2local(timestamp)

        def formatTime(self, record, datefmt=None):
            dt = self.converter(record.created)
            if datefmt:
                s = dt.strftime(datefmt)
            else:
                t = dt.strftime(self.default_time_format)
                s = self.default_msec_format % (t, record.msecs)
            return s

    if not logger:
        logger = logging.getLogger("great_expectations")

    chandler = logging.StreamHandler(stream=sys.stdout)
    chandler.setLevel(logging.DEBUG)
    # chandler.setFormatter(Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%Y-%m-%dT%H:%M:%S%z"))
    chandler.setFormatter(
        Formatter("%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%dT%H:%M:%S%z")
    )
    logger.addHandler(chandler)
    logger.setLevel(log_level)
    logger.info(
        f"Great Expectations logging enabled at {log_level} level by JupyterUX module."
    )

    #
    # # Filter warnings
    # import warnings
    # warnings.filterwarnings('ignore')
#
# # Filter warnings
# import warnings
# warnings.filterwarnings('ignore')
def show_available_data_asset_names(context, data_source_name=None):
    """List asset names found in the current context.

    Walks every datasource (optionally filtered to `data_source_name`),
    every batch-kwargs generator, and renders the discovered data assets
    plus their expectation suites as an HTML list.
    """
    # TODO: Needs tests.
    styles = """
    <style type='text/css'>
    ul.data-assets {
        margin-top: 0px;
    }
    ul.data-assets li {
        line-height: 1.2em;
        list-style-type: circle;
    }
    ul.data-assets li span.expectation-suite {
        background: #ddd;
    }
    </style>
    """

    print("Inspecting your data sources. This may take a moment...")
    expectation_suite_keys = context.list_expectation_suites()
    datasources = context.list_datasources()
    html = ""
    for datasource in datasources:
        if data_source_name and datasource["name"] != data_source_name:
            continue
        html += "<h2 style='margin: 0'>Datasource: {:s} ({:s})</h2>".format(
            datasource["name"], datasource["class_name"]
        )
        ds = context.get_datasource(datasource["name"])
        generators = ds.list_batch_kwargs_generators()
        for generator_info in generators:
            html += "batch_kwargs_generator: {:s} ({:s})".format(
                generator_info["name"], generator_info["class_name"]
            )
            generator = ds.get_batch_kwargs_generator(generator_info["name"])

            # TODO hacks to deal w/ inconsistent return types. Remove urgently
            mystery_object = generator.get_available_data_asset_names()
            if isinstance(mystery_object, dict) and "names" in mystery_object.keys():
                data_asset_names = sorted(name[0] for name in mystery_object["names"])
            elif isinstance(mystery_object, list):
                data_asset_names = sorted(mystery_object)
            else:
                data_asset_names = []

            if len(data_asset_names) > 0:
                html += "<h3 style='margin: 0.2em 0'>Data Assets Found:</h3>"
                html += styles
                html += "<ul class='data-assets'>"
                for data_asset_name in data_asset_names:
                    html += f"<li>{data_asset_name:s}</li>"
                    # Suites whose key matches this datasource/generator/asset.
                    data_asset_expectation_suite_keys = [
                        es_key
                        for es_key in expectation_suite_keys
                        if es_key.data_asset_name.datasource == datasource["name"]
                        and es_key.data_asset_name.generator == generator_info["name"]
                        and es_key.data_asset_name.generator_asset == data_asset_name
                    ]
                    if len(data_asset_expectation_suite_keys) > 0:
                        html += "<ul>"
                        for es_key in data_asset_expectation_suite_keys:
                            html += "<li><span class='expectation-suite'>Expectation Suite</span>: {:s}</li>".format(
                                es_key.expectation_suite_name
                            )
                        html += "</ul>"
                html += "</ul>"
            else:
                display(
                    HTML(
                        """<p>No data assets found in this data source.</p>
<p>Read about how batch kwargs generators derive data assets from data sources:
<a href="https://great-expectations.readthedocs.io/en/latest/how_to_add_data_source.html">Data assets</a>
</p>"""
                    )
                )
    display(HTML(html))

    # TODO: add expectation suite names (existing)
bootstrap_link_element = """<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">"""
cooltip_style_element = """<style type="text/css">
.cooltip {
display:inline-block;
position:relative;
text-align:left;
}
.cooltip .top {
min-width:200px;
top:-6px;
left:50%;
transform:translate(-50%, -100%);
padding:10px 20px;
color:#FFFFFF;
background-color:#222222;
font-weight:normal;
font-size:13px;
border-radius:8px;
position:absolute;
z-index:99999999;
box-sizing:border-box;
box-shadow:0 1px 8px rgba(0,0,0,0.5);
display:none;
}
.cooltip:hover .top {
display:block;
}
.cooltip .top i {
position:absolute;
top:100%;
left:50%;
margin-left:-12px;
width:24px;
height:12px;
overflow:hidden;
}
.cooltip .top i::after {
content:'';
position:absolute;
width:12px;
height:12px;
left:50%;
transform:translate(-50%,-50%) rotate(45deg);
background-color:#222222;
box-shadow:0 1px 8px rgba(0,0,0,0.5);
}
</style>
"""
def _render_for_jupyter(
    view,
    include_styling,
    return_without_displaying,
):
    """Return the rendered HTML string or display it inline in the notebook.

    When *include_styling* is true, the Bootstrap link tag and the cooltip
    style block are prepended to *view*. When *return_without_displaying* is
    true the HTML string is returned; otherwise it is displayed and None is
    returned.
    """
    html_to_display = view
    if include_styling:
        html_to_display = bootstrap_link_element + cooltip_style_element + view
    if return_without_displaying:
        return html_to_display
    display(HTML(html_to_display))
def display_column_expectations_as_section(
    expectation_suite,
    column,
    include_styling=True,
    return_without_displaying=False,
):
    """Render every Expectation in *expectation_suite* targeting *column* as an HTML block.

    The block is built with ExpectationSuiteColumnSectionRenderer and rendered
    through DefaultJinjaSectionView, so it looks exactly like the default
    renderer output for build_docs.

    Example usage:
    exp = context.get_expectation_suite("notable_works_by_charles_dickens", "BasicDatasetProfiler")
    display_column_expectations_as_section(exp, "Type")
    """
    # TODO: replace this with a generic utility function, preferably a method on an ExpectationSuite class
    matching_expectations = []
    for expectation in expectation_suite.expectations:
        if "column" in expectation.kwargs and expectation.kwargs["column"] == column:
            matching_expectations.append(expectation)
    # TODO: Handle the case where zero evrs match the column name
    renderer = ExpectationSuiteColumnSectionRenderer()
    document = renderer.render(matching_expectations).to_json_dict()
    view = DefaultJinjaSectionView().render({"section": document, "section_loop": 1})
    return _render_for_jupyter(view, include_styling, return_without_displaying)
def display_profiled_column_evrs_as_section(
    evrs,
    column,
    include_styling=True,
    return_without_displaying=False,
):
    """Render all profiling EVRs for *column* as a single HTML block.

    The block is built with ProfilingResultsColumnSectionRenderer and rendered
    through DefaultJinjaSectionView, matching the default build_docs output.

    Example usage:
    display_column_evrs_as_section(exp, "my_column")

    WARNING: This method is experimental.
    """
    # TODO: replace this with a generic utility function, preferably a method on an ExpectationSuite class
    matching_evrs = []
    for evr in evrs.results:
        evr_kwargs = evr.expectation_config.kwargs
        if "column" in evr_kwargs and evr_kwargs["column"] == column:
            matching_evrs.append(evr)
    # TODO: Handle the case where zero evrs match the column name
    document = ProfilingResultsColumnSectionRenderer().render(matching_evrs).to_json_dict()
    view = DefaultJinjaSectionView().render(
        {"section": document, "section_loop": {"index": 1}}
    )
    return _render_for_jupyter(view, include_styling, return_without_displaying)
def display_column_evrs_as_section(
    evrs,
    column,
    include_styling=True,
    return_without_displaying=False,
):
    """
    Display validation results for a single column as a section.

    Built with ValidationResultsColumnSectionRenderer and rendered through
    DefaultJinjaSectionView, matching the default build_docs output.

    WARNING: This method is experimental.
    """
    # TODO: replace this with a generic utility function, preferably a method on an ExpectationSuite class
    matching_evrs = []
    for evr in evrs.results:
        evr_kwargs = evr.expectation_config.kwargs
        if "column" in evr_kwargs and evr_kwargs["column"] == column:
            matching_evrs.append(evr)
    # TODO: Handle the case where zero evrs match the column name
    document = ValidationResultsColumnSectionRenderer().render(matching_evrs).to_json_dict()
    view = DefaultJinjaSectionView().render(
        {"section": document, "section_loop": {"index": 1}}
    )
    return _render_for_jupyter(view, include_styling, return_without_displaying)
# When importing the jupyter_ux module, we set up a preferred logging configuration
# Module import side effect: reconfigures the "great_expectations" logger for
# notebook-friendly output via setup_notebook_logging (defined earlier in this file).
logger = logging.getLogger("great_expectations")
setup_notebook_logging(logger)
|
|
#!/usr/bin/env python3
"""
@name: Pyhouse.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2010-2020 by D. Brian Kimmel
@note: Created on Mar 1, 2014
@license: MIT License
@summary: This is the core of the PyHouse daemon.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights2
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
PyHouse.
Computer Configuration File
House Configuration File
Services Internal
Twisted Internal
Xml Internal
Uses I{Epytext} mark-up for documentation.
see C{Modules.__init__.py} for Core documentation.
During development this is run by hand.
It is, however, planned to be a daemon that is kicked off on system start-up.
It now (2013) runs on Raspberry Pi so that is the primary target.
There are two components of this software.
The first is "computer' and is started first.
The second is 'house' and is started second.
See those modules to find out what each does.
Idea Links:
https://github.com/TheThingSystem/home-controller forked from automategreen/home-controller
https://github.com/zonyl/pytomation
https://github.com/king-dopey/pytomation forked from zonyl/pytomation
http://leftovercode.info/smartlinc.php
https://github.com/hollie/misterhouse/
https://github.com/hollie/misterhouse/blob/master/lib/Insteon/AllLinkDatabase.pm
SmartHome Wiki: Using Custom Commands in SmartLinc
SmartHome Forum: SmartLinc Direct Command for Light Status?
SmartHome Forum: Custom Screens on the SmartLinc
SmartHome Wiki: Insteon Command Table
Smarthome Forum: SmartLinc web automation solved
SmartHome Forum: 2412N Insteon Central Controller - Software
Ramp Rate
Insteon Commands
"""
# Maintenance metadata; __version__ is logged when the configuration loads.
__updated__ = '2020-02-17'
__version_info__ = (20, 2, 4)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
import errno
import fcntl
import os
import platform
import signal
import sys
# Import PyHouse files and modules.
from Modules.Core.setup_config import CheckInitialSetup
from Modules.Core import core
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse ')
# Lock file placed next to the executable; used by Singleton below to keep a
# second PyHouse instance from starting.
LOCK_PATH = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "lock")
# Module-global handle to the running Api instance; set in Api.__init__ and
# read by the signal handlers below.
g_Api = None
class Singleton:
    """ Set up the singleton pattern.
    This prevents more than one instance of PyHouse running on this computer.
    It is the very first action taken when starting PyHouse to run.
    """

    def __init__(self):
        self.fh = None
        self.is_running = False
        self.do_magic()

    def do_magic(self):
        # Take an exclusive non-blocking lock on the lock file; if the lock
        # is already held by another process, mark this instance as a
        # duplicate instead of raising.
        try:
            self.fh = open(LOCK_PATH, 'w')
            fcntl.lockf(self.fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except EnvironmentError:
            if self.fh is None:
                # The open() itself failed (permissions, missing dir, ...)
                raise
            self.is_running = True

    def clean_up(self):
        # Release the lock and remove the lock file on shutdown.
        try:
            if self.fh is not None:
                fcntl.lockf(self.fh, fcntl.LOCK_UN)
                self.fh.close()  # ???
                os.unlink(LOCK_PATH)
        except Exception as e_err:
            LOG.exception(e_err)
            raise  # for debugging purposes, do not raise it on production
def daemonize():
    """Taken from twisted.scripts._twistd_unix.py

    Detach the process from the controlling terminal using the classic
    double-fork technique, then redirect stdin/stdout/stderr to /dev/null.
    Returns only in the final (grand-)child process.
    """
    if os.fork():  # launch child and...
        os._exit(0)  # kill off parent
    os.setsid()
    if os.fork():  # launch child and...
        os._exit(0)  # kill off parent again.
    # NOTE(review): 127 == 0o177; an octal mask (e.g. 0o077) may have been
    # intended here -- confirm before changing.
    os.umask(127)
    null = os.open('/dev/null', os.O_RDWR)
    # Point fds 0, 1, 2 (stdin/stdout/stderr) at /dev/null.
    for i in range(3):
        try:
            os.dup2(null, i)
        except OSError as e:
            if e.errno != errno.EBADF:
                raise
    os.close(null)
def handle_signals():
    """Install the process signal handlers.

    SIGHUP restarts PyHouse, SIGINT (Ctrl-C) quits, SIGTERM terminates.
    Skipped entirely on Windows, where these POSIX signals are unavailable.
    """
    LOG.info('Setting up signal handlers.')
    if platform.uname()[0] == 'Windows':
        return
    signal.signal(signal.SIGHUP, SigHupHandler)
    signal.signal(signal.SIGINT, SigIntHandler)
    signal.signal(signal.SIGTERM, SigTermHandler)
def SigHupHandler(signum, _stackframe):
    """Restart PyHouse on SIGHUP: stop all modules, then start them again.

    :param signum: delivered signal number (signal.SIGHUP).
    :param _stackframe: interrupted stack frame (unused).
    """
    LOG.debug('Hup Signal handler called with signal {}'.format(signum))
    g_Api.Stop()
    g_Api.Start()
def SigIntHandler(signum, _stackframe):
    """ SigInt
    interrupt character (probably Ctrl-C)

    Quit PyHouse cleanly, then terminate the process.

    :param signum: delivered signal number (signal.SIGINT).
    :param _stackframe: interrupted stack frame (unused).
    """
    LOG.debug('SigInt - Signal handler called with signal {}'.format(signum))
    LOG.info("Interrupted.\n\n\n")
    g_Api.Quit()
    # BUG FIX: the bare name 'exit' only evaluated the builtin without
    # calling it (a no-op), so the handler never actually exited.
    sys.exit(0)
def SigTermHandler(signum, _stackframe):
    """SigTerm

    Terminate the process on SIGTERM.

    :param signum: delivered signal number (signal.SIGTERM).
    :param _stackframe: interrupted stack frame (unused).
    """
    LOG.debug('SigTerm - Signal handler called with signal {}'.format(signum))
    LOG.info('SigTerm \n')
    # BUG FIX: the bare name 'exit' was a no-op expression, which made the
    # process silently ignore SIGTERM. Exit explicitly instead.
    sys.exit(0)
def SigKillHandler(signum, _stackframe):
    """Log and terminate.

    NOTE(review): SIGKILL cannot be caught or handled on POSIX, and this
    handler is never registered in handle_signals(); it appears to be dead
    code kept for completeness.

    :param signum: delivered signal number.
    :param _stackframe: interrupted stack frame (unused).
    """
    LOG.debug('SigKill - Signal handler called with signal {}'.format(signum))
    LOG.info('SigKill \n')
    # BUG FIX: the bare name 'exit' was a no-op expression; exit explicitly.
    sys.exit(0)
class Api:
    """Top-level API of the PyHouse daemon.

    Owns the Core module controller and drives the load/start/stop/quit
    lifecycle. Instantiating it publishes the instance via the module-global
    g_Api so the signal handlers can reach it.
    """
    # Modules.Core.core.Api instance, created in __init__.
    m_core = None
    # NOTE(review): defaults to a plain dict but is used with attribute
    # access (._Twisted) below -- presumably replaced with a real PyHouse
    # data object by the Core init; confirm.
    m_pyhouse_obj = {}
    def __init__(self):
        """ This is the startup of the entire system.
        All permanent services are started here.
        These Core routines are an integral part of the daemon process.
        Notice that the reactor starts here as the very last step here and that
        call never returns until the reactor is stopped (permanent stoppage).
        """
        global g_Api
        g_Api = self
        self.m_core = core.Api()
        self.m_core._init_core()
        LOG.info('Initialized.\n==================================================================\n')
    def LoadConfig(self):
        """ This loads all the configuration.

        Schedules Start() on the Twisted reactor 3 seconds from now.
        """
        self.m_core.LoadConfig()
        self.m_pyhouse_obj._Twisted.Reactor.callLater(3, self.Start)
        LOG.info("Loaded Config - Version:{}\n======================== Loaded Config Files ========================\n".format(__version__))
        pass
    def Start(self):
        """ This is automatically invoked when the reactor starts from Api().
        """
        print('Reactor is now running.')
        LOG.info('Starting - Reactor is now running.')
        self.m_core.Start()
        LOG.info('Everything has been started\n-----------------------------------------\n')
    def Stop(self):
        """Stop various modules to prepare for restarting them.
        """
        self.m_core.Stop()
        LOG.info("Stopped.\n")
    def Quit(self):
        """Prepare to exit all of PyHouse.

        Stops all modules, then stops the Twisted reactor (ends the process
        main loop).
        """
        LOG.debug('Running Quit now.')
        self.Stop()
        self.m_pyhouse_obj._Twisted.Reactor.stop()
class BeforeReactor(Api):
    """ This class is for initialization before the reactor starts.
    It is run right after Singleton protection is invoked.
    """
    m_pyhouse_obj = None
    def start_setup(self):
        """
        Notice that the reactor starts here as the very last step here and that
        call never returns until the reactor is stopped (permanent stoppage).

        NOTE(review): instantiating BeforeReactor already ran Api.__init__
        (which creates a core.Api); the core.Api() call below creates a
        second one -- confirm that is intentional.
        """
        print('PyHouse.BeforeReactor()')  # For development - so we can see when we get to this point...
        CheckInitialSetup()
        core.Api()
        #
        # When the reactor stops we continue here
        #
        LOG.info("PyHouse says Bye Now.\n")
        print('PyHouse is exiting.')
        raise SystemExit("PyHouse says Bye Now.")
# Entry point: enforce single-instance via the lock file, run the daemon,
# and always release the lock on the way out.
if __name__ == "__main__":
    si = Singleton()
    try:
        if si.is_running:
            sys.exit("This app is already running!")
        BeforeReactor().start_setup()
    finally:
        si.clean_up()
# ## END DBK
|
|
# -*- coding: utf-8 -*-
'''
Payment Gateway Transaction
:copyright: (c) 2013-2014 by Openlabs Technologies & Consulting (P) Ltd.
:license: BSD, see LICENSE for more details
'''
from trytond.pool import PoolMeta, Pool
from trytond.pyson import Eval
from trytond.model import fields
from .beanstream_api import BeanstreamClient, CreditCard
__all__ = [
    'PaymentGatewayBeanstream', 'BeanstreamTransaction',
    'AddPaymentProfileView', 'Address', 'AddPaymentProfile',
]
# Python 2 module-level metaclass hook: every class below gets PoolMeta
# (Tryton model extension). NOTE(review): this idiom has no effect under
# Python 3, where each class needs an explicit metaclass.
__metaclass__ = PoolMeta
# Shared field states: Beanstream-specific fields are required and visible
# only when the gateway provider is 'beanstream'.
BEANSTREAM_STATES = {
    'required': Eval('provider') == 'beanstream',
    'invisible': Eval('provider') != 'beanstream'
}
class PaymentGatewayBeanstream:
    """Beanstream Gateway Implementation.

    Extends payment_gateway.gateway (via the module __metaclass__) with
    Beanstream-specific configuration fields and provider registration.
    """
    __name__ = 'payment_gateway.gateway'
    # Beanstream merchant account identifier.
    beanstream_merchant_id = fields.Char(
        'Merchant ID', states=BEANSTREAM_STATES, depends=['provider']
    )
    # Currency all Beanstream amounts are converted into (see
    # BeanstreamTransaction.get_beanstream_transaction_dict).
    beanstream_currency = fields.Many2One(
        'currency.currency', 'Currency', states=BEANSTREAM_STATES,
        depends=['provider']
    )
    # Either static passcode or SHA1 hash authentication against the API.
    beanstream_auth_mechanism = fields.Selection([
        ('passcode', 'Passcode'),
        ('hash', 'Hash SHA1')
    ], 'Authentication Mechanism', states=BEANSTREAM_STATES)
    # Required/visible only for provider == beanstream with passcode auth.
    beanstream_pass_code = fields.Char(
        'Pass Code', states={
            'required': (
                (Eval('provider') == 'beanstream') &
                (Eval('beanstream_auth_mechanism') == 'passcode')
            ),
            'invisible': ~(
                (Eval('provider') == 'beanstream') &
                (Eval('beanstream_auth_mechanism') == 'passcode')
            )
        }, depends=['provider', 'beanstream_auth_mechanism']
    )
    # Required/visible only for provider == beanstream with hash auth.
    beanstream_hash_key = fields.Char(
        "Hash Key", states={
            'required': (
                (Eval('provider') == 'beanstream') &
                (Eval('beanstream_auth_mechanism') == 'hash')
            ),
            'invisible': ~(
                (Eval('provider') == 'beanstream') &
                (Eval('beanstream_auth_mechanism') == 'hash')
            )
        }, depends=['provider', 'beanstream_auth_mechanism']
    )
    @staticmethod
    def default_auth_mechanism():
        # NOTE(review): Tryton resolves field defaults by the method name
        # 'default_<field_name>'; the field here is
        # 'beanstream_auth_mechanism', so this probably needs to be named
        # 'default_beanstream_auth_mechanism' to take effect -- confirm.
        return 'passcode'
    @classmethod
    def get_providers(cls, values=None):
        """
        Downstream modules can add to the list
        """
        rv = super(PaymentGatewayBeanstream, cls).get_providers()
        beanstream_record = ('beanstream', 'Beanstream')
        if beanstream_record not in rv:
            rv.append(beanstream_record)
        return rv
    def get_methods(self):
        # Beanstream only supports credit card payments in this module.
        if self.provider == 'beanstream':
            return [
                ('credit_card', 'Credit Card - Beanstream'),
            ]
        return super(PaymentGatewayBeanstream, self).get_methods()
    def get_beanstream_client(self):
        """
        Returns a Beanstream client API

        Builds the client with either pass_code or hash_key credentials
        depending on the configured auth mechanism.
        """
        validation_params = {}
        if self.beanstream_auth_mechanism == 'passcode':
            validation_params['pass_code'] = self.beanstream_pass_code
        elif self.beanstream_auth_mechanism == 'hash':
            validation_params['hash_key'] = self.beanstream_hash_key
        return BeanstreamClient(
            merchant_id=self.beanstream_merchant_id,
            test=self.test,
            **validation_params
        )
class BeanstreamTransaction:
    """
    Implement the authorize and capture methods

    Extends payment_gateway.transaction with the Beanstream provider's
    *_beanstream methods. Only capture and update are implemented;
    authorize, retry and settle raise 'feature_not_available'.
    """
    __name__ = 'payment_gateway.transaction'
    def get_beanstream_transaction_dict(self):
        """
        Returns a dictionary of variables as required for the beanstream
        request

        The amount is converted into the gateway's configured Beanstream
        currency before being serialized.
        """
        Currency = Pool().get('currency.currency')
        res = {
            'requestType': 'BACKEND',
            'trnOrderNumber': self.uuid,
            'trnAmount': str(Currency.compute(
                self.currency, self.amount, self.gateway.beanstream_currency)
            ),
        }
        return res
    def authorize_beanstream(self, credit_card=None):
        """
        Authorize using beanstream for the specific transaction.

        :param credit_card: An instance of CreditCardView
        """
        # NOTE(review): raise_user_error raises internally, so the outer
        # 'raise' never executes -- harmless but redundant.
        raise self.raise_user_error('feature_not_available')
    def capture_beanstream(self, card_info=None):
        """
        Capture using beanstream for the specific transaction.

        :param card_info: An instance of CreditCardView
        """
        TransactionLog = Pool().get('payment_gateway.transaction.log')
        credit_card = None
        if card_info is not None:
            credit_card = CreditCard(
                card_info.number,
                str(card_info.expiry_year)[-2:],  # Beanstream wants 2-digit year
                card_info.expiry_month,
                card_info.owner,
                card_info.csc,
            )
        client = self.gateway.get_beanstream_client()
        result = client.transaction.purchase(self, credit_card)
        # save the result to the logs
        TransactionLog.serialize_and_create(self, result)
        # TODO: Update the timestamp with the trnDate return value from
        # beanstream sent in the format '%m/%d/%Y %I:%M:%S %p' but the
        # timezone is not mentioned in the docs
        self.provider_reference = result['trnId']
        # trnApproved is '1' for an approved purchase.
        if result['trnApproved'] == '1':
            self.state = 'completed'
            self.safe_post()
        else:
            self.state = 'failed'
            self.save()
    def retry_beanstream(self, credit_card=None):
        """
        Authorize using beanstream for the specific transaction.

        :param credit_card: An instance of CreditCardView
        """
        raise self.raise_user_error('feature_not_available')
    def settle_beanstream(self):
        """
        Settle the authporized payment
        """
        raise self.raise_user_error('feature_not_available')
    def update_beanstream(self):
        """
        Update the status of the transaction from beanstream

        Queries Beanstream by this transaction's uuid and logs the result.
        """
        TransactionLog = Pool().get('payment_gateway.transaction.log')
        client = self.gateway.get_beanstream_client()
        result = client.transaction.query(self.uuid)
        # save the result to the logs
        TransactionLog.serialize_and_create(self, result)
        # NOTE(review): leftover Python 2 debug print; consider removing or
        # routing through a logger.
        print result
class AddPaymentProfileView:
    """Extend the add-payment-profile view with the Beanstream provider."""
    __name__ = 'party.payment_profile.add_view'

    @classmethod
    def get_providers(cls):
        """
        Return the list of providers who support credit card profiles.
        """
        providers = super(AddPaymentProfileView, cls).get_providers()
        providers.append(('beanstream', 'Beanstream'))
        return providers
class Address:
    """
    Add beanstream contact details fetching ability to address
    """
    __name__ = 'party.address'
    def get_beanstream_contact_dict(self):
        """
        Returns a dictionary with keys as beanstream variables and
        corresponding values from address and party.

        Keys with empty/falsy values are dropped from the result.
        """
        res = {
            'ordName': self.name or self.party.name,
            'ordAddress1': self.street,
            'ordAddress2': self.streetbis,
            'ordCity': self.city,
            'ordPostalCode': self.zip,
            'ordEmailAddress': self.party.email,
            'ordPhoneNumber': self.party.phone,
            'ordCountry': self.country and self.country.code,
        }
        if self.country and self.country.code in ('US', 'CA'):
            # For US and Canada send province code else --
            res['ordProvince'] = self.subdivision and self.subdivision.code[-2:]
        else:
            res['ordProvince'] = '--'
        # NOTE(review): dict.iteritems is Python 2 only -- this module is
        # written for Python 2 throughout.
        return dict((k, v) for k, v in res.iteritems() if v)
class AddPaymentProfile:
    """
    Add a payment profile

    Wizard transition implementation for the Beanstream provider.
    """
    __name__ = 'party.party.payment_profile.add'
    def transition_add_beanstream(self):
        """
        Handle the case if the profile should be added for beanstream

        Creates a Beanstream payment profile from the wizard's card_info
        state and stores the returned customerCode as the profile reference.
        """
        card_info = self.card_info
        client = card_info.gateway.get_beanstream_client()
        cc = CreditCard(
            card_info.number,
            str(card_info.expiry_year)[-2:],  # 4 digit to 2 digit
            card_info.expiry_month,
            card_info.owner,
            card_info.csc,
        )
        result = client.payment_profile.create(cc, card_info.address)
        # Beanstream allows a profile functionality where multiple cards
        # can be registered against a single person. While the feature
        # looks close to the payment_proifle implementation, it is not
        # possible to maintain such an integration. So in practive this
        # module uses the customerCode automatically generated by
        # beanstream as the reference for each card.
        #
        # In other words, each profile is maintained as a separate profile
        # on beanstream
        return self.create_profile(result['customerCode'])
|
|
from django.core.management.base import BaseCommand, CommandError
import sys
import logging
import signal
from collect.models import Collection, Tweet
from django.conf import settings
import operator
import json
from nltk import bigrams
from collections import Counter
import re
from collections import defaultdict
from nltk.corpus import stopwords
import string
import vincent
import arrow
from time import sleep
class Command(BaseCommand):
    # Django management command (Python 2 codebase: print statements).
    # For one tweet Collection it computes most-common terms / hashtags /
    # mentions as vincent bar-chart JSON files and a GeoJSON map of
    # geotagged tweets, all written under STATICFILES_DIRS[0]/json/.
    args = 'collection_id'
    help = 'Analyze tweets'
    def handle(self, *args, **options):
        """Analyze the tweets of the given collection and emit JSON charts.

        :param args: args[0] is the Collection primary key (required).
        """
        if (len(args)<1):
            raise CommandError('Arguments "collection_id" is required!')
        # Exit quietly on Ctrl-C / SIGTERM instead of dumping a traceback.
        # NOTE(review): the parameter name shadows the 'signal' module.
        def exit_gracefully(signal, frame):
            sys.exit(0)
        signal.signal(signal.SIGINT, exit_gracefully)
        signal.signal(signal.SIGTERM, exit_gracefully)
        collection = Collection.objects.get(pk=args[0])
        print collection.id
        # t1 = 'germania'
        # t2 = 'italia'
        # dates_t1= []
        # dates_t2 = []
        path_to_json = settings.STATICFILES_DIRS[0] + 'json/'
        # c = get_object_or_404(Collection, pk=collection_id, user=request.user)
        list_of_tweets = []
        for t in collection.tweets.all():
            list_of_tweets.append(t)
        # print list_of_tweets
        start = arrow.now()
        # Stopword list: several languages plus punctuation and ad-hoc noise
        # tokens observed in the corpus.
        punctuation = list(string.punctuation)
        stop = stopwords.words('spanish') + stopwords.words('french') + stopwords.words('german') + stopwords.words('italian') + stopwords.words('english') + punctuation + ["I'm", "don't", "i'm", "Don't", "l'a", "amp", "ter", "les", "c'est", 'de', 'en', 'el', 'https', 'rt', 'via', 'RT']
        print stop
        # Tweet tokenizer: emoticons first so they are not split apart.
        emoticons_str = r"""
    (?:
        [:=;] # Eyes
        [oO\-]? # Nose (optional)
        [D\)\]\(\]/\\OpP] # Mouth
    )"""
        regex_str = [
            emoticons_str,
            r'<[^>]+>', # HTML tags
            r'(?:@[\w_]+)', # @-mentions
            r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
            r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+',
            r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
            r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
            r'(?:[\w_]+)', # other words
            r'(?:\S)' # anything else
        ]
        tokens_re = re.compile(r'('+'|'.join(regex_str)+')',
                               re.VERBOSE | re.IGNORECASE)
        emoticon_re = re.compile(r'^'+emoticons_str+'$',
                                 re.VERBOSE | re.IGNORECASE)
        def tokenize(s):
            return tokens_re.findall(s)
        def preprocess(s, lowercase=True):
            # Lowercase everything except emoticons (case is meaningful there).
            tokens = tokenize(s)
            if lowercase:
                tokens = [token if emoticon_re.search(token)
                          else token.lower() for token in tokens]
            return tokens
        pr = 0
        err = 0
        a = 0
        geo_data = {
            "type": "FeatureCollection",
            "features": []
        }
        count_stop = Counter()
        count_hashtags = Counter()
        count_mentions = Counter()
        # NOTE(review): count_bigram is never updated -- bigram counting
        # appears unfinished.
        count_bigram = Counter()
        for t in list_of_tweets:
            try:
                tweet = t.text
                pr += 1
                print str(t.tweet_time) + " ok: " + str(pr)
                terms_stop = [term for term in preprocess(tweet)
                              if term not in stop and
                              not term.startswith(('#', '@', 'http')) and
                              len(term) > 2]
                hashtags_only = [term for term in preprocess(tweet)
                                 if term.startswith(('#')) and
                                 len(term) > 2]
                mentions_only = [term for term in preprocess(tweet)
                                 if term.startswith(('@')) and len(term) > 1]
                # terms = [term for term in preprocess(tweet['text'])
                #          if term not in stop and len(term) != 1]
                # # track when the hashtag is mentioned
                # if t1 in terms:
                #     dates_t1.append(tweet['created_at'])
                # if t2 in terms:
                #     dates_t2.append(tweet['created_at'])
            except:
                err += 1
            # NOTE(review): if the try-block above fails, terms_stop /
            # hashtags_only / mentions_only still hold the PREVIOUS tweet's
            # values (or are undefined on the first tweet), so failures
            # double-count the prior tweet -- confirm and fix upstream.
            count_stop.update(terms_stop)
            count_hashtags.update(hashtags_only)
            count_mentions.update(mentions_only)
            print "map: ",
            print a
            a += 1
            try:
                # Collect a GeoJSON point for tweets carrying coordinates.
                if t.lat:
                    geo_json_feature = {
                        "type": "Feature",
                        "geometry": {
                            "coordinates": [float(t.lon), float(t.lat)],"type":"Point"},
                        "properties": {
                            "text": tweet
                        }
                    }
                    geo_data['features'].append(geo_json_feature)
            except:
                pass
            # if pr == 1000:
            #     break
        # Top-N cutoff for all three frequency charts.
        nElements = 20
        print "----------------------------"
        print "generating most common terms json"
        word_freq = count_stop.most_common(nElements)
        labels, freq = zip(*word_freq)
        data = {'data': freq, 'x': labels}
        bar = vincent.Bar(data, iter_idx='x', height=400, width=600)
        bar.x_axis_properties(label_angle=-45, label_align="right")
        bar.legend(title="Most frequent terms")
        bar.to_json(path_to_json + str(collection.id) + '_freq_terms.json')
        print "generating most common hashtags json"
        word_freq = count_hashtags.most_common(nElements)
        labels, freq = zip(*word_freq)
        data = {'data': freq, 'x': labels}
        bar = vincent.Bar(data, iter_idx='x', height=400, width=600)
        bar.x_axis_properties(label_angle=-45, label_align="right")
        bar.legend(title="Most frequent hashtags")
        bar.to_json(path_to_json + str(collection.id) + '_freq_hashtags.json')
        print "generating most common mentions json"
        word_freq = count_mentions.most_common(nElements)
        labels, freq = zip(*word_freq)
        data = {'data': freq, 'x': labels}
        bar = vincent.Bar(data, iter_idx='x', height=400, width=600)
        bar.x_axis_properties(label_angle=-45, label_align="right")
        bar.legend(title="Most frequent mentions")
        bar.to_json(path_to_json + str(collection.id) + '_freq_mentions.json')
        # Save geo data
        with open(path_to_json + str(collection.id) + '_map.json', 'w') as fout:
            fout.write(json.dumps(geo_data, indent=4))
        # print "time charting now"
        # # 1 time charting
        # print dates_t1
        # print 'asdasdasdasdasdasdadsasd'
        # sleep(3)
        # ones = [1]*len(dates_t1)
        # twos = [1]*len(dates_t2)
        # # 2 the index of the series
        # print '2'
        # idxn = pandas.DatetimeIndex(dates_t1)
        # idxp = pandas.DatetimeIndex(dates_t2)
        # # 3 the actual series (at series of 1s for the moment)
        # print '3'
        # t1 = pandas.Series(ones, index=idxn)
        # t2 = pandas.Series(twos, index=idxp)
        # # 4 Resampling / bucketing
        # print '4'
        # per_minute_t1 = t1.resample('1Min', how='sum').fillna(0)
        # per_minute_t2 = t2.resample('1Min', how='sum').fillna(0)
        # # 5 all the data together
        # print '5'
        # match_data = dict(t1=per_minute_t1, t2=per_minute_t2)
        # # 6 we need a DataFrame, to accommodate multiple series
        # print '6'
        # all_matches = pandas.DataFrame(data=match_data,
        #                                index=per_minute_t1.index)
        # # 7 Resampling as above
        # print '7'
        # all_matches = all_matches.resample('1Min', how='sum').fillna(0)
        # # 8 and now the plotting
        # print '8'
        # boh = [t1, t2]
        # time_chart = vincent.Line(all_matches[['germania', 'italia']])
        # time_chart.axis_titles(x='Time', y='Freq')
        # tit = t1 + ' vs ' + t2
        # time_chart.legend(title=tit)
        # time_chart.to_json(path_to_json + str(collection.id) + '_time.json', 'w')
        # print "started at: " + str(start)
        # print ''
        # stop = arrow.now()
        # print "stopped at: " + str(stop)
        # print ''
        # print "total time: " + str(stop - start)
|
|
"""
Over the application Model, queries to the database
"""
import datetime
import logging
from sqlalchemy.orm import Session, joinedload
import tools
from model import MachineInterface, Machine, MachineDisk, \
Schedule, ScheduleRoles, LifecycleIgnition, LifecycleCoreosInstall, LifecycleRolling
from smartdb import SmartDatabaseClient as sc
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class InjectLifecycle:
    """
    Store the data from the Lifecycle machine state

    Each refresh/apply method upserts one lifecycle row keyed on the machine
    resolved from the request's MAC address, then commits.
    """

    def __init__(self, session, request_raw_query):
        """Resolve the Machine owning the MAC found in the raw query string.

        :param session: SQLAlchemy session.
        :param request_raw_query: raw query string carrying the MAC.
        :raises AttributeError: if no machine with that MAC exists.
        """
        self.session = session
        self.adds = 0
        self.updates = 0
        self.mac = tools.get_mac_from_raw_query(request_raw_query)
        self.machine = self.session.query(Machine).join(MachineInterface).filter(
            MachineInterface.mac == self.mac).first()
        if not self.machine:
            m = "InjectLifecycle mac: '%s' unknown in db" % self.mac
            logger.error(m)
            raise AttributeError(m)
        logger.debug("InjectLifecycle mac: %s" % self.mac)

    def refresh_lifecycle_ignition(self, up_to_date: bool):
        """Create or update the LifecycleIgnition row for this machine.

        last_change_date is only bumped when the up_to_date flag actually flips.
        """
        lifecycle = self.session.query(LifecycleIgnition).filter(
            LifecycleIgnition.machine_id == self.machine.id).first()
        if not lifecycle:
            lifecycle = LifecycleIgnition(
                machine_id=self.machine.id,
                up_to_date=up_to_date
            )
            self.session.add(lifecycle)
        else:
            now = datetime.datetime.utcnow()
            if lifecycle.up_to_date != up_to_date:
                lifecycle.last_change_date = now
            lifecycle.up_to_date = up_to_date
            lifecycle.updated_date = now
        self.session.commit()

    def refresh_lifecycle_coreos_install(self, success: bool):
        """Create or update the LifecycleCoreosInstall row for this machine."""
        lifecycle = self.session.query(LifecycleCoreosInstall).filter(
            LifecycleCoreosInstall.machine_id == self.machine.id).first()
        if not lifecycle:
            lifecycle = LifecycleCoreosInstall(
                machine_id=self.machine.id,
                success=success
            )
            self.session.add(lifecycle)
        else:
            # BUG FIX: this previously assigned to 'up_to_date', but the
            # LifecycleCoreosInstall field is 'success' (see the constructor
            # above and the SQL 'lci.success' in FetchLifecycle), so updates
            # never refreshed the install status.
            lifecycle.success = success
            lifecycle.updated_date = datetime.datetime.utcnow()
        self.session.commit()

    def apply_lifecycle_rolling(self, enable: bool, strategy="kexec"):
        """Create or update the LifecycleRolling row (rolling-update policy).

        :param enable: whether rolling updates are enabled for the machine.
        :param strategy: rolling strategy name, defaults to "kexec".
        """
        lifecycle = self.session.query(LifecycleRolling).filter(
            LifecycleRolling.machine_id == self.machine.id).first()
        if not lifecycle:
            lifecycle = LifecycleRolling(
                machine_id=self.machine.id,
                enable=enable,
                strategy=strategy,
            )
            self.session.add(lifecycle)
        else:
            lifecycle.enable = enable
            lifecycle.strategy = strategy
            lifecycle.updated_date = datetime.datetime.utcnow()
        self.session.commit()
class FetchLifecycle:
    """
    Get the data of the Lifecycle state

    Read-only queries over the lifecycle tables; every getter returns plain
    Python data (dicts/lists/scalars), never ORM objects.
    """
    def __init__(self, session: Session):
        self.session = session
    def get_ignition_uptodate_status(self, mac: str):
        """Return the ignition up_to_date flag for *mac*, or None if unknown."""
        for row in self.session.execute("""SELECT li.up_to_date FROM "machine-interface" AS mi
        JOIN machine AS m ON m.id = mi.machine_id
        JOIN "lifecycle-ignition" AS li ON li.machine_id = mi.machine_id
        WHERE mi.mac = :mac""", {"mac": mac}):
            return row["up_to_date"]
        return None
    def get_all_updated_status(self):
        """Return one status dict per machine that has a boot interface and
        an ignition lifecycle row."""
        status = []
        for machine in self.session.query(Machine).join(LifecycleIgnition).join(
                MachineInterface).filter(MachineInterface.as_boot == True):
            status.append({
                "up-to-date": machine.lifecycle_ignition[0].up_to_date,
                "fqdn": machine.interfaces[0].fqdn,
                "mac": machine.interfaces[0].mac,
                "cidrv4": machine.interfaces[0].cidrv4,
                "created_date": machine.created_date,
                "updated_date": machine.updated_date,
                "last_change_date": machine.lifecycle_ignition[0].last_change_date,
            })
        return status
    def get_coreos_install_status(self, mac: str):
        """Return the CoreOS install success flag for *mac* as a bool, or
        None if the MAC is unknown."""
        for row in self.session.execute("""SELECT lci.success FROM "machine-interface" AS mi
        JOIN machine AS m ON m.id = mi.machine_id
        JOIN "lifecycle-coreos-install" AS lci ON lci.machine_id = mi.machine_id
        WHERE mi.mac = :mac""", {"mac": mac}):
            return bool(row["success"])
        return None
    def get_all_coreos_install_status(self):
        """Return one install-status dict per machine with a boot interface."""
        life_status_list = []
        for machine in self.session.query(Machine).join(LifecycleCoreosInstall).join(MachineInterface).filter(
                MachineInterface.as_boot == True):
            life_status_list.append({
                "mac": machine.interfaces[0].mac,
                "fqdn": machine.interfaces[0].fqdn,
                "cidrv4": machine.interfaces[0].cidrv4,
                "success": machine.lifecycle_coreos_install[0].success,
                "created_date": machine.lifecycle_coreos_install[0].created_date,
                "updated_date": machine.lifecycle_coreos_install[0].updated_date
            })
        return life_status_list
    def get_rolling_status(self, mac: str):
        """Return (enable, strategy) of the rolling policy for *mac*, or
        (None, None) when the machine has no rolling row."""
        for m in self.session.query(Machine) \
                .join(MachineInterface) \
                .filter(MachineInterface.mac == mac) \
                .join(LifecycleRolling):
            try:
                rolling = m.lifecycle_rolling[0]
                return rolling.enable, rolling.strategy
            except IndexError:
                # joined machine without a rolling row: fall through to None
                pass
        logger.debug("mac: %s return None" % mac)
        return None, None
    def get_all_rolling_status(self):
        """Return one rolling-policy dict per machine with a boot interface.

        Eager-loads interfaces and lifecycle_rolling to avoid N+1 queries.
        """
        life_roll_list = []
        for machine in self.session.query(Machine) \
                .join(LifecycleRolling) \
                .join(MachineInterface) \
                .options(joinedload("interfaces")) \
                .options(joinedload("lifecycle_rolling")) \
                .filter(MachineInterface.as_boot == True):
            try:
                life_roll_list.append({
                    "mac": machine.interfaces[0].mac,
                    "fqdn": machine.interfaces[0].fqdn,
                    "cidrv4": machine.interfaces[0].cidrv4,
                    "enable": bool(machine.lifecycle_rolling[0].enable),
                    "created_date": machine.lifecycle_rolling[0].created_date,
                    "updated_date": machine.lifecycle_rolling[0].updated_date
                })
            except IndexError:
                pass
        return life_roll_list
class BackupExport:
    """Rebuild the API-level payloads (/discovery and /scheduler) from the DB
    so they can be replayed against a fresh deployment."""

    def __init__(self, session: Session):
        self.session = session
        self.playbook = []

    @staticmethod
    def _construct_discovery(machine: Machine):
        """Reconstruct the /discovery payload for one machine.

        :param machine: Machine ORM row with interfaces and disks loaded.
        :raises LookupError: if the machine has no boot interface.
        """
        interfaces = []
        mac_boot = ""
        for interface in machine.interfaces:
            if interface.as_boot is True:
                mac_boot = interface.mac
            interfaces.append({
                'mac': interface.mac,
                'netmask': interface.netmask,
                'ipv4': interface.ipv4,
                # BUG FIX: previously duplicated interface.ipv4 under the
                # 'cidrv4' key; the interface model has a distinct cidrv4
                # attribute (used throughout FetchLifecycle above).
                'cidrv4': interface.cidrv4,
                'name': interface.name,
                "gateway": interface.gateway,
                "fqdn": [interface.fqdn]
            })
        if mac_boot == "":
            raise LookupError("fail to retrieve mac boot in %s" % interfaces)
        return {
            "boot-info": {
                "uuid": machine.uuid,
                "mac": mac_boot,
                "random-id": "",
            },
            "interfaces": interfaces,
            "disks": [{
                'size-bytes': k.size,
                'path': k.path
            } for k in machine.disks],
            # TODO LLDP
            "lldp": {
                'data': {'interfaces': None},
                'is_file': False
            },
            "ignition-journal": None
        }

    @staticmethod
    def _construct_schedule(mac: str, schedule_type: str):
        """
        Construct the schedule as the scheduler does
        :param mac:
        :param schedule_type:
        :return: dict
        """
        # TODO maybe decide to drop etcd-member because it's tricky to deal with two roles
        # etcd-member + kubernetes-control-plane: in fact it's only one
        if schedule_type == ScheduleRoles.kubernetes_control_plane:
            roles = [ScheduleRoles.kubernetes_control_plane, ScheduleRoles.etcd_member]
        else:
            roles = [ScheduleRoles.kubernetes_node]
        return {
            u"roles": roles,
            u'selector': {
                u"mac": mac
            }
        }

    def get_playbook(self):
        """
        Get and reproduce the data sent inside the db from an API level
        :return: list of {"data": ..., "route": ...} entries, control-plane
            machines first, then nodes.
        """
        # TODO use the ORM loading
        for schedule_type in [ScheduleRoles.kubernetes_control_plane, ScheduleRoles.kubernetes_node]:
            for schedule in self.session.query(Schedule).filter(Schedule.role == schedule_type):
                for machine in self.session.query(Machine).filter(Machine.id == schedule.machine_id):
                    discovery_data = self._construct_discovery(machine)
                    schedule_data = self._construct_schedule(discovery_data["boot-info"]["mac"], schedule_type)
                    self.playbook.append({"data": discovery_data, "route": "/discovery"})
                    self.playbook.append({"data": schedule_data, "route": "/scheduler"})
        return self.playbook
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from nova.api.openstack.compute import security_groups as \
secgroups_v21
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
import nova.db
from nova import exception
from nova import objects
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests import uuidsentinel as uuids
# Global config handle plus stable fake identifiers reused across the tests.
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
# Sentinel UUID representing "the" fake server in association tests.
UUID_SERVER = uuids.server
class AttrDict(dict):
    """Dict whose keys can also be read as attributes (d.key == d['key'])."""

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; falls through to
        # item access, so a missing key raises KeyError like the original.
        return self[name]
def security_group_request_template(**kwargs):
    """Return an API-request-style security group dict.

    Any keyword overrides the defaults; extra keywords pass through.
    """
    defaults = {'name': 'test', 'description': 'test-description'}
    return dict(defaults, **kwargs)
def security_group_template(**kwargs):
    """Return a security group dict with the standard test defaults.

    Like security_group_request_template but includes a tenant_id, as
    stored server-side.
    """
    defaults = {'tenant_id': '123',
                'name': 'test',
                'description': 'test-description'}
    return dict(defaults, **kwargs)
def security_group_db(security_group, id=None):
    """Convert an API-style group dict into its fake DB-record form.

    Renames tenant_id -> project_id, optionally forces an id, and
    guarantees 'rules'/'instances' lists exist. Returns an AttrDict so
    tests can use attribute access like a real DB object.
    """
    attrs = dict(security_group)
    if 'tenant_id' in attrs:
        attrs['project_id'] = attrs.pop('tenant_id')
    if id is not None:
        attrs['id'] = id
    for key in ('rules', 'instances'):
        attrs.setdefault(key, [])
    return AttrDict(attrs)
def security_group_rule_template(**kwargs):
    """Return a security group rule dict (default: tcp/22 under group 2)."""
    defaults = {'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'parent_group_id': 2}
    return dict(defaults, **kwargs)
def security_group_rule_db(rule, id=None):
    """Convert an API-style rule dict into its fake DB-record form.

    Renames ip_protocol -> protocol. The ``id`` parameter is accepted for
    signature parity with security_group_db but is not applied here.
    """
    attrs = dict(rule)
    if 'ip_protocol' in attrs:
        attrs['protocol'] = attrs.pop('ip_protocol')
    return AttrDict(attrs)
def return_server(context, server_id,
                  columns_to_join=None, use_slave=False):
    """Fake nova.db.instance_get: a running server on localhost."""
    values = {'id': 1,
              'power_state': 0x01,
              'host': "localhost",
              'uuid': server_id,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None,
                          use_slave=False):
    """Fake nova.db.instance_get_by_uuid: a running server on localhost."""
    values = {'id': 1,
              'power_state': 0x01,
              'host': "localhost",
              'uuid': server_uuid,
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_non_running_server(context, server_id, columns_to_join=None):
    """Fake nova.db.instance_get: a server in SHUTDOWN power state."""
    values = {'id': 1,
              'power_state': power_state.SHUTDOWN,
              'uuid': server_id,
              'host': "localhost",
              'name': 'asdf'}
    return fake_instance.fake_db_instance(**values)
def return_security_group_by_name(context, project_id, group_name,
                                  columns_to_join=None):
    """Fake group lookup: one group already attached to the fake server."""
    attached = [{'id': 1, 'uuid': UUID_SERVER}]
    return {'id': 1, 'name': group_name, "instances": attached}
def return_security_group_without_instances(context, project_id, group_name):
    """Fake group lookup: a group with no instance associations."""
    return {'name': group_name, 'id': 1}
def return_server_nonexistent(context, server_id, columns_to_join=None):
    # Fake db stub that behaves as if the requested instance does not exist.
    raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroupsV21(test.TestCase):
    """Tests for the v2.1 security group API controllers.

    Covers SecurityGroupController (CRUD + validation), the
    ServerSecurityGroupController (listing groups per server) and the
    SecurityGroupActionController (addSecurityGroup/removeSecurityGroup
    server actions), with the database layer stubbed out.
    """
    # Controller classes under test; subclasses may override these to run
    # the same suite against a different implementation.
    secgrp_ctl_cls = secgroups_v21.SecurityGroupController
    server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
    secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
    def setUp(self):
        super(TestSecurityGroupsV21, self).setUp()
        self.controller = self.secgrp_ctl_cls()
        self.server_controller = self.server_secgrp_ctl_cls()
        self.manager = self.secgrp_act_ctl_cls()
        # This needs to be done here to set fake_id because the derived
        # class needs to be called first if it wants to set
        # 'security_group_api' and this setUp method needs to be called.
        if self.controller.security_group_api.id_is_uuid:
            self.fake_id = '11111111-1111-1111-1111-111111111111'
        else:
            self.fake_id = '11111111'
        self.req = fakes.HTTPRequest.blank('')
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
    def _assert_no_security_groups_reserved(self, context):
        """Check that no reservations are leaked during tests."""
        result = quota.QUOTAS.get_project_quotas(context, context.project_id)
        self.assertEqual(result['security_groups']['reserved'], 0)
    def _assert_security_groups_in_use(self, project_id, user_id, in_use):
        # Helper: verify the per-user security group quota usage counter.
        context = context_maker.get_admin_context()
        result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
        self.assertEqual(result['security_groups']['in_use'], in_use)
    def test_create_security_group(self):
        sg = security_group_request_template()
        res_dict = self.controller.create(self.req, {'security_group': sg})
        self.assertEqual(res_dict['security_group']['name'], 'test')
        self.assertEqual(res_dict['security_group']['description'],
                         'test-description')
    def test_create_security_group_with_no_name(self):
        sg = security_group_request_template()
        del sg['name']
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req,
                          {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_no_description(self):
        sg = security_group_request_template()
        del sg['description']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_empty_description(self):
        sg = security_group_request_template()
        sg['description'] = ""
        try:
            self.controller.create(self.req, {'security_group': sg})
            self.fail('Should have raised BadRequest exception')
        except webob.exc.HTTPBadRequest as exc:
            self.assertEqual('description has a minimum character requirement'
                             ' of 1.', exc.explanation)
        except exception.InvalidInput:
            self.fail('Should have raised BadRequest exception instead of')
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_blank_name(self):
        sg = security_group_request_template(name='')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_whitespace_name(self):
        sg = security_group_request_template(name=' ')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_blank_description(self):
        sg = security_group_request_template(description='')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_whitespace_description(self):
        sg = security_group_request_template(description=' ')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_duplicate_name(self):
        sg = security_group_request_template()
        # FIXME: Stub out _get instead of creating twice
        self.controller.create(self.req, {'security_group': sg})
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_no_body(self):
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, None)
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_with_no_security_group(self):
        body = {'no-securityGroup': None}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body)
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_above_255_characters_name(self):
        sg = security_group_request_template(name='1234567890' * 26)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_above_255_characters_description(self):
        sg = security_group_request_template(description='1234567890' * 26)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_non_string_name(self):
        sg = security_group_request_template(name=12)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_non_string_description(self):
        sg = security_group_request_template(description=12)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group': sg})
        self._assert_no_security_groups_reserved(
            self.req.environ['nova.context'])
    def test_create_security_group_quota_limit(self):
        # Fill the quota, then expect Forbidden on the next create.
        for num in range(1, CONF.quota_security_groups):
            name = 'test%s' % num
            sg = security_group_request_template(name=name)
            res_dict = self.controller.create(self.req, {'security_group': sg})
            self.assertEqual(res_dict['security_group']['name'], name)
        sg = security_group_request_template()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          self.req, {'security_group': sg})
    def test_get_security_group_list(self):
        groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            groups.append(sg)
        expected = {'security_groups': groups}
        def return_security_groups(context, project_id):
            return [security_group_db(sg) for sg in groups]
        self.stub_out('nova.db.security_group_get_by_project',
                      return_security_groups)
        res_dict = self.controller.index(self.req)
        self.assertEqual(res_dict, expected)
    def test_get_security_group_list_missing_group_id_rule(self):
        groups = []
        rule1 = security_group_rule_template(cidr='10.2.3.124/24',
                                             parent_group_id=1,
                                             group_id={}, id=88,
                                             protocol='TCP')
        rule2 = security_group_rule_template(cidr='10.2.3.125/24',
                                             parent_group_id=1,
                                             id=99, protocol=88,
                                             group_id='HAS_BEEN_DELETED')
        sg = security_group_template(id=1,
                                     name='test',
                                     description='test-desc',
                                     rules=[rule1, rule2])
        groups.append(sg)
        # An expected rule here needs to be created as the api returns
        # different attributes on the rule for a response than what was
        # passed in. For example:
        #  "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
        expected_rule = security_group_rule_template(
            ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
            group={}, id=88, ip_protocol='TCP')
        expected = security_group_template(id=1,
                                           name='test',
                                           description='test-desc',
                                           rules=[expected_rule])
        expected = {'security_groups': [expected]}
        def return_security_groups(context, project, search_opts):
            return [security_group_db(sg) for sg in groups]
        # NOTE(review): legacy mox-style stubs.Set — the rest of this file
        # uses stub_out; consider mock.patch.object on the api instance.
        self.stubs.Set(self.controller.security_group_api, 'list',
                       return_security_groups)
        res_dict = self.controller.index(self.req)
        self.assertEqual(res_dict, expected)
    def test_get_security_group_list_all_tenants(self):
        all_groups = []
        tenant_groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            all_groups.append(sg)
            if name == 'default':
                tenant_groups.append(sg)
        all = {'security_groups': all_groups}
        tenant_specific = {'security_groups': tenant_groups}
        def return_all_security_groups(context):
            return [security_group_db(sg) for sg in all_groups]
        self.stub_out('nova.db.security_group_get_all',
                      return_all_security_groups)
        def return_tenant_security_groups(context, project_id):
            return [security_group_db(sg) for sg in tenant_groups]
        self.stub_out('nova.db.security_group_get_by_project',
                      return_tenant_security_groups)
        path = '/v2/fake/os-security-groups'
        req = fakes.HTTPRequest.blank(path, use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, tenant_specific)
        # With all_tenants=1 an admin sees every tenant's groups.
        req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
                                      use_admin_context=True)
        res_dict = self.controller.index(req)
        self.assertEqual(res_dict, all)
    def test_get_security_group_by_instance(self):
        groups = []
        for i, name in enumerate(['default', 'test']):
            sg = security_group_template(id=i + 1,
                                         name=name,
                                         description=name + '-desc',
                                         rules=[])
            groups.append(sg)
        expected = {'security_groups': groups}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_instance)
        def return_security_groups(context, instance_uuid):
            self.assertEqual(instance_uuid, FAKE_UUID1)
            return [security_group_db(sg) for sg in groups]
        self.stub_out('nova.db.security_group_get_by_instance',
                      return_security_groups)
        res_dict = self.server_controller.index(self.req, FAKE_UUID1)
        self.assertEqual(res_dict, expected)
    @mock.patch('nova.db.instance_get_by_uuid')
    @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
    def test_get_security_group_empty_for_instance(self, mock_sec_group,
                                                   mock_db_get_ins):
        expected = {'security_groups': []}
        def return_instance(context, server_id,
                            columns_to_join=None, use_slave=False):
            self.assertEqual(server_id, FAKE_UUID1)
            return return_server_by_uuid(context, server_id)
        mock_db_get_ins.side_effect = return_instance
        res_dict = self.server_controller.index(self.req, FAKE_UUID1)
        self.assertEqual(expected, res_dict)
        mock_sec_group.assert_called_once_with(
            self.req.environ['nova.context'], FAKE_UUID1)
    def test_get_security_group_by_instance_non_existing(self):
        self.stub_out('nova.db.instance_get', return_server_nonexistent)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_nonexistent)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.server_controller.index, self.req, '1')
    def test_get_security_group_by_instance_invalid_id(self):
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.server_controller.index, self.req, 'invalid')
    def test_get_security_group_by_id(self):
        sg = security_group_template(id=2, rules=[])
        def return_security_group(context, group_id, columns_to_join=None):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('nova.db.security_group_get',
                      return_security_group)
        res_dict = self.controller.show(self.req, '2')
        expected = {'security_group': sg}
        self.assertEqual(res_dict, expected)
    def test_get_security_group_by_invalid_id(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, 'invalid')
    def test_get_security_group_by_non_existing_id(self):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, self.fake_id)
    def test_update_security_group(self):
        sg = security_group_template(id=2, rules=[])
        sg_update = security_group_template(id=2, rules=[],
                        name='update_name', description='update_desc')
        def return_security_group(context, group_id, columns_to_join=None):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        def return_update_security_group(context, group_id, values,
                                         columns_to_join=None):
            self.assertEqual(sg_update['id'], group_id)
            self.assertEqual(sg_update['name'], values['name'])
            self.assertEqual(sg_update['description'], values['description'])
            return security_group_db(sg_update)
        self.stub_out('nova.db.security_group_update',
                      return_update_security_group)
        self.stub_out('nova.db.security_group_get',
                      return_security_group)
        res_dict = self.controller.update(self.req, '2',
                                          {'security_group': sg_update})
        expected = {'security_group': sg_update}
        self.assertEqual(res_dict, expected)
    def test_update_security_group_name_to_default(self):
        # Renaming a group to the reserved name 'default' is rejected.
        sg = security_group_template(id=2, rules=[], name='default')
        def return_security_group(context, group_id, columns_to_join=None):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('nova.db.security_group_get',
                      return_security_group)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, '2', {'security_group': sg})
    def test_update_default_security_group_fail(self):
        sg = security_group_template()
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          self.req, '1', {'security_group': sg})
    def test_delete_security_group_by_id(self):
        sg = security_group_template(id=1, project_id='fake_project',
                                     user_id='fake_user', rules=[])
        self.called = False
        def security_group_destroy(context, id):
            self.called = True
        def return_security_group(context, group_id, columns_to_join=None):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('nova.db.security_group_destroy',
                      security_group_destroy)
        self.stub_out('nova.db.security_group_get',
                      return_security_group)
        self.controller.delete(self.req, '1')
        self.assertTrue(self.called)
    def test_delete_security_group_by_admin(self):
        sg = security_group_request_template()
        self.controller.create(self.req, {'security_group': sg})
        context = self.req.environ['nova.context']
        # Ensure quota usage for security group is correct.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 2)
        # Delete the security group by admin.
        self.controller.delete(self.admin_req, '2')
        # Ensure quota for security group in use is released.
        self._assert_security_groups_in_use(context.project_id,
                                            context.user_id, 1)
    def test_delete_security_group_by_invalid_id(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, 'invalid')
    def test_delete_security_group_by_non_existing_id(self):
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, self.fake_id)
    def test_delete_security_group_in_use(self):
        sg = security_group_template(id=1, rules=[])
        def security_group_in_use(context, id):
            return True
        def return_security_group(context, group_id, columns_to_join=None):
            self.assertEqual(sg['id'], group_id)
            return security_group_db(sg)
        self.stub_out('nova.db.security_group_in_use',
                      security_group_in_use)
        self.stub_out('nova.db.security_group_get',
                      return_security_group)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, '1')
    def test_associate_by_non_existing_security_group_name(self):
        self.stub_out('nova.db.instance_get', return_server)
        self.assertEqual(return_server(None, '1'),
                         nova.db.instance_get(None, '1'))
        body = dict(addSecurityGroup=dict(name='non-existing'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_by_invalid_server_id(self):
        body = dict(addSecurityGroup=dict(name='test'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req,
                          'invalid', body)
    def test_associate_without_body(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(addSecurityGroup=None)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_no_security_group_name(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(addSecurityGroup=dict())
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_security_group_name_with_whitespaces(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(addSecurityGroup=dict(name="   "))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_non_existing_instance(self):
        self.stub_out('nova.db.instance_get', return_server_nonexistent)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_nonexistent)
        body = dict(addSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._addSecurityGroup, self.req, '1', body)
    def test_associate_non_running_instance(self):
        # Associating to a stopped instance is allowed (no exception).
        self.stub_out('nova.db.instance_get', return_non_running_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_non_running_server)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_without_instances)
        body = dict(addSecurityGroup=dict(name="test"))
        self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
    def test_associate_already_associated_security_group_to_instance(self):
        self.stub_out('nova.db.instance_get', return_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(addSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._addSecurityGroup, self.req,
                          UUID_SERVER, body)
    @mock.patch.object(nova.db, 'instance_add_security_group')
    def test_associate(self, mock_add_security_group):
        self.stub_out('nova.db.instance_get', return_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_without_instances)
        body = dict(addSecurityGroup=dict(name="test"))
        self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
        mock_add_security_group.assert_called_once_with(mock.ANY,
                                                        mock.ANY,
                                                        mock.ANY)
    def test_disassociate_by_non_existing_security_group_name(self):
        self.stub_out('nova.db.instance_get', return_server)
        self.assertEqual(return_server(None, '1'),
                         nova.db.instance_get(None, '1'))
        body = dict(removeSecurityGroup=dict(name='non-existing'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, self.req,
                          UUID_SERVER, body)
    def test_disassociate_by_invalid_server_id(self):
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name='test'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, self.req,
                          'invalid', body)
    def test_disassociate_without_body(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(removeSecurityGroup=None)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_no_security_group_name(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(removeSecurityGroup=dict())
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_security_group_name_with_whitespaces(self):
        self.stub_out('nova.db.instance_get', return_server)
        body = dict(removeSecurityGroup=dict(name="   "))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          '1', body)
    def test_disassociate_non_existing_instance(self):
        self.stub_out('nova.db.instance_get', return_server_nonexistent)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup,
                          self.req, '1', body)
    def test_disassociate_non_running_instance(self):
        # Disassociating from a stopped instance is allowed (no exception).
        self.stub_out('nova.db.instance_get', return_non_running_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_non_running_server)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
    def test_disassociate_already_associated_security_group_to_instance(self):
        self.stub_out('nova.db.instance_get', return_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_without_instances)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, self.req,
                          UUID_SERVER, body)
    @mock.patch.object(nova.db, 'instance_remove_security_group')
    def test_disassociate(self, mock_remove_sec_group):
        self.stub_out('nova.db.instance_get', return_server)
        self.stub_out('nova.db.instance_get_by_uuid',
                      return_server_by_uuid)
        self.stub_out('nova.db.security_group_get_by_name',
                      return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
        mock_remove_sec_group.assert_called_once_with(mock.ANY,
                                                      mock.ANY,
                                                      mock.ANY)
class TestSecurityGroupRulesV21(test.TestCase):
secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
def setUp(self):
super(TestSecurityGroupRulesV21, self).setUp()
self.controller = self.secgrp_ctl_cls()
if self.controller.security_group_api.id_is_uuid:
id1 = '11111111-1111-1111-1111-111111111111'
id2 = '22222222-2222-2222-2222-222222222222'
self.invalid_id = '33333333-3333-3333-3333-333333333333'
else:
id1 = 1
id2 = 2
self.invalid_id = '33333333'
self.sg1 = security_group_template(id=id1)
self.sg2 = security_group_template(
id=id2, name='authorize_revoke',
description='authorize-revoke testing')
db1 = security_group_db(self.sg1)
db2 = security_group_db(self.sg2)
def return_security_group(context, group_id, columns_to_join=None):
if group_id == db1['id']:
return db1
if group_id == db2['id']:
return db2
raise exception.SecurityGroupNotFound(security_group_id=group_id)
self.stub_out('nova.db.security_group_get',
return_security_group)
self.parent_security_group = db2
self.req = fakes.HTTPRequest.blank('')
def test_create_by_cidr(self):
rule = security_group_rule_template(cidr='10.2.3.124/24',
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
rule = security_group_rule_template(group_id=self.sg1['id'],
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
def test_create_by_same_group_id(self):
rule1 = security_group_rule_template(group_id=self.sg1['id'],
from_port=80, to_port=80,
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
rule2 = security_group_rule_template(group_id=self.sg1['id'],
from_port=81, to_port=81,
parent_group_id=self.sg2['id'])
res_dict = self.controller.create(self.req,
{'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEqual(security_group_rule['id'], 0)
self.assertEqual(security_group_rule['parent_group_id'],
self.sg2['id'])
self.assertEqual(security_group_rule['from_port'], 81)
self.assertEqual(security_group_rule['to_port'], 81)
def test_create_none_value_from_to_port(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id']}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertIsNone(security_group_rule['from_port'])
self.assertIsNone(security_group_rule['to_port'])
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_icmp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'ICMP'}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
self.assertEqual(security_group_rule['from_port'], -1)
self.assertEqual(security_group_rule['to_port'], -1)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_none_value_from_to_port_tcp(self):
rule = {'parent_group_id': self.sg1['id'],
'group_id': self.sg1['id'],
'ip_protocol': 'TCP'}
res_dict = self.controller.create(self.req,
{'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
self.assertEqual(security_group_rule['from_port'], 1)
self.assertEqual(security_group_rule['to_port'], 65535)
self.assertEqual(security_group_rule['group']['name'], 'test')
self.assertEqual(security_group_rule['parent_group_id'],
self.sg1['id'])
def test_create_by_invalid_cidr_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_by_invalid_tcp_port_json(self):
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=75534,
to_port=22,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_by_invalid_icmp_port_json(self):
rule = security_group_rule_template(
ip_protocol="icmp",
from_port=1,
to_port=256,
parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
rule = security_group_rule_template(cidr='10.0.0.0/24',
parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
rule = security_group_rule_template(group_id=1)
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_body(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, None)
def test_create_with_no_security_group_rule_in_body(self):
rules = {'test': 'test'}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, rules)
def test_create_with_invalid_parent_group_id(self):
rule = security_group_rule_template(parent_group_id='invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_non_existing_group_id(self):
rule = security_group_rule_template(group_id='invalid',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
rule = security_group_rule_template(cidr='10.2.2.0/24',
parent_group_id=self.sg2['id'])
del rule['ip_protocol']
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
self.req, {'security_group_rule': rule})
    def test_create_with_invalid_from_port(self):
        # from_port outside the valid port range -> 400 Bad Request.
        rule = security_group_rule_template(from_port='666666',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_invalid_to_port(self):
        # to_port outside the valid port range -> 400 Bad Request.
        rule = security_group_rule_template(to_port='666666',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_non_numerical_from_port(self):
        # Non-numeric from_port -> 400 Bad Request.
        rule = security_group_rule_template(from_port='invalid',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_non_numerical_to_port(self):
        # Non-numeric to_port -> 400 Bad Request.
        rule = security_group_rule_template(to_port='invalid',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_no_from_port(self):
        # Missing from_port -> 400 Bad Request.
        rule = security_group_rule_template(cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        del rule['from_port']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_no_to_port(self):
        # Missing to_port -> 400 Bad Request.
        rule = security_group_rule_template(cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        del rule['to_port']
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_invalid_cidr(self):
        # Malformed CIDR (octet out of range) -> 400 Bad Request.
        rule = security_group_rule_template(cidr='10.2.2222.0/24',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_no_cidr_group(self):
        # No cidr and no source group: rule is accepted and defaults to the
        # match-everything range 0.0.0.0/0.
        rule = security_group_rule_template(parent_group_id=self.sg2['id'])
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.parent_security_group['id'])
        self.assertEqual(security_group_rule['ip_range']['cidr'],
                         "0.0.0.0/0")
    def test_create_with_invalid_group_id(self):
        # Malformed group_id -> 400 Bad Request.
        rule = security_group_rule_template(group_id='invalid',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_empty_group_id(self):
        # Empty-string group_id -> 400 Bad Request.
        rule = security_group_rule_template(group_id='',
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_nonexist_group_id(self):
        # Well-formed but unknown group_id -> 404 Not Found.
        rule = security_group_rule_template(group_id=self.invalid_id,
                                            parent_group_id=self.sg2['id'])
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_with_same_group_parent_id_and_group_id(self):
        # A group may reference itself as both source group and parent.
        rule = security_group_rule_template(group_id=self.sg1['id'],
                                            parent_group_id=self.sg1['id'])
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.sg1['id'])
        self.assertEqual(security_group_rule['group']['name'],
                         self.sg1['name'])
    def _test_create_with_no_ports_and_no_group(self, proto):
        # Helper: a rule without ports must at least name a source group,
        # otherwise the API rejects it with 400.
        rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def _test_create_with_no_ports(self, proto):
        # Helper: with a source group but no ports, the rule is created and
        # the port range defaults to 1..65535 (or -1/-1 for icmp).
        rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']}
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
            self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        if proto == 'icmp':
            # ICMP has no port semantics; the API defaults both ends to -1.
            expected_rule['to_port'] = -1
            expected_rule['from_port'] = -1
        self.assertEqual(expected_rule, security_group_rule)
    def test_create_with_no_ports_icmp(self):
        self._test_create_with_no_ports_and_no_group('icmp')
        self._test_create_with_no_ports('icmp')
    def test_create_with_no_ports_tcp(self):
        self._test_create_with_no_ports_and_no_group('tcp')
        self._test_create_with_no_ports('tcp')
    def test_create_with_no_ports_udp(self):
        self._test_create_with_no_ports_and_no_group('udp')
        self._test_create_with_no_ports('udp')
    def _test_create_with_ports(self, proto, from_port, to_port):
        # Helper: create a rule with an explicit port range and verify the
        # response echoes every field back unchanged.
        rule = {
            'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': from_port,
            'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
            self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        self.assertEqual(proto, security_group_rule['ip_protocol'])
        self.assertEqual(from_port, security_group_rule['from_port'])
        self.assertEqual(to_port, security_group_rule['to_port'])
        self.assertEqual(expected_rule, security_group_rule)
    def test_create_with_ports_icmp(self):
        # For icmp, from/to are type/code, so 0 is a legal value.
        self._test_create_with_ports('icmp', 0, 1)
        self._test_create_with_ports('icmp', 0, 0)
        self._test_create_with_ports('icmp', 1, 0)
    def test_create_with_ports_tcp(self):
        self._test_create_with_ports('tcp', 1, 1)
        self._test_create_with_ports('tcp', 1, 65535)
        self._test_create_with_ports('tcp', 65535, 65535)
    def test_create_with_ports_udp(self):
        self._test_create_with_ports('udp', 1, 1)
        self._test_create_with_ports('udp', 1, 65535)
        self._test_create_with_ports('udp', 65535, 65535)
    def test_delete(self):
        # Stub the DB layer so delete succeeds against a known rule id.
        rule = security_group_rule_template(id=self.sg2['id'],
                                            parent_group_id=self.sg2['id'])
        def security_group_rule_get(context, id):
            return security_group_rule_db(rule)
        def security_group_rule_destroy(context, id):
            pass
        self.stub_out('nova.db.security_group_rule_get',
                      security_group_rule_get)
        self.stub_out('nova.db.security_group_rule_destroy',
                      security_group_rule_destroy)
        self.controller.delete(self.req, self.sg2['id'])
    def test_delete_invalid_rule_id(self):
        # Malformed rule id -> 400 Bad Request.
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          self.req, 'invalid')
    def test_delete_non_existing_rule_id(self):
        # Unknown rule id -> 404 Not Found.
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          self.req, self.invalid_id)
    def test_create_rule_quota_limit(self):
        # Fill the rule quota exactly, then expect 403 on the next create.
        for num in range(100, 100 + CONF.quota_security_group_rules):
            rule = {
                'ip_protocol': 'tcp', 'from_port': num,
                'to_port': num, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']
            }
            self.controller.create(self.req, {'security_group_rule': rule})
        rule = {
            'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
                          self.req, {'security_group_rule': rule})
    def test_create_rule_cidr_allow_all(self):
        # IPv4 allow-all CIDR is accepted and echoed back verbatim.
        rule = security_group_rule_template(cidr='0.0.0.0/0',
                                            parent_group_id=self.sg2['id'])
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.parent_security_group['id'])
        self.assertEqual(security_group_rule['ip_range']['cidr'],
                         "0.0.0.0/0")
    def test_create_rule_cidr_ipv6_allow_all(self):
        # IPv6 allow-all CIDR is accepted and echoed back verbatim.
        rule = security_group_rule_template(cidr='::/0',
                                            parent_group_id=self.sg2['id'])
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.parent_security_group['id'])
        self.assertEqual(security_group_rule['ip_range']['cidr'],
                         "::/0")
    def test_create_rule_cidr_allow_some(self):
        # A narrower CIDR is stored verbatim.
        rule = security_group_rule_template(cidr='15.0.0.0/8',
                                            parent_group_id=self.sg2['id'])
        res_dict = self.controller.create(self.req,
                                          {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEqual(security_group_rule['id'], 0)
        self.assertEqual(security_group_rule['parent_group_id'],
                         self.parent_security_group['id'])
        self.assertEqual(security_group_rule['ip_range']['cidr'],
                         "15.0.0.0/8")
    def test_create_rule_cidr_bad_netmask(self):
        # Non-zero address bits with a /0 netmask -> 400 Bad Request.
        rule = security_group_rule_template(cidr='15.0.0.0/0')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, {'security_group_rule': rule})
# Instance UUIDs referenced by the fake compute API stubs below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
    """Fake compute.api.API.get_all returning two stub instances.

    Instance i (i in {0, 1}) carries two security groups named
    'fake-i-0' and 'fake-i-1'.
    """
    common = {'id': 1, 'description': 'foo', 'user_id': 'bar',
              'project_id': 'baz', 'deleted': False, 'deleted_at': None,
              'updated_at': None, 'created_at': None}

    def _secgroups(idx):
        # Two group records per instance, differing only in name.
        return [dict(common, name='fake-%d-0' % idx),
                dict(common, name='fake-%d-1' % idx)]

    instances = [
        fakes.stub_instance_obj(None, 1, uuid=UUID1,
                                security_groups=_secgroups(0)),
        fakes.stub_instance_obj(None, 2, uuid=UUID2,
                                security_groups=_secgroups(1)),
    ]
    return objects.InstanceList(objects=instances)
def fake_compute_get(*args, **kwargs):
    """Fake compute.api.API.get returning one stub instance (UUID3)
    with security groups 'fake-2-0' and 'fake-2-1'.
    """
    instance = fakes.stub_instance_obj(None, 1, uuid=UUID3)
    group_list = objects.SecurityGroupList()
    group_list.objects = [objects.SecurityGroup(name='fake-2-%d' % i)
                          for i in (0, 1)]
    instance.security_groups = group_list
    return instance
def fake_compute_create(*args, **kwargs):
    """Fake compute.api.API.create: one stub instance plus an empty
    reservation id, mirroring the real (instances, resv_id) tuple.
    """
    instance = fake_compute_get(*args, **kwargs)
    return [instance], ''
def fake_get_instances_security_groups_bindings(inst, context, servers):
    """Map each server id in *servers* to its canned security-group list.

    Unknown ids map to None (dict.get default), like the original stub.
    """
    bindings = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
                UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
                UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
    return {server['id']: bindings.get(server['id']) for server in servers}
class SecurityGroupsOutputTestV21(test.TestCase):
    """Verify servers API responses embed the security_groups attribute."""
    base_url = '/v2/fake/servers'
    content_type = 'application/json'
    def setUp(self):
        super(SecurityGroupsOutputTestV21, self).setUp()
        fakes.stub_out_nw_api(self)
        # Replace the compute API with the canned fakes defined above.
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create', fake_compute_create)
        self.app = self._setup_app()
    def _setup_app(self):
        # Only load the extensions under test.
        return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
    def _make_request(self, url, body=None):
        # GET by default; providing a body switches the request to POST.
        req = fakes.HTTPRequest.blank(url)
        if body:
            req.method = 'POST'
            req.body = encodeutils.safe_encode(self._encode_body(body))
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(self.app)
        return res
    def _encode_body(self, body):
        return jsonutils.dumps(body)
    def _get_server(self, body):
        return jsonutils.loads(body).get('server')
    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')
    def _get_groups(self, server):
        return server.get('security_groups')
    def test_create(self):
        # Created server (fake_compute_create) reports the 'fake-2-*' groups.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(self.base_url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_show(self):
        url = self.base_url + '/' + UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_detail(self):
        # Each listed server i carries groups 'fake-i-0' and 'fake-i-1'.
        url = self.base_url + '/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)
    def test_no_instance_passthrough_404(self):
        # InstanceNotFound raised by compute must surface as HTTP 404.
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
    """Check the output controller soft-fails (does not raise) when the
    security-groups policy denies the caller.
    """
    def setUp(self):
        super(SecurityGroupsOutputPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupsOutputController()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        # A rule the fake request's project can never satisfy.
        self.rule = {self.rule_name: "project:non_fake"}
        self.policy.set_rules(self.rule)
        self.fake_res = wsgi.ResponseObject({
            'server': {'id': '0'},
            'servers': [{'id': '0'}, {'id': '2'}]})
    @mock.patch('nova.policy.authorize')
    def test_show_policy_softauth_is_called(self, mock_authorize):
        # Denied policy must not raise; authorize is still consulted.
        mock_authorize.return_value = False
        self.controller.show(self.req, self.fake_res, FAKE_UUID1)
        self.assertTrue(mock_authorize.called)
    @mock.patch.object(nova.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_show_policy_failed(self, is_neutron_security_groups):
        # With the policy denied, show returns before touching the driver.
        self.controller.show(self.req, self.fake_res, FAKE_UUID1)
        self.assertFalse(is_neutron_security_groups.called)
    @mock.patch('nova.policy.authorize')
    def test_create_policy_softauth_is_called(self, mock_authorize):
        mock_authorize.return_value = False
        # Bug fix: this test previously exercised show(); it is meant to
        # cover create(), mirroring test_create_policy_failed below.
        self.controller.create(self.req, self.fake_res, {})
        self.assertTrue(mock_authorize.called)
    @mock.patch.object(nova.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_create_policy_failed(self, is_neutron_security_groups):
        # With the policy denied, create returns before touching the driver.
        self.controller.create(self.req, self.fake_res, {})
        self.assertFalse(is_neutron_security_groups.called)
    @mock.patch('nova.policy.authorize')
    def test_detail_policy_softauth_is_called(self, mock_authorize):
        mock_authorize.return_value = False
        self.controller.detail(self.req, self.fake_res)
        self.assertTrue(mock_authorize.called)
    @mock.patch.object(nova.network.security_group.openstack_driver,
                       "is_neutron_security_groups")
    def test_detail_policy_failed(self, is_neutron_security_groups):
        # With the policy denied, detail returns before touching the driver.
        self.controller.detail(self.req, self.fake_res)
        self.assertFalse(is_neutron_security_groups.called)
class PolicyEnforcementV21(test.NoDBTestCase):
    """Base class: assert an API callable rejects callers that fail the
    os-security-groups policy rule.
    """
    def setUp(self):
        super(PolicyEnforcementV21, self).setUp()
        self.req = fakes.HTTPRequest.blank('')
        self.rule_name = "os_compute_api:os-security-groups"
        self.rule = {self.rule_name: "project:non_fake"}
    def _common_policy_check(self, func, *arg, **kwarg):
        """Install the restrictive rule and verify *func* raises
        PolicyNotAuthorized with the expected message.
        """
        self.policy.set_rules(self.rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        expected = ("Policy doesn't allow %s to be performed." %
                    self.rule_name)
        self.assertEqual(expected, exc.format_message())
class SecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    """Every SecurityGroupController action is guarded by the policy."""
    def setUp(self):
        super(SecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupController()
    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})
    def test_show_policy_failed(self):
        self._common_policy_check(self.controller.show, self.req, FAKE_UUID1)
    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)
    def test_update_policy_failed(self):
        self._common_policy_check(
            self.controller.update, self.req, FAKE_UUID1, {})
class ServerSecurityGroupPolicyEnforcementV21(PolicyEnforcementV21):
    """Listing a server's security groups is guarded by the policy."""
    def setUp(self):
        super(ServerSecurityGroupPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.ServerSecurityGroupController()
    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req, FAKE_UUID1)
class SecurityGroupRulesPolicyEnforcementV21(PolicyEnforcementV21):
    """Rule create/delete are guarded by the policy."""
    def setUp(self):
        super(SecurityGroupRulesPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupRulesController()
    def test_create_policy_failed(self):
        self._common_policy_check(self.controller.create, self.req, {})
    def test_delete_policy_failed(self):
        self._common_policy_check(self.controller.delete, self.req, FAKE_UUID1)
class SecurityGroupActionPolicyEnforcementV21(PolicyEnforcementV21):
    """The add/removeSecurityGroup server actions are guarded by policy."""
    def setUp(self):
        super(SecurityGroupActionPolicyEnforcementV21, self).setUp()
        self.controller = secgroups_v21.SecurityGroupActionController()
    def test_add_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._addSecurityGroup, self.req, FAKE_UUID1, {})
    def test_remove_security_group_policy_failed(self):
        self._common_policy_check(
            self.controller._removeSecurityGroup, self.req, FAKE_UUID1, {})
class TestSecurityGroupsDeprecation(test.NoDBTestCase):
    """From microversion 2.36 the security-groups API is unavailable."""
    def setUp(self):
        super(TestSecurityGroupsDeprecation, self).setUp()
        self.controller = secgroups_v21.SecurityGroupController()
        # Request pinned at the microversion where the API was removed.
        self.req = fakes.HTTPRequest.blank('', version='2.36')
    def test_all_apis_return_not_found(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.delete, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.index, self.req)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.update, self.req, fakes.FAKE_UUID, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.create, self.req, {})
class TestSecurityGroupRulesDeprecation(test.NoDBTestCase):
    """From microversion 2.36 the security-group-rules API is unavailable."""
    def setUp(self):
        super(TestSecurityGroupRulesDeprecation, self).setUp()
        self.controller = secgroups_v21.SecurityGroupRulesController()
        # Request pinned at the microversion where the API was removed.
        self.req = fakes.HTTPRequest.blank('', version='2.36')
    def test_all_apis_return_not_found(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.create, self.req, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.delete, self.req, fakes.FAKE_UUID)
|
|
# -*- coding: utf-8 -*-
# Copyright (C) Vincent BESANCON <besancon.vincent@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""SSH probe module."""
import logging as log
import string
import socket
from datetime import datetime
import ssh
from monitoring.nagios.probes import Probe
from monitoring.nagios.exceptions import NagiosUnknown
logger = log.getLogger('monitoring.nagios.probes')
class CommandResult(object):
    """
    Wrap the result of a remote command executed over an :class:`ssh.Channel`.

    It takes a :class:`ssh.Channel` object as its first parameter, then
    provides the following attributes:

    .. attribute:: CommandResult.input

       Writable file-like object: stdin of the remote command.

    .. attribute:: CommandResult.output

       List of stripped lines read from the command's stdout.

    .. attribute:: CommandResult.errors

       List of stripped lines read from the command's stderr.

    .. attribute:: CommandResult.status

       Integer exit code of the remote command.
    """
    def __init__(self, channel):
        # File-like stream connected to the remote process stdin.
        self.input = channel.makefile('wb', -1)
        # NOTE: ``string.strip`` and list-returning ``map`` are Python 2
        # idioms; this module predates Python 3 support (see the ``ssh``
        # package import at the top of the file).
        self.output = map(string.strip, channel.makefile('rb', -1).readlines())
        self.errors = map(
            string.strip, channel.makefile_stderr('rb', -1).readlines())
        # Exit code of the remote command (presumably blocks until the
        # command terminates -- see the ssh/paramiko Channel docs).
        self.status = channel.recv_exit_status()
class ProbeSSH(Probe):
    """
    A SSH probe.

    :param hostaddress: The host to connect to.
    :type hostaddress: str
    :param port: The remote port the remote host listen on.
    :type port: int
    :param username: Login user name. Default is to use the current
                     authenticated user.
    :type username: str
    :param password: Login user password. Default is to use the public key.
    :type password: str
    :param timeout: Connection timeout in seconds (default to 10 secs).
    :type timeout: float
    """
    class SSHError(Exception):
        """Base class for all SSH related errors."""
        def __init__(self, message):
            self.message = message
        def __str__(self):
            return self.message
    class SSHCommandFailed(SSHError):
        """Exception triggered when a SSH command has failed."""
        pass
    class SSHCommandNotFound(SSHError):
        """Exception triggered when a SSH command is not found."""
        pass
    class SSHCommandTimeout(SSHError):
        """Exception triggered when a SSH command timed out."""
        pass
    def __init__(self, hostaddress='', port=22, username=None, password=None,
                 timeout=10.0):
        super(ProbeSSH, self).__init__()
        self.hostaddress = hostaddress
        self.username = username
        self._password = password
        self.port = port
        self.timeout = timeout
        try:
            # Accept unknown host keys: monitoring hosts are not expected
            # to maintain a known_hosts file.
            self._ssh_client = ssh.SSHClient()
            self._ssh_client.set_missing_host_key_policy(
                ssh.MissingHostKeyPolicy())
            self._ssh_client.connect(self.hostaddress,
                                     self.port,
                                     self.username,
                                     self._password,
                                     timeout=self.timeout,
                                     compress=True)
        except ssh.SSHException as e:
            raise NagiosUnknown('''Cannot establish a SSH connection on remote
server !
Host: %s
Port: %s
Message: %s''' % (self.hostaddress, self.port, e))
        except Exception as e:
            raise NagiosUnknown('''Unexpected error during SSH connection !
Host: %s
Port: %s
Message: %s''' % (self.hostaddress, self.port, e))
    def execute(self, command, timeout=None):
        """
        Execute a command on the remote server and return results.

        :param command: Command line to execute on the remote server.
        :type command: str, unicode
        :param timeout: Command execution timeout. Default to 10 secs.
        :type timeout: float
        :return: An instance of :class:`CommandResult`.
        :raises ProbeSSH.SSHCommandTimeout: if executed command timed out.
        """
        logger.debug('Execute SSH command: {}'.format(command))
        # Set global timeout if not specified
        if not timeout:
            timeout = self.timeout
        logger.debug('Timeout is set to %f.', timeout)
        try:
            # Each command runs in its own session channel.
            chan = self._ssh_client.get_transport().open_session()
            chan.settimeout(timeout)
            chan.exec_command(command)
            cmd_results = CommandResult(chan)
        except socket.timeout:
            raise self.SSHCommandTimeout("The command execution has timed out !"
                                         "\nCommand: {}"
                                         "\nTimeout: {}s".format(command,
                                                                 timeout))
        return cmd_results
    def close(self):
        """
        Close the SSH connection.
        """
        self._ssh_client.close()
    def list_files(self, directory='.', glob='*', depth=1):
        """
        List all files in a directory. Optionally, you can specify a pattern
        to filter files.

        :param directory: Directory to look in. Default is the current working
                          directory.
        :type directory: str
        :param glob: Pattern to filter files. Default to '*' all.
        :type glob: str
        :param depth: Recursive level for scanning files. Default to disable
                      recursive scanning.
        :type depth: int
        :return: list(str)
        """
        # Fix: -maxdepth is a global option for GNU find and must appear
        # before tests such as -name, otherwise find prints a warning on
        # stderr for every invocation (behavior is otherwise identical).
        find = 'find {0} -maxdepth {2} -name \'{1}\''.format(
            directory, glob, depth)
        files = self.execute(find).output
        return files
    def get_file_lastmodified_timestamp(self, filename,
                                        stime='/usr/local/nagios/bin/stime'):
        """
        Return the last modified Unix Epoch timestamp of a file.

        :param filename: path to the file that should be checked.
        :param stime: location of the stime binary. Default to
                      ``/usr/local/nagios/bin/stime``.
        :return: Unix timestamp.
        :rtype: int
        :raises ProbeSSH.SSHCommandNotFound: if the stime binary is missing.
        :raises ProbeSSH.SSHCommandFailed: if stime exits non-zero.
        :raises ProbeSSH.SSHError: if stime output is not an integer.
        """
        logger.debug('Calling method get_file_lastmodified_timestamp().')
        stime_command = "{0} -m {1}".format(stime, filename)
        command = self.execute(stime_command)
        # Exit status 127 is the shell's "command not found".
        if command.status == 127:
            raise self.SSHCommandNotFound(
                'Unable to find stime binary: {} !'.format(stime))
        elif command.status != 0:
            raise self.SSHCommandFailed(
                'Problem during the execution of stime !\n'
                'Command: {0}\n'
                'Output: {1.output}\n'
                'Errors: {1.errors}'.format(stime_command, command))
        # The timestamp is expected on the last line of output.
        ts = command.output.pop()
        try:
            ts = int(ts)
        except ValueError as e:
            raise self.SSHError(
                "Unexpected result in output of stime: {}".format(e))
        return ts
    def get_file_lastmodified_minutes(self, filename, **kwargs):
        """
        Return minutes since file was last modified.

        Extra keyword arguments are forwarded to
        :meth:`get_file_lastmodified_timestamp` (e.g. ``stime``).

        :param filename: path to the file that should be checked.
        :return: Minutes.
        :rtype: int
        """
        logger.debug('Calling method get_file_lastmodified_minutes().')
        last_modified_timestamp = self.get_file_lastmodified_timestamp(
            filename, **kwargs)
        now = datetime.today()
        last_modified_totalsecs = (now - datetime.fromtimestamp(
            last_modified_timestamp)).total_seconds()
        # divmod(secs, 60)[0] is the whole number of elapsed minutes.
        last_modified_time = divmod(last_modified_totalsecs, 60)
        return int(last_modified_time[0])
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_phsen
@file marine-integrations/mi/dataset/parser/test/test_phsen.py
@author Emily Hahn
@brief Test code for a Phsen data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.parser.sio_mule_common import StateKey
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.core.instrument.data_particle import DataParticleKey
from mi.dataset.parser.phsen import PhsenParser, PhsenParserDataParticle
from mi.dataset.parser.phsen import PhsenControlDataParticle
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi',
'dataset', 'driver', 'mflm',
'phsen', 'resource')
@attr('UNIT', group='mi')
class PhsenParserUnitTestCase(ParserUnitTestCase):
"""
Phsen Parser unit test suite
"""
    def state_callback(self, state):
        """Record the parser state passed via the position callback."""
        self.state_callback_value = state
    def pub_callback(self, pub):
        """Record the particles passed via the publish callback."""
        self.publish_callback_value = pub
    def exception_callback(self, exception):
        """Record any exception passed via the exception callback."""
        self.exception_callback_value = exception
    def setUp(self):
        """Build the parser config and the expected particles from raw
        SIO mule file fragments (byte offsets noted per particle)."""
        ParserUnitTestCase.setUp(self)
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.phsen',
            DataSetDriverConfigKeys.PARTICLE_CLASS: ['PhsenParserDataParticle',
                                                     'PhsenControlDataParticle']
            }
        # Define test data particles and their associated timestamps which will be
        # compared with returned results
        # starts file index 367
        self.particle_control = PhsenControlDataParticle('51EFC1C1*2B1281CE1572\x93\xf4' \
            '\x82\x1e\xd3\x1e\x82\xfe0004000000000236A6\r')
        self.particle_a = PhsenParserDataParticle('51EFC1C1^0A\r*2BE70ACE15724007EF0C5'
            '707A208AA09E10C5F07A008AD09E10C6007A108AB09E00C5907A408AA09E10C6007A408AA'
            '09E00C5C07A308A809E00C5B07A108A609E00C62079E08AC09D80C5E076808A809720C5E0'
            '6ED08AA088D0C5C067508A907B70C5E062F08AC073F0C5F061808AA071C0C5E062908A907'
            '360C5D064C08AB07720C5B067808A707BC0C5906A608A7080F0C6006D008A808590C5C06F'
            '608AA089E0C61071708AB08D90C5C072F08A909070C60074408A7092F0C5E075508AA0950'
            '0C5E076408A8096B0C5B076E08A8097F0C5C077A08AA09910C5D077E08AC099B00000C2807EF17\r')
        # starts file index 1107
        self.particle_b = PhsenParserDataParticle('51F01621^0A\r*2BE70AC\x8515C69F083'
            'C0C6607A608CB0A0D0C6507A708CC0A0C0C6A07A808CF0A0C0C6A07A508D10A0A0C6607A'
            '908CF0A0F0C6B07A908D10A0D0C6807A708C90A0D0C6C07A708D10A0D0C6707A808CE0A0'
            'E0C6607A608CF0A0D0C6807A808CD0A0B0C6907A708CD0A0B0C6707A508CF0A0B0C6707A'
            '808CC0A0C0C6C07A608D20A0B0C6A07A808CD0A0C0C6907A708CE0A0C0C6A07A808CD0A0'
            'D0C6807A708CB0A0C0C6807A808CD0A0D0C6807A808CE0A0E0C6307A708C90A0B0C6607A7'
            '08CD0A0A0C6507A708C80A0D0C6907A708CA0A0B0C6807A708CE0A0C0C6607A708CF0A0B0'
            '0000C27083D64\r')
        # starts file index 1805
        self.particle_c = PhsenParserDataParticle('51F06A81^0A\r*2BE70ACE161AFF07FE0C6507AB' \
            '08B609F90C5D07A808B709F70C6207AA08B709F50C5F07A908B809F70C6207A808B709' \
            'F60C6107A808B409F40C60079508B309D00C6006BC08B708400C6504D708B5052B0C64' \
            '031D08B802D50C62022308B601B80C6501BC08B901520C5F01AE08B601430C6001D708' \
            'B701680C62022708B501BA0C62029708B902300C60031708B902C50C5903A008B20371' \
            '0C63042908B804290C6404AA08B804E20C64052008B405930C63058808B706360C6405' \
            'E308BA06CA0C63062E08B707490C61066D08B607B50C6506A408B708140C6206D008B5' \
            '086200000C2807FF24\r')
        # starts file index 13742
        self.particle_d = PhsenParserDataParticle('51F0BEE1^0A\r*2BE70ACE1\x9e6F5F081' \
            '50C6E07AD08DC0A1E0C6D07AE08DA0A210C6C07AB08DC0A1E0C6D07AD08D70A1E0C6C07A' \
            'E08DA0A210C6B07AD08D50A1F0C6A07A708D60A120C6D075508D509730C6B061308D8073' \
            '40C6D046F08D904A20C6B035508D903220C6B02DB08D7028A0C6F02CC08D702760C6B02F' \
            '708D502A90C6E034708D8030C0C6B03AC08D4038D0C65041A08D504230C6B048608D404C' \
            '10C6F04F508DB05640C6F055A08D806000C6905B508D406920C6D060508D5071A0C6E064' \
            'B08D407910C67068408D507F60C6F06B708D808510C6C06DD08D608970C70070108DB08D' \
            '500000C260816B2\r')
        # starts file index 14171
        self.particle_e = PhsenParserDataParticle('51F11341^0A\r*2BE70ACE16C3BF07FC0C6007A70' \
            '8B609F70C6107A708B509F50C6107A608B609F50C6307A808BB09F60C5D07A908B709F5' \
            '0C6107A908B809F60C65079208B809CF0C6106A408BA08250C61049708B804E70C6302D' \
            '308B702930C5D01F508B3019F0C6001AD08B901500C6301B008B201500C5E01E208B301' \
            '800C62023708B701DB0C6302A408B702500C65031C08B902DC0C63039A08B5037A0C630' \
            '41808B804240C5C048F08B104CD0C64050208B805720C63056508B6060D0C6305BF08B5' \
            '069C0C64060B08B7071A0C61064D08B507890C61068408B307E70C6306B308B60837000' \
            '00C2708002A\r')
        self.particle_f = PhsenParserDataParticle('51F167A0^0A\r*2BE70ACE17181F07E70C6207AA08' \
            'C50A0A0C6907AC08CA0A0B0C6207AB08C50A0B0C6607A908C60A0A0C6807A908BF0A080C' \
            '6B07AA08C70A0A0C67079208C509DC0C6906B908C808520C6404BD08C3051C0C63033D08' \
            'C5030C0C6D028708C8022E0C66024D08C301EA0C69025C08C801FC0C67029808C9023D0C' \
            '6402F008C102A10C65035308C4031C0C6703C008C703A70C67043008C8043F0C68049B08' \
            'C904D80C6C050308C705740C68055E08C506070C6605B608C506910C67060308C6070C0C' \
            '67064308C4077A0C66067A08C707D70C6706A708C508280C6706CD08C2086E00000C2507' \
            'E88D\r')
        # Callback capture slots, filled by the callbacks defined above.
        self.state_callback_value = None
        self.publish_callback_value = None
        self.exception_callback_value = None
    def assert_result(self, result, in_process_data, unprocessed_data, particle):
        """Assert a single-particle result, the parser state, and that the
        particle was also delivered via the publish callback."""
        self.assertEqual(result, [particle])
        self.assert_state(in_process_data, unprocessed_data)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], particle)
    def assert_state(self, in_process_data, unprocessed_data):
        """Assert the parser's internal state and the state-callback copy
        both carry the expected in-process/unprocessed block lists."""
        self.assertEqual(self.parser._state[StateKey.IN_PROCESS_DATA], in_process_data)
        self.assertEqual(self.parser._state[StateKey.UNPROCESSED_DATA], unprocessed_data)
        self.assertEqual(self.state_callback_value[StateKey.IN_PROCESS_DATA], in_process_data)
        self.assertEqual(self.state_callback_value[StateKey.UNPROCESSED_DATA], unprocessed_data)
    def test_simple(self):
        """
        Read test data and pull out data particles one at a time.
        Assert that the results are those we expected.
        """
        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_shorter.dat'))
        # Start with the first 9000 bytes unprocessed, nothing in process.
        state = {StateKey.UNPROCESSED_DATA:[[0, 9000]],
            StateKey.IN_PROCESS_DATA:[],
            StateKey.FILE_SIZE: 17600}
        self.parser = PhsenParser(self.config, state, stream_handle,
                                  self.state_callback, self.pub_callback, self.exception_callback)
        result = self.parser.get_records(1)
        # First record is the control particle at file offset 367.
        in_process = [[367, 911, 2, 1], [1106, 1610, 1, 0], [1804, 2308, 1, 0]]
        unprocessed = [[0, 172], [367,911], [1106, 1610], [1804, 2308], [4100, 4171],
            [5899, 5968], [7697, 7764], [8636, 9000]]
        self.assert_result(result, in_process, unprocessed, self.particle_control)
        result = self.parser.get_records(1)
        in_process = [[1106, 1610, 1, 0], [1804, 2308, 1, 0]]
        unprocessed = [[0, 172], [1106, 1610], [1804, 2308], [4100, 4171],
            [5899, 5968], [7697, 7764], [8636, 9000]]
        self.assert_result(result, in_process, unprocessed, self.particle_a)
        result = self.parser.get_records(1)
        in_process = [[1804, 2308, 1, 0]]
        unprocessed = [[0, 172], [1804, 2308], [4100, 4171],
            [5899, 5968], [7697, 7764], [8636, 9000]]
        self.assert_result(result, in_process, unprocessed, self.particle_b)
        stream_handle.close()
    def test_get_many(self):
        """
        Read test data and pull out multiple data particles at one time.
        Assert that the results are those we expected.
        """
        # whole file is unprocessed at the start
        state = {StateKey.UNPROCESSED_DATA:[[0, 17600]],
            StateKey.IN_PROCESS_DATA:[],
            StateKey.FILE_SIZE: 17600}
        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_shorter.dat'))
        self.parser = PhsenParser(self.config, state, stream_handle, self.state_callback,
                                  self.pub_callback, self.exception_callback)

        # pull all seven expected particles in a single call
        result = self.parser.get_records(7)
        stream_handle.close()
        self.assertEqual(result,
                         [self.particle_control, self.particle_a, self.particle_b,
                          self.particle_c, self.particle_d, self.particle_e, self.particle_f])
        # the remaining in process data is actually a particle with a bad sample
        in_process = [[15536, 16040, 1, 0], [16301, 16805, 1, 0], [16998, 17502, 1, 0]]
        unprocessed = [[0, 172], [4100, 4171], [5899, 5968], [7697, 7764],[9654,9723],
                       [11451,11520], [15536, 16040], [16301, 16805], [16998, 17600]]
        self.assert_state(in_process, unprocessed)
        # particles must also have been published, in order
        self.assertEqual(self.publish_callback_value[0], self.particle_control)
        self.assertEqual(self.publish_callback_value[1], self.particle_a)
        self.assertEqual(self.publish_callback_value[2], self.particle_b)
        self.assertEqual(self.publish_callback_value[3], self.particle_c)
        self.assertEqual(self.publish_callback_value[4], self.particle_d)
        self.assertEqual(self.publish_callback_value[5], self.particle_e)
        self.assertEqual(self.publish_callback_value[6], self.particle_f)
    def test_mid_state_start(self):
        """
        Test starting the parser in a state in the middle of processing
        """
        # no in-process blocks; unprocessed ranges skip already-handled data
        new_state = {StateKey.IN_PROCESS_DATA:[],
            StateKey.UNPROCESSED_DATA:[[0, 172], [4100, 4171], [5899, 5968],
                [7697, 7764], [8636, 16000]],
            StateKey.FILE_SIZE: 17600}
        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_shorter.dat'))
        self.parser = PhsenParser(self.config, new_state, stream_handle,
                                  self.state_callback, self.pub_callback, self.exception_callback)
        # first read finds particle d, leaving two blocks in process
        result = self.parser.get_records(1)
        self.assert_result(result, [[14142, 14646, 1, 0], [14839, 15343, 1, 0]],
                           [[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14142,14646], [14839,15343], [15536, 16000]],
                           self.particle_d)
        # second read drains the first in-process block -> particle e
        result = self.parser.get_records(1)
        self.assert_result(result, [[14839, 15343, 1, 0]],
                           [[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14839,15343], [15536, 16000]],
                           self.particle_e)
        stream_handle.close()
def test_in_process_start(self):
"""
test starting a parser with a state in the middle of processing
"""
new_state = {StateKey.IN_PROCESS_DATA:[[1804, 2308, 1, 0]],
StateKey.UNPROCESSED_DATA:[[0, 172], [1804, 2308], [4100, 4171], [5899, 5968],
[7697, 7764], [8636, 16000]],
StateKey.FILE_SIZE: 17600}
stream_handle = open(os.path.join(RESOURCE_PATH,
'node59p1_shorter.dat'))
self.parser = PhsenParser(self.config, new_state, stream_handle,
self.state_callback, self.pub_callback, self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result, [],
[[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [8636, 16000]],
self.particle_c)
result = self.parser.get_records(1)
self.assert_result(result, [[14142, 14646, 1, 0], [14839, 15343, 1, 0]],
[[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
[11451, 11520], [14142,14646], [14839,15343], [15536, 16000]],
self.particle_d)
    def test_set_state(self):
        """
        Test changing to a new state after initializing the parser and
        reading data, as if new data has been found and the state has
        changed
        """
        state = {StateKey.UNPROCESSED_DATA:[[0, 9000]], StateKey.IN_PROCESS_DATA:[],
            StateKey.FILE_SIZE:17600}
        # new state extends the readable region out to byte 14700
        new_state = {StateKey.UNPROCESSED_DATA:[[0, 172], [4100, 4171], [5899, 5968],
                [7697, 7764], [8636, 14700]],
            StateKey.IN_PROCESS_DATA:[],
            StateKey.FILE_SIZE: 17600}

        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_shorter.dat'))
        self.parser = PhsenParser(self.config, state, stream_handle,
                                  self.state_callback, self.pub_callback, self.exception_callback)
        # there should only be 4 records, make sure we stop there
        result = self.parser.get_records(4)
        self.assert_state([], [[0, 172], [4100, 4171], [5899, 5968],
            [7697, 7764], [8636, 9000]])
        result = self.parser.get_records(1)
        self.assertEqual(result, [])

        # jump to the expanded state; parsing should resume from there
        self.parser.set_state(new_state)
        result = self.parser.get_records(1)
        stream_handle.close()
        self.assert_result(result, [[14142, 14646, 1, 0]],
                           [[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14142,14700]],
                           self.particle_d)
    def test_update(self):
        """
        Test a file which has had a section of data replaced by 0s, as if a block of data has not been received yet,
        then using the returned state make a new parser with the test data that has the 0s filled in
        """
        log.debug('Starting test_update')
        state = {StateKey.UNPROCESSED_DATA:[[0, 14700]],
            StateKey.IN_PROCESS_DATA:[],
            StateKey.FILE_SIZE: 17600}
        # this file has a block of FL data replaced by 0s
        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_replaced.dat'))
        self.parser = PhsenParser(self.config, state, stream_handle,
                                  self.state_callback, self.pub_callback, self.exception_callback)

        result = self.parser.get_records(3)
        self.assertEqual(result, [self.particle_b, self.particle_c, self.particle_d])
        # [367, 911] stays unprocessed: that's the zeroed-out block
        self.assert_state([[14142, 14646, 1, 0]],
            [[0, 172], [367,911], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
            [11451, 11520], [14142,14700]])
        # was b and c
        stream_handle.close()

        next_state = self.parser._state
        # this file has the block of data that was missing in the previous file
        stream_handle = open(os.path.join(RESOURCE_PATH,
                                          'node59p1_shorter.dat'))
        self.parser = PhsenParser(self.config, next_state, stream_handle,
                                  self.state_callback, self.pub_callback, self.exception_callback)

        # get last in process record
        result = self.parser.get_records(1)
        self.assert_result(result, [],
                           [[0, 172], [367,911], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14646,14700]],
                           self.particle_e)
        # now get the filled in record
        result = self.parser.get_records(1)
        self.assert_result(result, [[367,911,2,1]],
                           [[0, 172], [367,911], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14646,14700]],
                           self.particle_control)
        result = self.parser.get_records(1)
        self.assert_result(result, [],
                           [[0, 172], [4100, 4171], [5899, 5968], [7697, 7764], [9654, 9723],
                            [11451, 11520], [14646,14700]],
                           self.particle_a)
        stream_handle.close()
|
|
# tr_mongo_search.py chromatic universe 2018 william k. johnson
import datetime
from datetime import datetime , timedelta
from io import StringIO
import sys
import time
import json
import os
import pprint
import sys
from time import sleep , strptime
import tornado.websocket
import tornado.httpserver
import collections
import tornado
from tornado.queues import Queue
from tornado.ioloop import PeriodicCallback
from tornado.locks import Semaphore
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.escape import json_decode, json_encode
from tornado import gen
from tornado import web
from bson import SON , ObjectId
from bson import json_util
from io import StringIO
#chilkat mime
import chilkat
#motor
import motor.motor_tornado
# module-wide motor (async mongodb) client against the local replica set;
# every handler below shares this connection pool
cci_mongo = motor.motor_tornado.MotorClient( 'mongodb://localhost/?replicaSet=rs0' )
# working database holding the imap2017 message store collections
mongo_db = cci_mongo.imap2017
from cci_mta_trinity.streams.tr_mongo_rest import _logger
from cci_mta_trinity.streams.tr_mime import stage_one_mime_container
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def chomp( source) :
    """
    Strip the first and last character from *source*
    (used to drop surrounding quote characters).

    :param source: sequence to trim
    :return: source without its first and last element
    """
    trimmed = source[1:-1]
    return trimmed
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def imap_internal_to_iso_date( source ) :
    """
    Convert an imap internal date string ( 'dd-Mon-yyyy' ) into a pair of
    datetime objects : the date itself and the following day.

    mongodb range queries need an exact [day , day+1) window to match a
    single calendar day , hence the pair.

    :param source: imap internal date , e.g. '01-Feb-2018'
    :return: ( day , day plus one ) as datetime objects
    """
    # the original version also hand-split the string and reformatted it ,
    # but none of those intermediates were ever used - removed as dead code
    obj = datetime.strptime( source , '%d-%b-%Y' )
    next_obj = obj + timedelta( days=1 )
    return obj , next_obj
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_ids( store_moniker , scope , buckets ) :
    """
    Append aggregation stages restricting the search to one store and ,
    unless scope['all'] is set , to an explicit uid sequence.

    :param store_moniker: canonical store ( mailbox ) name
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...handle_message_ids...' )
    # every query is constrained to the requesting store ; the original code
    # duplicated this identical stage in both branches
    buckets.append( { "$match" : { "domain_canonical_store" : store_moniker } } )
    if scope['all'] is not True :
        # further restrict to the uids expanded from the search sequence set
        buckets.append( { "$match" : { 'uid' : { '$in' : scope['uid_expanded_sequence'] } } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_flags( store_moniker , scope , buckets ) :
    """
    Translate imap flag search keys in *scope* into aggregation $match stages.

    :param store_moniker: canonical store name ( kept for the common handler signature )
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...search..handle message flags...' )
    # search keys that require the corresponding flag to be present
    flag_set = { 'seen' , 'flagged' , 'answered' , 'deleted' , 'recent' , 'draft' }
    # search keys that require the corresponding flag to be absent
    unflag_set = { 'unseen' , 'unflagged' , 'unanswered' , 'undeleted' , 'undraft' }
    # keys that do not map one-to-one onto a single flag
    flags_orthogonal_set = { 'old' , '_new' , 'keyword' }
    flags = []
    unflags = []
    orthogonal = []
    for key , val in scope.items() :
        if ( key in flag_set ) and ( val is True ) :
            flags.append( '\\' + key.title() )
        elif ( key in unflag_set ) and ( val is True ) :
            # drop the 'un' prefix to recover the imap flag name
            unflags.append( '\\' + key[2:].title() )
        elif ( ( key in flags_orthogonal_set ) and ( val is True ) ) or \
             ( ( key in flags_orthogonal_set ) and ( type( val ) is str ) and ( len( val ) != 0 ) ):
            # note : '!= 0' replaces the original identity test 'is not 0'
            orthogonal.append( key )
    if len( flags ) != 0 :
        buckets.append( { "$match" : { 'message_flags' : { '$in' : flags } } } )
    if len( unflags ) != 0 :
        buckets.append( { "$match" : { 'message_flags' : { "$not" : { '$in' : unflags } } } } )
    if len( orthogonal ) != 0 :
        if '_new' in orthogonal :
            # NEW == recent and not seen ; '$in' takes an array , the original
            # passed the literal string '[\\Recent]' which mongodb rejects
            buckets.append( { "$match" : { 'message_flags' : { '$in' : ['\\Recent'] } ,
                                           "$not" : { '$in' : ['\\Seen'] } } } )
        if 'old' in orthogonal :
            buckets.append( { "$match" : { 'message_flags' : { "$not" : { '$in' : ['\\Recent'] } } } } )
        if 'keyword' in orthogonal :
            buckets.append( { "$match" : { 'message_flags' : { '$in' : [scope['keyword']] } } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_headers( store_moniker , scope , buckets ) :
    """
    Append $match stages for header searches : the rfc2822 root headers
    ( from/to/cc/bcc/subject ) plus arbitrary header atoms from
    scope['headers'].

    :param store_moniker: canonical store name ( kept for the common handler signature )
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...search..handle message headers...' )
    root_headers_set = { 'from' , 'to' , 'cc' , 'bcc' , 'subject' }
    # only root headers the client actually searched on
    # ( '!= 0' replaces the original identity test 'is not 0' )
    root_headers = [ key for key , val in scope.items()
                     if ( key in root_headers_set ) and ( len( val ) != 0 ) ]
    for hdr in root_headers :
        buckets.append( { "$match" : { 'headers.%s' % hdr : { "$regex" : scope[hdr] } } } )
    # generic headers array
    for atom in scope['headers'] :
        v = yield chomp( atom['value'] )
        # per rfc3501 , zero length string denotes wildcard match for this header field
        hdr_val = '.' if len( v ) == 0 else v
        buckets.append( { "$match" : { 'headers.%s' % atom['moniker'].lower() : { "$regex" : hdr_val } } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_text( store_moniker , scope , buckets ) :
    """
    Append aggregation stages for full-text / body searches. Joins the
    materialized search gadget collection and matches against it.

    :param store_moniker: canonical store name
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...search..handle message text...' )
    text_set = { 'text' , 'body' }
    # which text search keys the client actually supplied
    # ( '!= 0' replaces the original identity test 'is not 0' )
    text_monikers = [ key for key , val in scope.items()
                      if ( key in text_set ) and ( len( val ) != 0 ) ]
    # lookup joins the per-message search gadget document
    buckets.append( { "$lookup" : { "from" : "domain_materialized_search_gadget" ,
                                    "localField" : "_id" , "foreignField" : "message_id" , "as" : "gadget"}} )
    # lookup returns array; unwind expands array into discrete objects
    buckets.append( { "$unwind" : "$gadget" } )
    # case insensitive regex match
    if 'body' in text_monikers :
        buckets.append( { "$match" : { "gadget.body" : { "$regex" : scope['body'].strip( '"' ) ,
                                                         "$options" : "i" } ,
                                       "domain_canonical_store" : store_moniker } } )
    elif 'text' in text_monikers :
        buckets.append( { "$match" : { "$text" : { "$search" : scope['text'].strip( '"' ) ,
                                                   "$caseSensitive" : False } ,
                                       "domain_canonical_store" : store_moniker } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_intervals( store_moniker , scope , buckets ) :
    """
    Append $match stages for internal ( arrival ) and rfc822 ( sent ) date
    searches. 'on' / 'senton' become a [day , day+1) range ; before/since
    map directly onto $lt / $gte.

    :param store_moniker: canonical store name ( kept for the common handler signature )
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...search..handle message intervals...' )
    internal_date_dict = { 'before' : '$lt' ,
                           'on' : 'nil' ,
                           'since' : '$gte'
                         }
    rfc822_date_dict = { 'sentbefore' : '$lt' ,
                         'senton' : 'nil' ,
                         'sentsince' : '$gte'
                       }
    # '!= 0' replaces the original identity test 'is not 0'
    internal_date = [ key for key , val in scope.items()
                      if ( key in internal_date_dict ) and ( len( val ) != 0 ) ]
    for interval in internal_date :
        iso , iso_plus_one = yield imap_internal_to_iso_date( scope[interval] )
        if interval == 'on' :
            # mongodb has no 'same day' operator : use [day , day+1)
            buckets.append( { "$match" : { "postmark" : { "$gte" : iso ,
                                                          "$lt" : iso_plus_one } } } )
        else :
            buckets.append( { "$match" : { "postmark" : { internal_date_dict[interval] : iso } } } )
    rfc822_date = [ key for key , val in scope.items()
                    if ( key in rfc822_date_dict ) and ( len( val ) != 0 ) ]
    for interval in rfc822_date :
        iso , iso_plus_one = yield imap_internal_to_iso_date( scope[interval] )
        if interval == 'senton' :
            buckets.append( { "$match" : { "date_iso" : { "$gte" : iso ,
                                                          "$lt" : iso_plus_one } } } )
        else :
            buckets.append( { "$match" : { "date_iso" : { rfc822_date_dict[interval] : iso } } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def handle_message_sizes( store_moniker , scope , buckets ) :
    """
    Append $match stages for larger/smaller payload-size searches.

    :param store_moniker: canonical store name ( kept for the common handler signature )
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    """
    _logger.info( '...search..handle message sizes...' )
    sizes_dict = { 'larger' : "$gt" ,
                   'smaller' : "$lt"
                 }
    # '!= 0' replaces the original identity test 'is not 0'
    sizes = [ key for key , val in scope.items()
              if ( key in sizes_dict ) and ( val != 0 ) ]
    for sz in sizes :
        buckets.append( { "$match" : { "payload_size" : { sizes_dict[sz] : scope[sz] } } } )
# -------------------------------------------------------------------------------------------------
@tornado.gen.coroutine
def process_one_scope( moniker , scope , buckets ) :
    """
    Run every per-category handler over one search scope , appending its
    aggregation stages to *buckets*.

    :param moniker: canonical store ( mailbox ) name
    :param scope: one imap search scope dict
    :param buckets: aggregation pipeline being built ( mutated in place )
    :return:
    """
    _logger.info( '...search..process one scope...' )
    # we walk the search tree in multiple passes to build
    # our aggregate/map reduce structure
    # not a big deal because the search structure is static
    # and we're using set semantics as well as all this
    # being async; note: we aren't executing anything here
    #
    # message text
    #
    # the text aggregation has to be first in the pipeline
    # per debug crash output
    # docs are silent on this; we'll just go with it
    yield handle_message_text( moniker , scope , buckets )
    # message sequences
    yield handle_message_ids( moniker , scope , buckets )
    # message flags
    yield handle_message_flags( moniker , scope , buckets )
    # message headers
    yield handle_message_headers( moniker , scope , buckets )
    # message intervals
    yield handle_message_intervals( moniker , scope , buckets )
    # message sizes
    yield handle_message_sizes( moniker , scope , buckets )
# --------------------------------------------------------------------------------------------------------
class imap2017_search_result_set_from_query( tornado.web.RequestHandler ) :
    """
    POST : return the result set for a posted json imap search query.
    """
    # note : runs once , when the class object is created at import time
    _logger.info( '...imap2017_search_result_set_from_query...' )

    @gen.coroutine
    def post( self ) :
        """
        Build a mongodb aggregation pipeline from the posted imap search
        scopes , run it , and write back the matching sequence numbers.
        """
        self.output = set()
        try :
            js = json_decode( self.request.body )
            search_json = json.loads( js["search_json"] )
            moniker = '%s_%s' % ( js['moniker'] ,
                                  js['folder'] )
            db = mongo_db.domain_existential_message
            scopes = search_json['imap_search_scopes']
            top_scope = scopes[0]
            # the range of permissible sequences/uids
            # is constrained by the top enclosing scope
            id_dict = dict( zip( top_scope['uid_expanded_sequence'] ,
                                 top_scope['expanded_sequence'] ) )
            buckets = []
            for scope in scopes :
                # process_one_scope is a coroutine : the original call was
                # missing the yield , so the pipeline could run before the
                # stages were appended
                yield process_one_scope( moniker , scope , buckets )
            bucket_results = db.aggregate( buckets )
            while ( yield bucket_results.fetch_next ) :
                doc = bucket_results.next_object()
                if doc['uid'] in id_dict :
                    # todo - specify uids or sequence here
                    self.output.add( id_dict[doc['uid']] )
        except Exception as e :
            _logger.error( '...imap2017_search_result_set_from_query, %s' % str( e ) )
            ret = { "search_results" : "bad" }
            self.write( json_util.dumps( ret , default=json_util.default ) )
            # don't also write a ( partial ) result set after an error
            return
        self.write( json_util.dumps( list( self.output ) , default=json_util.default ) )
# -------------------------------------------------------------------------------------------------------
# static assets served from ./static next to this module
settings = {
    "static_path": os.path.join(os.path.dirname(__file__), "static"),
}
# create the web server with async coroutines
_logger.info( '...initializing search services....' )
# single POST route mapping a json search query to its result set
# NOTE(review): tornado's application debug switch is named 'debug'; 'qDebug'
# is stored as an opaque application setting -- confirm which was intended
imap_search_application = tornado.web.Application([ ( r'/cci_mta_trinity/search_result_set_from_query', imap2017_search_result_set_from_query ) ,
                                                  ] , qDebug=True , db=mongo_db , **settings )
|
|
"""
Printing tools.
"""
import sys
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Sized,
Tuple,
TypeVar,
Union,
)
from pandas._config import get_option
from pandas.core.dtypes.inference import is_sequence
EscapeChars = Union[Mapping[str, str], Iterable[str]]
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
def adjoin(space: int, *lists: List[str], **kwargs) -> str:
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
----------
space : int
number of spaces for padding
lists : str
list of str which being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop("strlen", len)
justfunc = kwargs.pop("justfunc", justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode="left")
nl.extend([" " * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append("".join(lines))
return "\n".join(out_lines)
def justify(texts: Iterable[str], max_len: int, mode: str = "right") -> List[str]:
    """
    Justify each string in *texts* to *max_len* columns.

    mode selects str.ljust ("left"), str.center ("center") or str.rjust
    (anything else, including the default "right").
    """
    pad = {"left": str.ljust, "center": str.center}.get(mode, str.rjust)
    return [pad(text, max_len) for text in texts]
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
#    It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
def _pprint_seq(
    seq: Sequence, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
    """
    internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.

    bounds length of printed sequence, depending on options
    """
    # delimiters: sets -> {...}, mutable sequences -> [...], otherwise (...)
    if isinstance(seq, set):
        fmt = "{{{body}}}"
    elif hasattr(seq, "__setitem__"):
        fmt = "[{body}]"
    else:
        fmt = "({body})"

    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)

    # iterate rather than slice so unsliceable containers (sets) work too
    it = iter(seq)
    shown = [
        pprint_thing(next(it), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
        for _ in range(min(nitems, len(seq)))
    ]
    body = ", ".join(shown)

    if nitems < len(seq):
        body += ", ..."
    elif isinstance(seq, tuple) and len(seq) == 1:
        # single-element tuples need their trailing comma
        body += ","

    return fmt.format(body=body)
def _pprint_dict(
    seq: Mapping, _nest_lvl: int = 0, max_seq_items: Optional[int] = None, **kwds
) -> str:
    """
    internal. pprinter for mappings. you should probably use pprint_thing()
    rather than calling this directly.
    """
    if max_seq_items is False:
        nitems = len(seq)
    else:
        nitems = max_seq_items or get_option("max_seq_items") or len(seq)

    pair_fmt = "{key}: {val}"
    pairs = [
        pair_fmt.format(
            key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
            val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),
        )
        for k, v in list(seq.items())[:nitems]
    ]

    joined = ", ".join(pairs)
    if nitems < len(seq):
        # indicate truncation
        joined += ", ..."
    return "{{{things}}}".format(things=joined)
def pprint_thing(
    thing: Any,
    _nest_lvl: int = 0,
    escape_chars: Optional[EscapeChars] = None,
    default_escapes: bool = False,
    quote_strings: bool = False,
    max_seq_items: Optional[int] = None,
) -> str:
    """
    This function is the sanctioned way of converting objects
    to a string representation and properly handles nested sequences.

    Parameters
    ----------
    thing : anything to be formatted
    _nest_lvl : internal use only. pprint_thing() is mutually-recursive
        with pprint_sequence, this argument is used to keep track of the
        current nesting level, and limit it.
    escape_chars : list or dict, optional
        Characters to escape. If a dict is passed the values are the
        replacements
    default_escapes : bool, default False
        Whether the input escape characters replaces or adds to the defaults
    quote_strings : bool, default False
        Whether plain strings are surrounded with single quotes
    max_seq_items : int or None, default None
        Pass through to other pretty printers to limit sequence printing

    Returns
    -------
    str
    """

    def as_escaped_string(
        thing: Any, escape_chars: Optional[EscapeChars] = escape_chars
    ) -> str:
        # default control-character escapes; a dict argument either extends
        # them (default_escapes=True) or replaces them entirely
        translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}
        if isinstance(escape_chars, dict):
            if default_escapes:
                translate.update(escape_chars)
            else:
                translate = escape_chars
            escape_chars = list(escape_chars.keys())
        else:
            escape_chars = escape_chars or ()

        result = str(thing)
        for c in escape_chars:
            result = result.replace(c, translate[c])
        return result

    # iterators are shown via their default str() rather than consumed
    if hasattr(thing, "__next__"):
        return str(thing)
    elif isinstance(thing, dict) and _nest_lvl < get_option(
        "display.pprint_nest_depth"
    ):
        result = _pprint_dict(
            thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items
        )
    elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):
        result = _pprint_seq(
            thing,
            _nest_lvl,
            escape_chars=escape_chars,
            quote_strings=quote_strings,
            max_seq_items=max_seq_items,
        )
    elif isinstance(thing, str) and quote_strings:
        result = f"'{as_escaped_string(thing)}'"
    else:
        result = as_escaped_string(thing)

    return result
def pprint_thing_encoded(
    object, encoding: str = "utf-8", errors: str = "replace"
) -> bytes:
    """
    Encode the pretty-printed representation of *object* as bytes.
    """
    # build the unicode representation first, encode at the boundary
    text = pprint_thing(object)
    return text.encode(encoding, errors)
def enable_data_resource_formatter(enable: bool) -> None:
    """
    Register or disable the tableschema mimetype formatter with IPython.

    No-op when not running under IPython.
    """
    if "IPython" not in sys.modules:
        # definitely not in IPython
        return
    from IPython import get_ipython

    ip = get_ipython()
    if ip is None:
        # still not in IPython
        return

    formatters = ip.display_formatter.formatters
    mimetype = "application/vnd.dataresource+json"

    if enable:
        if mimetype not in formatters:
            # define tableschema formatter
            from IPython.core.formatters import BaseFormatter

            class TableSchemaFormatter(BaseFormatter):
                print_method = "_repr_data_resource_"
                _return_type = (dict,)

            # register it:
            formatters[mimetype] = TableSchemaFormatter()
        # enable it if it's been disabled:
        formatters[mimetype].enabled = True
    else:
        # unregister tableschema mime-type
        if mimetype in formatters:
            formatters[mimetype].enabled = False
def default_pprint(thing: Any, max_seq_items: Optional[int] = None) -> str:
    """
    pprint_thing with the default control-character escapes and quoted strings.
    """
    escape = ("\t", "\r", "\n")
    return pprint_thing(
        thing, escape_chars=escape, quote_strings=True, max_seq_items=max_seq_items
    )
def format_object_summary(
    obj,
    formatter: Callable,
    is_justify: bool = True,
    name: Optional[str] = None,
    indent_for_name: bool = True,
    line_break_each_value: bool = False,
) -> str:
    """
    Return the formatted obj as a unicode string

    Parameters
    ----------
    obj : object
        must be iterable and support __getitem__
    formatter : callable
        string formatter for an element
    is_justify : bool
        should justify the display
    name : name, optional
        defaults to the class name of the obj
    indent_for_name : bool, default True
        Whether subsequent lines should be indented to
        align with the name.
    line_break_each_value : bool, default False
        If True, inserts a line break for each value of ``obj``.
        If False, only break lines when the a line of values gets wider
        than the display width.

        .. versionadded:: 0.25.0

    Returns
    -------
    summary string
    """
    from pandas.io.formats.console import get_console_size
    from pandas.io.formats.format import get_adjustment

    display_width, _ = get_console_size()
    if display_width is None:
        display_width = get_option("display.width") or 80
    if name is None:
        name = type(obj).__name__

    if indent_for_name:
        # continuation lines line up under the opening '[' after the name
        name_len = len(name)
        space1 = f'\n{(" " * (name_len + 1))}'
        space2 = f'\n{(" " * (name_len + 2))}'
    else:
        space1 = "\n"
        space2 = "\n "  # space for the opening '['

    n = len(obj)
    if line_break_each_value:
        # If we want to vertically align on each value of obj, we need to
        # separate values by a line break and indent the values
        sep = ",\n " + " " * len(name)
    else:
        sep = ","
    max_seq_items = get_option("display.max_seq_items") or n

    # are we a truncated display
    is_truncated = n > max_seq_items

    # adj can optionally handle unicode eastern asian width
    adj = get_adjustment()

    def _extend_line(
        s: str, line: str, value: str, display_width: int, next_line_prefix: str
    ) -> Tuple[str, str]:
        # flush `line` into `s` when appending `value` would overflow
        if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:
            s += line.rstrip()
            line = next_line_prefix
        line += value
        return s, line

    def best_len(values: List[str]) -> int:
        # widest rendered element; 0 for an empty list
        if values:
            return max(adj.len(x) for x in values)
        else:
            return 0

    close = ", "

    if n == 0:
        summary = f"[]{close}"
    elif n == 1 and not line_break_each_value:
        first = formatter(obj[0])
        summary = f"[{first}]{close}"
    elif n == 2 and not line_break_each_value:
        first = formatter(obj[0])
        last = formatter(obj[-1])
        summary = f"[{first}, {last}]{close}"
    else:
        if max_seq_items == 1:
            # If max_seq_items=1 show only last element
            head = []
            tail = [formatter(x) for x in obj[-1:]]
        elif n > max_seq_items:
            n = min(max_seq_items // 2, 10)
            head = [formatter(x) for x in obj[:n]]
            tail = [formatter(x) for x in obj[-n:]]
        else:
            head = []
            tail = [formatter(x) for x in obj]

        # adjust all values to max length if needed
        if is_justify:
            if line_break_each_value:
                # Justify each string in the values of head and tail, so the
                # strings will right align when head and tail are stacked
                # vertically.
                head, tail = _justify(head, tail)
            elif is_truncated or not (
                len(", ".join(head)) < display_width
                and len(", ".join(tail)) < display_width
            ):
                # Each string in head and tail should align with each other
                max_length = max(best_len(head), best_len(tail))
                head = [x.rjust(max_length) for x in head]
                tail = [x.rjust(max_length) for x in tail]
            # If we are not truncated and we are only a single
            # line, then don't justify

        if line_break_each_value:
            # Now head and tail are of type List[Tuple[str]]. Below we
            # convert them into List[str], so there will be one string per
            # value. Also truncate items horizontally if wider than
            # max_space
            max_space = display_width - len(space2)
            value = tail[0]
            for max_items in reversed(range(1, len(value) + 1)):
                pprinted_seq = _pprint_seq(value, max_seq_items=max_items)
                if len(pprinted_seq) < max_space:
                    break
            head = [_pprint_seq(x, max_seq_items=max_items) for x in head]
            tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]

        summary = ""
        line = space2

        for max_items in range(len(head)):
            word = head[max_items] + sep + " "
            summary, line = _extend_line(summary, line, word, display_width, space2)

        if is_truncated:
            # remove trailing space of last line
            summary += line.rstrip() + space2 + "..."
            line = space2

        for max_items in range(len(tail) - 1):
            word = tail[max_items] + sep + " "
            summary, line = _extend_line(summary, line, word, display_width, space2)

        # last value: no sep added + 1 space of width used for trailing ','
        summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)
        summary += line

        # right now close is either '' or ', '
        # Now we want to include the ']', but not the maybe space.
        close = "]" + close.rstrip(" ")
        summary += close

        if len(summary) > (display_width) or line_break_each_value:
            summary += space1
        else:  # one row
            summary += " "

        # remove initial space
        summary = "[" + summary[len(space2) :]

    return summary
def _justify(
head: List[Sequence[str]], tail: List[Sequence[str]]
) -> Tuple[List[Tuple[str, ...]], List[Tuple[str, ...]]]:
"""
Justify items in head and tail, so they are right-aligned when stacked.
Parameters
----------
head : list-like of list-likes of strings
tail : list-like of list-likes of strings
Returns
-------
tuple of list of tuples of strings
Same as head and tail, but items are right aligned when stacked
vertically.
Examples
--------
>>> _justify([['a', 'b']], [['abc', 'abcd']])
([(' a', ' b')], [('abc', 'abcd')])
"""
combined = head + tail
# For each position for the sequences in ``combined``,
# find the length of the largest string.
max_length = [0] * len(combined[0])
for inner_seq in combined:
length = [len(item) for item in inner_seq]
max_length = [max(x, y) for x, y in zip(max_length, length)]
# justify each item in each list-like in head and tail using max_length
head = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head
]
tail = [
tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail
]
# https://github.com/python/mypy/issues/4975
# error: Incompatible return value type (got "Tuple[List[Sequence[str]],
# List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]],
# List[Tuple[str, ...]]]")
return head, tail # type: ignore[return-value]
def format_object_attrs(
    obj: Sized, include_dtype: bool = True
) -> List[Tuple[str, Union[str, int]]]:
    """
    Return a list of tuples of the (attr, formatted_value)
    for common attrs, including dtype, name, length

    Parameters
    ----------
    obj : object
        Must be sized.
    include_dtype : bool
        If False, dtype won't be in the returned list

    Returns
    -------
    list of 2-tuple
    """
    attrs: List[Tuple[str, Union[str, int]]] = []

    if include_dtype and hasattr(obj, "dtype"):
        # error: "Sized" has no attribute "dtype"
        attrs.append(("dtype", f"'{obj.dtype}'"))  # type: ignore[attr-defined]

    name = getattr(obj, "name", None)
    if name is not None:
        attrs.append(("name", default_pprint(name)))
    # error: "Sized" has no attribute "names"
    elif getattr(obj, "names", None) is not None and any(
        obj.names  # type: ignore[attr-defined]
    ):
        # error: "Sized" has no attribute "names"
        attrs.append(("names", default_pprint(obj.names)))  # type: ignore[attr-defined]

    limit = get_option("display.max_seq_items") or len(obj)
    if len(obj) > limit:
        attrs.append(("length", len(obj)))
    return attrs
class PrettyDict(Dict[_KT, _VT]):
    """Dict subclass whose ``repr`` is routed through :func:`pprint_thing`
    so that large mappings render abbreviated."""

    def __repr__(self) -> str:
        """Return the abbreviated string form of this mapping."""
        return pprint_thing(self)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
    # Newer google-api-core releases expose the "no retry specified"
    # sentinel type; use it so the default argument is typed precisely.
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    # Older google-api-core without ``_MethodDefault``: fall back to object.
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.compute_v1.types import compute
from .base import (
RegionHealthCheckServicesTransport,
DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO,
)
# Client info sent in the user-agent of every request: reuse the gapic
# version from the base transport, mark grpc as unused (this is a REST
# transport), and report the installed ``requests`` version instead.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)
class RegionHealthCheckServicesRestInterceptor:
    """Hook points for the RegionHealthCheckServices REST transport.

    An interceptor can observe or rewrite each RPC's request, request
    metadata, and response in arbitrary ways.  Example use cases include:

    * logging traffic,
    * verifying requests according to service or custom semantics,
    * stripping extraneous information from responses.

    To enable these, subclass this type, override the ``pre_*``/``post_*``
    hooks of interest, and pass an instance when constructing the
    RegionHealthCheckServicesRestTransport.

    .. code-block:: python
        class MyCustomRegionHealthCheckServicesInterceptor(RegionHealthCheckServicesRestInterceptor):
            def pre_delete(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_delete(response):
                logging.log(f"Received response: {response}")

            def pre_get(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_get(response):
                logging.log(f"Received response: {response}")

            def pre_insert(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_insert(response):
                logging.log(f"Received response: {response}")

            def pre_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_list(response):
                logging.log(f"Received response: {response}")

            def pre_patch(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata

            def post_patch(response):
                logging.log(f"Received response: {response}")

        transport = RegionHealthCheckServicesRestTransport(interceptor=MyCustomRegionHealthCheckServicesInterceptor())
        client = RegionHealthCheckServicesClient(transport=transport)
    """

    def pre_delete(
        self,
        request: compute.DeleteRegionHealthCheckServiceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[
        compute.DeleteRegionHealthCheckServiceRequest, Sequence[Tuple[str, str]]
    ]:
        """Rewrite the ``delete`` request or metadata before they are sent
        to the RegionHealthCheckServices server.  Default: pass-through.
        """
        return (request, metadata)

    def post_delete(self, response: compute.Operation) -> compute.Operation:
        """Inspect or replace the ``delete`` response after the server
        answers but before user code sees it.  Default: pass-through.
        """
        return response

    def pre_get(
        self,
        request: compute.GetRegionHealthCheckServiceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.GetRegionHealthCheckServiceRequest, Sequence[Tuple[str, str]]]:
        """Rewrite the ``get`` request or metadata before they are sent
        to the RegionHealthCheckServices server.  Default: pass-through.
        """
        return (request, metadata)

    def post_get(
        self, response: compute.HealthCheckService
    ) -> compute.HealthCheckService:
        """Inspect or replace the ``get`` response after the server
        answers but before user code sees it.  Default: pass-through.
        """
        return response

    def pre_insert(
        self,
        request: compute.InsertRegionHealthCheckServiceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[
        compute.InsertRegionHealthCheckServiceRequest, Sequence[Tuple[str, str]]
    ]:
        """Rewrite the ``insert`` request or metadata before they are sent
        to the RegionHealthCheckServices server.  Default: pass-through.
        """
        return (request, metadata)

    def post_insert(self, response: compute.Operation) -> compute.Operation:
        """Inspect or replace the ``insert`` response after the server
        answers but before user code sees it.  Default: pass-through.
        """
        return response

    def pre_list(
        self,
        request: compute.ListRegionHealthCheckServicesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.ListRegionHealthCheckServicesRequest, Sequence[Tuple[str, str]]]:
        """Rewrite the ``list`` request or metadata before they are sent
        to the RegionHealthCheckServices server.  Default: pass-through.
        """
        return (request, metadata)

    def post_list(
        self, response: compute.HealthCheckServicesList
    ) -> compute.HealthCheckServicesList:
        """Inspect or replace the ``list`` response after the server
        answers but before user code sees it.  Default: pass-through.
        """
        return response

    def pre_patch(
        self,
        request: compute.PatchRegionHealthCheckServiceRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.PatchRegionHealthCheckServiceRequest, Sequence[Tuple[str, str]]]:
        """Rewrite the ``patch`` request or metadata before they are sent
        to the RegionHealthCheckServices server.  Default: pass-through.
        """
        return (request, metadata)

    def post_patch(self, response: compute.Operation) -> compute.Operation:
        """Inspect or replace the ``patch`` response after the server
        answers but before user code sees it.  Default: pass-through.
        """
        return response
@dataclasses.dataclass
class RegionHealthCheckServicesRestStub:
    # Shared plumbing handed to each per-RPC callable stub (_Delete, _Get, ...).
    _session: AuthorizedSession  # authorized HTTP session used to send requests
    _host: str  # scheme://host prefix prepended to every transcoded URI
    _interceptor: RegionHealthCheckServicesRestInterceptor  # pre/post RPC hooks
class RegionHealthCheckServicesRestTransport(RegionHealthCheckServicesTransport):
    """REST backend transport for RegionHealthCheckServices.

    The RegionHealthCheckServices API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    # Cache of lazily-created per-RPC stubs, keyed by RPC name.
    # NOTE(review): this is a *class* attribute, so stubs — which capture a
    # specific session/host/interceptor — are shared by every transport
    # instance in the process; confirm that is intended before reusing
    # multiple transports with different credentials or interceptors.
    _STUBS: Dict[str, RegionHealthCheckServicesRestStub] = {}

    def __init__(
        self,
        *,
        host: str = "compute.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        url_scheme: str = "https",
        interceptor: Optional[RegionHealthCheckServicesRestInterceptor] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you are developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        # Normalize ``host``: prepend ``url_scheme`` only when the caller did
        # not already include an explicit http(s):// scheme.
        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
        if maybe_url_match is None:
            raise ValueError(
                f"Unexpected hostname structure: {host}"
            )  # pragma: NO COVER
        url_match_items = maybe_url_match.groupdict()
        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # The authorized session injects credentials into every HTTP call.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST
        )
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        # Fall back to the no-op interceptor when none is supplied.
        self._interceptor = interceptor or RegionHealthCheckServicesRestInterceptor()
        self._prep_wrapped_messages(client_info)

    # Callable stub implementing the ``delete`` RPC over REST.
    class _Delete(RegionHealthCheckServicesRestStub):
        def __hash__(self):
            return hash("Delete")

        # Default values for required query params; empty for this RPC.
        # (Name-mangled per class, so each stub keeps its own table.)
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Return the defaults for required fields absent from the message.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        def __call__(
            self,
            request: compute.DeleteRegionHealthCheckServiceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
            r"""Call the delete method over HTTP.

            Args:
                request (~.compute.DeleteRegionHealthCheckServiceRequest):
                    The request object. A request message for
                    RegionHealthCheckServices.Delete. See
                    the method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.

            Returns:
                ~.compute.Operation:
                    Represents an Operation resource. Google Compute Engine
                    has three Operation resources: \*
                    `Global </compute/docs/reference/rest/v1/globalOperations>`__
                    \*
                    `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                    \*
                    `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                    You can use an operation resource to manage asynchronous
                    API requests. For more information, read Handling API
                    responses. Operations can be global, regional or zonal.
                    - For global operations, use the ``globalOperations``
                    resource. - For regional operations, use the
                    ``regionOperations`` resource. - For zonal operations,
                    use the ``zonalOperations`` resource. For more
                    information, read Global, Regional, and Zonal Resources.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "delete",
                    "uri": "/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}",
                },
            ]
            request, metadata = self._interceptor.pre_delete(request, metadata)
            request_kwargs = compute.DeleteRegionHealthCheckServiceRequest.to_dict(
                request
            )
            # Map the request fields onto the URI template above.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.DeleteRegionHealthCheckServiceRequest.to_json(
                    compute.DeleteRegionHealthCheckServiceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.Operation.from_json(
                response.content, ignore_unknown_fields=True
            )
            resp = self._interceptor.post_delete(resp)
            return resp

    # Callable stub implementing the ``get`` RPC over REST.
    class _Get(RegionHealthCheckServicesRestStub):
        def __hash__(self):
            return hash("Get")

        # Default values for required query params; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Return the defaults for required fields absent from the message.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        def __call__(
            self,
            request: compute.GetRegionHealthCheckServiceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.HealthCheckService:
            r"""Call the get method over HTTP.

            Args:
                request (~.compute.GetRegionHealthCheckServiceRequest):
                    The request object. A request message for
                    RegionHealthCheckServices.Get. See the
                    method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.

            Returns:
                ~.compute.HealthCheckService:
                    Represents a Health-Check as a
                    Service resource.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}",
                },
            ]
            request, metadata = self._interceptor.pre_get(request, metadata)
            request_kwargs = compute.GetRegionHealthCheckServiceRequest.to_dict(request)
            # Map the request fields onto the URI template above.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.GetRegionHealthCheckServiceRequest.to_json(
                    compute.GetRegionHealthCheckServiceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.HealthCheckService.from_json(
                response.content, ignore_unknown_fields=True
            )
            resp = self._interceptor.post_get(resp)
            return resp

    # Callable stub implementing the ``insert`` RPC over REST.
    class _Insert(RegionHealthCheckServicesRestStub):
        def __hash__(self):
            return hash("Insert")

        # Default values for required query params; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Return the defaults for required fields absent from the message.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        def __call__(
            self,
            request: compute.InsertRegionHealthCheckServiceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
            r"""Call the insert method over HTTP.

            Args:
                request (~.compute.InsertRegionHealthCheckServiceRequest):
                    The request object. A request message for
                    RegionHealthCheckServices.Insert. See
                    the method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.

            Returns:
                ~.compute.Operation:
                    Represents an Operation resource. Google Compute Engine
                    has three Operation resources: \*
                    `Global </compute/docs/reference/rest/v1/globalOperations>`__
                    \*
                    `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                    \*
                    `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                    You can use an operation resource to manage asynchronous
                    API requests. For more information, read Handling API
                    responses. Operations can be global, regional or zonal.
                    - For global operations, use the ``globalOperations``
                    resource. - For regional operations, use the
                    ``regionOperations`` resource. - For zonal operations,
                    use the ``zonalOperations`` resource. For more
                    information, read Global, Regional, and Zonal Resources.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "post",
                    "uri": "/compute/v1/projects/{project}/regions/{region}/healthCheckServices",
                    "body": "health_check_service_resource",
                },
            ]
            request, metadata = self._interceptor.pre_insert(request, metadata)
            request_kwargs = compute.InsertRegionHealthCheckServiceRequest.to_dict(
                request
            )
            # Map the request fields onto the URI template above.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            # Jsonify the request body
            body = compute.HealthCheckService.to_json(
                compute.HealthCheckService(transcoded_request["body"]),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.InsertRegionHealthCheckServiceRequest.to_json(
                    compute.InsertRegionHealthCheckServiceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
                data=body,
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.Operation.from_json(
                response.content, ignore_unknown_fields=True
            )
            resp = self._interceptor.post_insert(resp)
            return resp

    # Callable stub implementing the ``list`` RPC over REST.
    class _List(RegionHealthCheckServicesRestStub):
        def __hash__(self):
            return hash("List")

        # Default values for required query params; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Return the defaults for required fields absent from the message.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        def __call__(
            self,
            request: compute.ListRegionHealthCheckServicesRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.HealthCheckServicesList:
            r"""Call the list method over HTTP.

            Args:
                request (~.compute.ListRegionHealthCheckServicesRequest):
                    The request object. A request message for
                    RegionHealthCheckServices.List. See the
                    method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.

            Returns:
                ~.compute.HealthCheckServicesList:

            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "get",
                    "uri": "/compute/v1/projects/{project}/regions/{region}/healthCheckServices",
                },
            ]
            request, metadata = self._interceptor.pre_list(request, metadata)
            request_kwargs = compute.ListRegionHealthCheckServicesRequest.to_dict(
                request
            )
            # Map the request fields onto the URI template above.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.ListRegionHealthCheckServicesRequest.to_json(
                    compute.ListRegionHealthCheckServicesRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.HealthCheckServicesList.from_json(
                response.content, ignore_unknown_fields=True
            )
            resp = self._interceptor.post_list(resp)
            return resp

    # Callable stub implementing the ``patch`` RPC over REST.
    class _Patch(RegionHealthCheckServicesRestStub):
        def __hash__(self):
            return hash("Patch")

        # Default values for required query params; empty for this RPC.
        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}

        @classmethod
        def _get_unset_required_fields(cls, message_dict):
            # Return the defaults for required fields absent from the message.
            return {
                k: v
                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
                if k not in message_dict
            }

        def __call__(
            self,
            request: compute.PatchRegionHealthCheckServiceRequest,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
            r"""Call the patch method over HTTP.

            Args:
                request (~.compute.PatchRegionHealthCheckServiceRequest):
                    The request object. A request message for
                    RegionHealthCheckServices.Patch. See the
                    method description for details.
                retry (google.api_core.retry.Retry): Designation of what errors, if any,
                    should be retried.
                timeout (float): The timeout for this request.
                metadata (Sequence[Tuple[str, str]]): Strings which should be
                    sent along with the request as metadata.

            Returns:
                ~.compute.Operation:
                    Represents an Operation resource. Google Compute Engine
                    has three Operation resources: \*
                    `Global </compute/docs/reference/rest/v1/globalOperations>`__
                    \*
                    `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                    \*
                    `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                    You can use an operation resource to manage asynchronous
                    API requests. For more information, read Handling API
                    responses. Operations can be global, regional or zonal.
                    - For global operations, use the ``globalOperations``
                    resource. - For regional operations, use the
                    ``regionOperations`` resource. - For zonal operations,
                    use the ``zonalOperations`` resource. For more
                    information, read Global, Regional, and Zonal Resources.
            """
            http_options: List[Dict[str, str]] = [
                {
                    "method": "patch",
                    "uri": "/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}",
                    "body": "health_check_service_resource",
                },
            ]
            request, metadata = self._interceptor.pre_patch(request, metadata)
            request_kwargs = compute.PatchRegionHealthCheckServiceRequest.to_dict(
                request
            )
            # Map the request fields onto the URI template above.
            transcoded_request = path_template.transcode(http_options, **request_kwargs)
            # Jsonify the request body
            body = compute.HealthCheckService.to_json(
                compute.HealthCheckService(transcoded_request["body"]),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )
            uri = transcoded_request["uri"]
            method = transcoded_request["method"]
            # Jsonify the query params
            query_params = json.loads(
                compute.PatchRegionHealthCheckServiceRequest.to_json(
                    compute.PatchRegionHealthCheckServiceRequest(
                        transcoded_request["query_params"]
                    ),
                    including_default_value_fields=False,
                    use_integers_for_enums=False,
                )
            )
            query_params.update(self._get_unset_required_fields(query_params))
            # Send the request
            headers = dict(metadata)
            headers["Content-Type"] = "application/json"
            response = getattr(self._session, method)(
                "{host}{uri}".format(host=self._host, uri=uri),
                timeout=timeout,
                headers=headers,
                params=rest_helpers.flatten_query_params(query_params),
                data=body,
            )
            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
            # subclass.
            if response.status_code >= 400:
                raise core_exceptions.from_http_response(response)
            # Return the response
            resp = compute.Operation.from_json(
                response.content, ignore_unknown_fields=True
            )
            resp = self._interceptor.post_patch(resp)
            return resp

    @property
    def delete(
        self,
    ) -> Callable[[compute.DeleteRegionHealthCheckServiceRequest], compute.Operation]:
        # Lazily create and cache the stub for the ``delete`` RPC.
        stub = self._STUBS.get("delete")
        if not stub:
            stub = self._STUBS["delete"] = self._Delete(
                self._session, self._host, self._interceptor
            )
        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
        # In C++ this would require a dynamic_cast
        return stub  # type: ignore

    @property
    def get(
        self,
    ) -> Callable[
        [compute.GetRegionHealthCheckServiceRequest], compute.HealthCheckService
    ]:
        # Lazily create and cache the stub for the ``get`` RPC.
        stub = self._STUBS.get("get")
        if not stub:
            stub = self._STUBS["get"] = self._Get(
                self._session, self._host, self._interceptor
            )
        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
        # In C++ this would require a dynamic_cast
        return stub  # type: ignore

    @property
    def insert(
        self,
    ) -> Callable[[compute.InsertRegionHealthCheckServiceRequest], compute.Operation]:
        # Lazily create and cache the stub for the ``insert`` RPC.
        stub = self._STUBS.get("insert")
        if not stub:
            stub = self._STUBS["insert"] = self._Insert(
                self._session, self._host, self._interceptor
            )
        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
        # In C++ this would require a dynamic_cast
        return stub  # type: ignore

    @property
    def list(
        self,
    ) -> Callable[
        [compute.ListRegionHealthCheckServicesRequest], compute.HealthCheckServicesList
    ]:
        # Lazily create and cache the stub for the ``list`` RPC.
        stub = self._STUBS.get("list")
        if not stub:
            stub = self._STUBS["list"] = self._List(
                self._session, self._host, self._interceptor
            )
        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
        # In C++ this would require a dynamic_cast
        return stub  # type: ignore

    @property
    def patch(
        self,
    ) -> Callable[[compute.PatchRegionHealthCheckServiceRequest], compute.Operation]:
        # Lazily create and cache the stub for the ``patch`` RPC.
        stub = self._STUBS.get("patch")
        if not stub:
            stub = self._STUBS["patch"] = self._Patch(
                self._session, self._host, self._interceptor
            )
        # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
        # In C++ this would require a dynamic_cast
        return stub  # type: ignore

    def close(self):
        """Release the underlying authorized HTTP session."""
        self._session.close()
# Explicit public surface of this module.
__all__ = ("RegionHealthCheckServicesRestTransport",)
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class TaskService100TaskService(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
TaskService100TaskService - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'odata_context': 'Odata400Context',
'odata_id': 'Odata400Id',
'odata_type': 'Odata400Type',
'completed_task_over_write_policy': 'TaskService100OverWritePolicy',
'description': 'ResourceDescription',
'id': 'ResourceId',
'life_cycle_event_on_task_state_change': 'bool',
'name': 'ResourceName',
'oem': 'ResourceOem',
'status': 'ResourceStatus',
'tasks': 'TaskCollectionTaskCollection'
}
self.attribute_map = {
'odata_context': '@odata.context',
'odata_id': '@odata.id',
'odata_type': '@odata.type',
'completed_task_over_write_policy': 'CompletedTaskOverWritePolicy',
'description': 'Description',
'id': 'Id',
'life_cycle_event_on_task_state_change': 'LifeCycleEventOnTaskStateChange',
'name': 'Name',
'oem': 'Oem',
'status': 'Status',
'tasks': 'Tasks'
}
self._odata_context = None
self._odata_id = None
self._odata_type = None
self._completed_task_over_write_policy = None
self._description = None
self._id = None
self._life_cycle_event_on_task_state_change = None
self._name = None
self._oem = None
self._status = None
self._tasks = None
@property
def odata_context(self):
"""
Gets the odata_context of this TaskService100TaskService.
:return: The odata_context of this TaskService100TaskService.
:rtype: Odata400Context
"""
return self._odata_context
@odata_context.setter
def odata_context(self, odata_context):
"""
Sets the odata_context of this TaskService100TaskService.
:param odata_context: The odata_context of this TaskService100TaskService.
:type: Odata400Context
"""
self._odata_context = odata_context
@property
def odata_id(self):
"""
Gets the odata_id of this TaskService100TaskService.
:return: The odata_id of this TaskService100TaskService.
:rtype: Odata400Id
"""
return self._odata_id
@odata_id.setter
def odata_id(self, odata_id):
"""
Sets the odata_id of this TaskService100TaskService.
:param odata_id: The odata_id of this TaskService100TaskService.
:type: Odata400Id
"""
self._odata_id = odata_id
@property
def odata_type(self):
"""
Gets the odata_type of this TaskService100TaskService.
:return: The odata_type of this TaskService100TaskService.
:rtype: Odata400Type
"""
return self._odata_type
@odata_type.setter
def odata_type(self, odata_type):
"""
Sets the odata_type of this TaskService100TaskService.
:param odata_type: The odata_type of this TaskService100TaskService.
:type: Odata400Type
"""
self._odata_type = odata_type
@property
def completed_task_over_write_policy(self):
"""
Gets the completed_task_over_write_policy of this TaskService100TaskService.
Overwrite policy of completed tasks
:return: The completed_task_over_write_policy of this TaskService100TaskService.
:rtype: TaskService100OverWritePolicy
"""
return self._completed_task_over_write_policy
@completed_task_over_write_policy.setter
def completed_task_over_write_policy(self, completed_task_over_write_policy):
"""
Sets the completed_task_over_write_policy of this TaskService100TaskService.
Overwrite policy of completed tasks
:param completed_task_over_write_policy: The completed_task_over_write_policy of this TaskService100TaskService.
:type: TaskService100OverWritePolicy
"""
self._completed_task_over_write_policy = completed_task_over_write_policy
@property
def description(self):
"""
Gets the description of this TaskService100TaskService.
:return: The description of this TaskService100TaskService.
:rtype: ResourceDescription
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this TaskService100TaskService.
:param description: The description of this TaskService100TaskService.
:type: ResourceDescription
"""
self._description = description
@property
def id(self):
"""
Gets the id of this TaskService100TaskService.
:return: The id of this TaskService100TaskService.
:rtype: ResourceId
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this TaskService100TaskService.
:param id: The id of this TaskService100TaskService.
:type: ResourceId
"""
self._id = id
@property
def life_cycle_event_on_task_state_change(self):
"""
Gets the life_cycle_event_on_task_state_change of this TaskService100TaskService.
Send an Event upon Task State Change.
:return: The life_cycle_event_on_task_state_change of this TaskService100TaskService.
:rtype: bool
"""
return self._life_cycle_event_on_task_state_change
@life_cycle_event_on_task_state_change.setter
def life_cycle_event_on_task_state_change(self, life_cycle_event_on_task_state_change):
"""
Sets the life_cycle_event_on_task_state_change of this TaskService100TaskService.
Send an Event upon Task State Change.
:param life_cycle_event_on_task_state_change: The life_cycle_event_on_task_state_change of this TaskService100TaskService.
:type: bool
"""
self._life_cycle_event_on_task_state_change = life_cycle_event_on_task_state_change
@property
def name(self):
"""
Gets the name of this TaskService100TaskService.
:return: The name of this TaskService100TaskService.
:rtype: ResourceName
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TaskService100TaskService.
:param name: The name of this TaskService100TaskService.
:type: ResourceName
"""
self._name = name
@property
def oem(self):
"""
Gets the oem of this TaskService100TaskService.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this TaskService100TaskService.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this TaskService100TaskService.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this TaskService100TaskService.
:type: ResourceOem
"""
self._oem = oem
@property
def status(self):
"""
Gets the status of this TaskService100TaskService.
:return: The status of this TaskService100TaskService.
:rtype: ResourceStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this TaskService100TaskService.
:param status: The status of this TaskService100TaskService.
:type: ResourceStatus
"""
self._status = status
@property
def tasks(self):
"""
Gets the tasks of this TaskService100TaskService.
References to the Tasks collection.
:return: The tasks of this TaskService100TaskService.
:rtype: TaskCollectionTaskCollection
"""
return self._tasks
@tasks.setter
def tasks(self, tasks):
"""
Sets the tasks of this TaskService100TaskService.
References to the Tasks collection.
:param tasks: The tasks of this TaskService100TaskService.
:type: TaskCollectionTaskCollection
"""
self._tasks = tasks
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
|
''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                                              PYDB      id          notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, dict_contains, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, STATE_RUN
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
import pydevd_file_utils
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
# Protocol command ids (see the module docstring for the wire format).
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901

# Map of command id (as a string) to the CMD_* constant name, used for logging.
# Derived from the constants above so it can never drift out of sync with them
# (the previous hand-written table duplicated every entry and had to be kept
# in step manually whenever a command was added or renumbered).
ID_TO_MEANING = dict(
    (str(value), name)
    for name, value in list(globals().items())
    if name.startswith('CMD_') and isinstance(value, int)
)
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"  # placeholder substituted by the build; left as-is in source checkouts
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()  # cached once; used to re-encode filenames sent to the frontend
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
    """Write a trace message to stderr when `level` is within the configured verbosity.

    Levels:
        0 most serious warnings/errors
        1 warnings/significant events
        2 informational trace
    """
    if level > DebugInfoHolder.DEBUG_TRACE_LEVEL:
        return
    # stderr may already be closed if the program's console is gone, so
    # writing is strictly best-effort.
    try:
        sys.stderr.write('%s\n' % (args,))
    except:
        pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
    '''
    Holder for the global debugger (the single PyDB instance for this process).
    '''
    global_dbg = None # Note: don't rename (the name is used in our attach to process)
#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
    """Return the debugger registered in GlobalDebuggerHolder (None if not set up)."""
    return GlobalDebuggerHolder.global_dbg
GetGlobalDebugger = get_global_debugger # Backward-compatibility
#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
    """Register `dbg` as the process-wide debugger instance."""
    GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
    """Base class for pydevd's own helper threads.

    Instances register themselves in `created_pydb_daemon_threads` while
    running and are excluded from tracing by the debugger itself.
    Subclasses implement `_on_run`.
    """
    created_pydb_daemon_threads = {}  # thread instance -> 1 while the thread is running
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)  # daemon: the interpreter may exit without joining us
        self.killReceived = False
        self.pydev_do_not_trace = True
        self.is_pydev_daemon_thread = True
    def run(self):
        """Register the thread, run `_on_run`, and always unregister on exit."""
        created_pydb_daemon = self.created_pydb_daemon_threads
        created_pydb_daemon[self] = 1
        try:
            try:
                if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
                    # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'
                    # module, and the new instance of main thread is created
                    import org.python.core as PyCore #@UnresolvedImport
                    ss = PyCore.PySystemState()
                    # Note: Py.setSystemState() affects only the current thread.
                    PyCore.Py.setSystemState(ss)
                self._on_run()
            except:
                # sys/traceback may be None during interpreter shutdown
                if sys is not None and traceback is not None:
                    traceback.print_exc()
        finally:
            del created_pydb_daemon[self]
    def _on_run(self):
        """Subclass hook: the thread's actual work loop."""
        raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
    def do_kill_pydev_thread(self):
        #that was not working very well because jython gave some socket errors
        self.killReceived = True
    def _stop_trace(self):
        """Disable debugger tracing for this thread (unless on buggy old Jython)."""
        if self.pydev_do_not_trace:
            disable_tracing = True
            if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
                # don't run untraced threads if we're in jython 2.2.1 or lower
                # jython bug: if we start a thread and another thread changes the tracing facility
                # it affects other threads (it's not set only for the thread but globally)
                # Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
                disable_tracing = False
            if disable_tracing:
                pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
    """ reader thread reads and dispatches commands in an infinite loop """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Reader")
        # imported lazily to avoid an import cycle at module load time
        from _pydevd_bundle.pydevd_process_net_command import process_net_command
        self.process_net_command = process_net_command
        self.global_debugger_holder = GlobalDebuggerHolder
    def do_kill_pydev_thread(self):
        #We must close the socket so that it doesn't stay halted there.
        self.killReceived = True
        try:
            self.sock.shutdown(SHUT_RD) #shutdown the socket for read
        except:
            #just ignore that
            pass
    def _on_run(self):
        """Read newline-terminated commands from the socket and dispatch each one."""
        self._stop_trace()
        read_buffer = ""  # accumulates partial data until a full '\n'-terminated command arrives
        try:
            while not self.killReceived:
                try:
                    r = self.sock.recv(1024)
                except:
                    if not self.killReceived:
                        traceback.print_exc()
                    self.handle_except()
                    return #Finished communication.
                #Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
                #internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
                #on python 2 may need to be converted to the filesystem encoding).
                if hasattr(r, 'decode'):
                    r = r.decode('utf-8')
                read_buffer += r
                if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
                    sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
                    sys.stderr.flush()
                if len(read_buffer) == 0:
                    # empty recv: the peer closed the connection
                    self.handle_except()
                    break
                while read_buffer.find('\n') != -1:
                    command, read_buffer = read_buffer.split('\n', 1)
                    # wire format: cmd_id \t seq \t payload (see module docstring)
                    args = command.split('\t', 2)
                    try:
                        cmd_id = int(args[0])
                        pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
                        self.process_command(cmd_id, int(args[1]), args[2])
                    except:
                        traceback.print_exc()
                        sys.stderr.write("Can't process net command: %s\n" % command)
                        sys.stderr.flush()
        except:
            traceback.print_exc()
            self.handle_except()
    def handle_except(self):
        # communication is broken: tear down the debugging session
        self.global_debugger_holder.global_dbg.finish_debugging_session()
    def process_command(self, cmd_id, seq, text):
        self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
    """ writer thread writes out the commands in an infinite loop """
    def __init__(self, sock):
        PyDBDaemonThread.__init__(self)
        self.sock = sock
        self.setName("pydevd.Writer")
        self.cmdQueue = _queue.Queue()  # NetCommand instances waiting to be sent
        if pydevd_vm_type.get_vm_type() == 'python':
            self.timeout = 0
        else:
            self.timeout = 0.1
    def add_command(self, cmd):
        """ cmd is NetCommand """
        if not self.killReceived: #we don't take new data after everybody die
            self.cmdQueue.put(cmd)
    def _on_run(self):
        """ just loop and write responses """
        self._stop_trace()
        get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
        try:
            while True:
                try:
                    try:
                        if get_has_timeout:
                            # block=1, timeout=0.1: wake periodically to check killReceived
                            cmd = self.cmdQueue.get(1, 0.1)
                        else:
                            time.sleep(.01)
                            cmd = self.cmdQueue.get(0)
                    except _queue.Empty:
                        if self.killReceived:
                            try:
                                self.sock.shutdown(SHUT_WR)
                                self.sock.close()
                            except:
                                pass
                            return #break if queue is empty and killReceived
                        else:
                            continue
                except:
                    #pydevd_log(0, 'Finishing debug communication...(1)')
                    #when liberating the thread here, we could have errors because we were shutting down
                    #but the thread was still not liberated
                    return
                out = cmd.outgoing
                if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
                    out_message = 'sending cmd --> '
                    out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
                    out_message += ' '
                    out_message += unquote(unquote(out)).replace('\n', ' ')
                    try:
                        sys.stderr.write('%s\n' % (out_message,))
                    except:
                        pass
                if IS_PY3K:
                    out = bytearray(out, 'utf-8')
                self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
                if cmd.id == CMD_EXIT:
                    break
                if time is None:
                    break #interpreter shutdown
                time.sleep(self.timeout)
        except Exception:
            GlobalDebuggerHolder.global_dbg.finish_debugging_session()
            if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
                traceback.print_exc()
    def empty(self):
        """Return True when there are no pending commands to send."""
        return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
    """Bind to `port` on all interfaces and wait for the debugger frontend to connect.

    Returns the connected socket. On failure to listen/accept, logs the error
    and exits the process.
    """
    s = socket(AF_INET, SOCK_STREAM)
    s.settimeout(None)
    try:
        from socket import SO_REUSEPORT
        s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
    except (ImportError, IOError, OSError):
        # SO_REUSEPORT may be missing from the socket module (platform) or be
        # defined but rejected by the kernel (setsockopt raising socket.error /
        # OSError on older Linux kernels). Either way, fall back to
        # SO_REUSEADDR so quick restarts can rebind the port.
        s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    s.bind(('', port))
    pydevd_log(1, "Bound to port ", str(port))
    try:
        s.listen(1)
        newSock, _addr = s.accept()
        pydevd_log(1, "Connection accepted")
        # closing server socket is not necessary but we don't need it
        s.shutdown(SHUT_RDWR)
        s.close()
        return newSock
    except:
        sys.stderr.write("Could not bind to port: %s\n" % (port,))
        sys.stderr.flush()
        traceback.print_exc()
        sys.exit(1) #TODO: is it safe?
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
    """Connect to `host`:`port`, retrying up to 100 times (0.2s apart).

    Returns the connected socket; exits the process if every attempt fails.
    """
    pydevd_log(1, "Connecting to ", host, ":", str(port))
    s = socket(AF_INET, SOCK_STREAM)
    MAX_TRIES = 100
    for _attempt in range(MAX_TRIES):
        try:
            s.connect((host, port))
        except:
            # frontend may not be listening yet; wait a little and retry
            time.sleep(0.2)
            continue
        pydevd_log(1, "Connected.")
        return s
    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    sys.stderr.flush()
    traceback.print_exc()
    sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
    """A single command received from or to be sent to the debugger frontend.

    `next_seq` holds the last sequence number handed out by the daemon;
    daemon-originated sequence numbers are even (frontend ones are odd).
    """
    next_seq = 0 # sequence numbers

    def __init__(self, id, seq, text):
        """Build the command and pre-compute its wire representation.

        A `seq` of 0 means "allocate the next daemon sequence number".
        Carriage returns in `text` are quoted away by the encoding step.
        """
        if seq == 0:
            NetCommand.next_seq += 2
            seq = NetCommand.next_seq
        self.id = id
        self.seq = seq
        self.text = text
        self.outgoing = '%s\t%s\t%s\n' % (id, seq, quote(to_string(text), '/<>_=" \t'))
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
    """Factory of NetCommand instances for every message the daemon sends.

    Each make_* method returns a ready-to-send NetCommand; if building it
    fails, an error command (CMD_ERROR) carrying the traceback is returned
    instead, so callers never have to handle exceptions themselves.
    """
    def _thread_to_xml(self, thread):
        """ thread information as XML """
        name = pydevd_xml.make_valid_xml_value(thread.getName())
        cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
        return cmdText
    def make_error_message(self, seq, text):
        cmd = NetCommand(CMD_ERROR, seq, text)
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
            sys.stderr.write("Error: %s" % (text,))
        return cmd
    def make_thread_created_message(self, thread):
        cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
    def make_process_created_message(self):
        cmdText = '<process/>'
        return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
    def make_custom_frame_created_message(self, frameId, frameDescription):
        frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
        cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
        return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
    def make_list_threads_message(self, seq):
        """ returns thread listing as XML """
        try:
            t = threading.enumerate()
            cmd_text = ["<xml>"]
            append = cmd_text.append
            for i in t:
                # Bug fix: this used to be `t.isAlive()` (called on the list
                # returned by threading.enumerate()), which always raised
                # AttributeError and made this method return an error reply.
                if i.isAlive():
                    append(self._thread_to_xml(i))
            append("</xml>")
            return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
        except:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_variable_changed_message(self, seq, payload):
        # notify debugger that value was changed successfully
        return NetCommand(CMD_RETURN, seq, payload)
    def make_io_message(self, v, ctx, dbg=None):
        '''
        @param v: the message to pass to the debug server
        @param ctx: 1 for stdio 2 for stderr
        @param dbg: If not none, add to the writer
        '''
        try:
            if len(v) > MAX_IO_MSG_SIZE:
                # truncate huge output so the debugger stays responsive
                v = v[0:MAX_IO_MSG_SIZE]
                v += '...'
            v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= \t'))
            net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
        except:
            net = self.make_error_message(0, get_exception_traceback_str())
        if dbg:
            dbg.writer.add_command(net)
        return net
    def make_version_message(self, seq):
        try:
            return NetCommand(CMD_VERSION, seq, VERSION_STRING)
        except:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_thread_killed_message(self, id):
        try:
            return NetCommand(CMD_THREAD_KILL, 0, str(id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_thread_suspend_str(self, thread_id, frame, stop_reason, message):
        """ <xml>
            <thread id="id" stop_reason="reason">
                    <frame id="id" name="functionName " file="file" line="line">
                    <var variable stuffff....
                </frame>
            </thread>
        """
        cmd_text_list = ["<xml>"]
        append = cmd_text_list.append
        make_valid_xml_value = pydevd_xml.make_valid_xml_value
        if message:
            message = make_valid_xml_value(message)
        append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))
        curr_frame = frame
        try:
            # walk the stack from the suspended frame outwards (f_back)
            while curr_frame:
                #print cmdText
                my_id = id(curr_frame)
                #print "id is ", my_id
                if curr_frame.f_code is None:
                    break #Iron Python sometimes does not have it!
                my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
                if my_name is None:
                    break #Iron Python sometimes does not have it!
                #print "name is ", my_name
                abs_path_real_path_and_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curr_frame)
                myFile = pydevd_file_utils.norm_file_to_client(abs_path_real_path_and_base[0])
                if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
                    # myFile is a byte string encoded using the file system encoding
                    # convert it to utf8
                    myFile = myFile.decode(file_system_encoding).encode("utf-8")
                #print "file is ", myFile
                #myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
                myLine = str(curr_frame.f_lineno)
                #print "line is ", myLine
                #the variables are all gotten 'on-demand'
                #variables = pydevd_xml.frame_vars_to_xml(curr_frame.f_locals)
                variables = ''
                append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name)))
                append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
                append(variables)
                append("</frame>")
                curr_frame = curr_frame.f_back
        except:
            traceback.print_exc()
        append("</thread></xml>")
        return ''.join(cmd_text_list)
    def make_thread_suspend_message(self, thread_id, frame, stop_reason, message):
        try:
            return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_thread_run_message(self, id, reason):
        try:
            return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_get_variable_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_VARIABLE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_get_array_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_ARRAY, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_get_description_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_get_frame_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FRAME, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_evaluate_expression_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_get_completions_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_get_file_contents(self, seq, payload):
        try:
            return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_send_breakpoint_exception_message(self, seq, payload):
        try:
            return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
        try:
            # report the innermost (deepest) traceback frame
            while trace_obj.tb_next is not None:
                trace_obj = trace_obj.tb_next
            exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', '  ') or 'exception: type unknown'
            exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', '  ') or 'exception: no description'
            payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
                self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
        try:
            return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_send_console_message(self, seq, payload):
        try:
            return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_custom_operation_message(self, seq, payload):
        try:
            return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
        except Exception:
            return self.make_error_message(seq, get_exception_traceback_str())
    def make_load_source_message(self, seq, source, dbg=None):
        try:
            net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
        except:
            net = self.make_error_message(0, get_exception_traceback_str())
        if dbg:
            dbg.writer.add_command(net)
        return net
    def make_show_console_message(self, thread_id, frame):
        try:
            return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, ''))
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_input_requested_message(self, started):
        try:
            return NetCommand(CMD_INPUT_REQUESTED, 0, started)
        except:
            return self.make_error_message(0, get_exception_traceback_str())
    def make_exit_message(self):
        try:
            net = NetCommand(CMD_EXIT, 0, '')
        except:
            net = self.make_error_message(0, get_exception_traceback_str())
        return net
# Kinds of internal (debugger-generated) commands; see InternalThreadCommand below.
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
    """ internal commands are generated/executed by the debugger.
    The reason for their existence is that some commands have to be executed
    on specific threads. These are the InternalThreadCommands that get
    posted to PyDB.cmdQueue.
    """
    def can_be_executed_by(self, thread_id):
        '''By default, it must be in the same thread to be executed
        '''
        return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
    def do_it(self, dbg):
        # Subclass hook: perform the command with the given PyDB instance.
        raise NotImplementedError("you have to override do_it")
class ReloadCodeCommand(InternalThreadCommand):
    """Internal command that reloads a module's code in place via pydevd_reload.xreload."""
    def __init__(self, module_name, thread_id):
        self.thread_id = thread_id
        self.module_name = module_name
        self.executed = False
        self.lock = thread.allocate_lock()  # guards `executed` so the reload runs at most once
    def can_be_executed_by(self, thread_id):
        if self.thread_id == '*':
            return True #Any thread can execute it!
        return InternalThreadCommand.can_be_executed_by(self, thread_id)
    def do_it(self, dbg):
        # several threads may pick this command up; only the first one reloads
        self.lock.acquire()
        try:
            if self.executed:
                return
            self.executed = True
        finally:
            self.lock.release()
        module_name = self.module_name
        if not dict_contains(sys.modules, module_name):
            # fall back to the last dotted component (e.g. 'pkg.mod' -> 'mod')
            if '.' in module_name:
                new_module_name = module_name.split('.')[-1]
                if dict_contains(sys.modules, new_module_name):
                    module_name = new_module_name
        if not dict_contains(sys.modules, module_name):
            sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
            # Too much info...
            # sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
        else:
            sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
            from _pydevd_bundle import pydevd_reload
            if pydevd_reload.xreload(sys.modules[module_name]):
                sys.stderr.write('pydev debugger: reload finished\n')
            else:
                sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
 """Notify the client that the given thread was killed."""
 def __init__(self, thread_id):
  self.thread_id = thread_id
 def do_it(self, dbg):
  pydevd_log(1, "killing ", str(self.thread_id))
  killed_msg = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
  dbg.writer.add_command(killed_msg)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
 """Resume execution of a suspended thread (clear any pending step)."""
 def __init__(self, thread_id):
  self.thread_id = thread_id
 def do_it(self, dbg):
  target = pydevd_find_thread_by_id(self.thread_id)
  if target is None:
   return
  info = target.additional_info
  info.pydev_step_cmd = -1
  info.pydev_step_stop = None
  info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
 """Ask a thread to perform one step of the given step command."""
 def __init__(self, thread_id, cmd_id):
  self.thread_id = thread_id
  self.cmd_id = cmd_id
 def do_it(self, dbg):
  target = pydevd_find_thread_by_id(self.thread_id)
  if target is None:
   return
  info = target.additional_info
  info.pydev_step_cmd = self.cmd_id
  info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
 """Move a thread's program counter to a given line ("set next statement")."""
 def __init__(self, thread_id, cmd_id, line, func_name):
  self.thread_id = thread_id
  self.cmd_id = cmd_id
  self.line = line
  if IS_PY2 and isinstance(func_name, unicode):
   # On cython with python 2.X it requires an str, not unicode (but on
   # python 3.3 it should be a str, not bytes).
   func_name = func_name.encode('utf-8')
  self.func_name = func_name
 def do_it(self, dbg):
  target = pydevd_find_thread_by_id(self.thread_id)
  if target is None:
   return
  info = target.additional_info
  info.pydev_step_cmd = self.cmd_id
  info.pydev_next_line = int(self.line)
  info.pydev_func_name = self.func_name
  info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
 """Resolve a (possibly compound) variable and reply with its children as XML."""
 def __init__(self, seq, thread_id, frame_id, scope, attrs):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.scope = scope
  self.attributes = attrs
 def do_it(self, dbg):
  """ Converts request into python variable """
  try:
   xml = "<xml>"
   valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
   if valDict is None:
    valDict = {}
   keys = valDict.keys()
   # Sort keys for stable output; the mechanism differs per interpreter:
   # py2 list.sort(cmp), py3 sorted(key=cmp_to_key), old Jython sorted(cmp=...).
   if hasattr(keys, 'sort'):
    keys.sort(compare_object_attrs) #Python 3.0 does not have it
   else:
    if IS_PY3K:
     keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
    else:
     keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
   for k in keys:
    xml += pydevd_xml.var_to_xml(valDict[k], to_string(k))
   xml += "</xml>"
   cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
   dbg.writer.add_command(cmd)
  except Exception:
   cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
   dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
 """Fetch a 2-d slice of an array-like variable, rendered as XML."""
 def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.scope = scope
  # the variable name is the last tab-separated component of the attrs path
  self.name = attrs.split("\t")[-1]
  self.attrs = attrs
  self.roffset = int(roffset)
  self.coffset = int(coffset)
  self.rows = int(rows)
  self.cols = int(cols)
  self.format = format
 def do_it(self, dbg):
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   value = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
   body = pydevd_vars.table_like_struct_to_xml(
    value, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format)
   reply = dbg.cmd_factory.make_get_array_message(self.sequence, body)
   dbg.writer.add_command(reply)
  except:
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
 """Assign a new value to a variable and echo the result back as XML."""
 def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.scope = scope
  self.attr = attr
  self.expression = expression
 def do_it(self, dbg):
  """ Converts request into python variable """
  try:
   new_value = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
   body = "<xml>" + pydevd_xml.var_to_xml(new_value, "") + "</xml>"
   reply = dbg.cmd_factory.make_variable_changed_message(self.sequence, body)
   dbg.writer.add_command(reply)
  except Exception:
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
 """Reply with the local variables of a single frame, rendered as XML."""
 def __init__(self, seq, thread_id, frame_id):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
 def do_it(self, dbg):
  """ Converts request into python variable """
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   if frame is None:
    #pydevd_vars.dump_frames(self.thread_id)
    #don't print this error: frame not found: means that the client is not synchronized (but that's ok)
    reply = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
    dbg.writer.add_command(reply)
   else:
    hidden_ns = pydevconsole.get_ipython_hidden_vars()
    body = "<xml>" + pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
    del frame  # drop the frame reference as early as possible
    body += "</xml>"
    reply = dbg.cmd_factory.make_get_frame_message(self.sequence, body)
    dbg.writer.add_command(reply)
  except:
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
 """Evaluate (or exec) an expression in a frame and reply with its XML value."""
 def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.expression = expression
  self.doExec = doExec
  self.doTrim = doTrim
  self.temp_name = temp_name
 def do_it(self, dbg):
  """ Converts request into python variable """
  try:
   result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
   if self.temp_name != "":
    # also bind the result to a temporary name inside the frame
    pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
   body = "<xml>" + pydevd_xml.var_to_xml(result, self.expression, self.doTrim) + "</xml>"
   reply = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, body)
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   sys.stderr.write('%s\n' % (err,))
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
 """Gets the completions in a given scope.

 Resolves the frame identified by (thread_id, frame_id) and answers with
 completions for `act_tok`; a missing frame or any error produces an error
 reply instead.
 """
 def __init__(self, seq, thread_id, frame_id, act_tok):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.act_tok = act_tok
 def do_it(self, dbg):
  """ Converts request into completions """
  # Note: a vestigial `remove_path`/sys.path cleanup that was never set to a
  # non-None value was removed here.
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   if frame is not None:
    msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
    cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
    dbg.writer.add_command(cmd)
   else:
    cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
    dbg.writer.add_command(cmd)
  except:
   exc = get_exception_traceback_str()
   sys.stderr.write('%s\n' % (exc,))
   # Fixed copy/paste error message: this command fetches completions, it
   # does not evaluate an expression.
   cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error getting completions " + exc)
   dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
 """ Fetch the variable description stub from the debug console
 """
 def __init__(self, seq, thread_id, frame_id, expression):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.expression = expression
 def do_it(self, dbg):
  """Build the description XML for the expression and send it back."""
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   raw = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
   escaped = pydevd_xml.make_valid_xml_value(quote(raw, '/>_= \t'))
   body = '<xml><var name="" type="" value="%s"/></xml>' % escaped
   reply = dbg.cmd_factory.make_get_description_message(self.sequence, body)
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
 """ Send details of exception raised while evaluating conditional breakpoint """
 def __init__(self, thread_id, exc_type, stacktrace):
  self.sequence = 0
  self.thread_id = thread_id
  self.stacktrace = stacktrace
  self.exc_type = exc_type
 def do_it(self, dbg):
  """Serialize the stored stacktrace as <frame .../> entries and send it."""
  try:
   make_valid = pydevd_xml.make_valid_xml_value
   frames_xml = "<xml>"
   for filename, line, methodname, methodobj in self.stacktrace:
    if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
     # filename is a byte string encoded using the file system encoding
     # convert it to utf8
     filename = filename.decode(file_system_encoding).encode("utf-8")
    frames_xml += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
        % (self.thread_id, make_valid(filename), line, make_valid(methodname), make_valid(methodobj))
   frames_xml += "</xml>"
   reply = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + frames_xml)
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   sys.stderr.write('%s\n' % (err,))
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
 """ Send details of the exception that was caught and where we've broken in.
 """
 def __init__(self, thread_id, arg, curr_frame_id):
  '''
  :param arg: exception type, description, traceback object
  '''
  self.sequence = 0
  self.thread_id = thread_id
  self.curr_frame_id = curr_frame_id
  self.arg = arg
 def do_it(self, dbg):
  try:
   reply = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
   del self.arg  # don't keep the traceback object alive on this command
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   sys.stderr.write('%s\n' % (err,))
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
 """Tell the client that handling of the current exception has proceeded."""
 def __init__(self, thread_id):
  self.sequence = 0
  self.thread_id = thread_id
 def do_it(self, dbg):
  try:
   reply = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   sys.stderr.write('%s\n' % (err,))
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
 """ Execute the given command in the debug console """
 def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.line = line
  self.buffer_output = buffer_output
 def do_it(self, dbg):
  """Reply with console XML of the shape:
  <xml>
   <output message=output_message></output>
   <error message=error_message></error>
   <more>true/false</more>
  </xml>
  """
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   if frame is None:
    # no valid frame selected: answer with a console error message
    from _pydevd_bundle.pydevd_console import ConsoleMessage
    message = ConsoleMessage()
    message.add_console_message(
     pydevd_console.CONSOLE_ERROR,
     "Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
    )
    cmd = dbg.cmd_factory.make_error_message(self.sequence, message.to_xml())
   else:
    message = pydevd_console.execute_console_command(
     frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
    cmd = dbg.cmd_factory.make_send_console_message(self.sequence, message.to_xml())
  except:
   err = get_exception_traceback_str()
   cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + err)
  dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
 """ Run a custom command on an expression
 """
 def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.scope = scope
  self.attrs = attrs
  self.style = style
  # the payload arrives url-encoded
  self.code_or_file = unquote_plus(encoded_code_or_file)
  self.fnname = fnname
 def do_it(self, dbg):
  try:
   result = pydevd_vars.custom_operation(
    self.thread_id, self.frame_id, self.scope, self.attrs,
    self.style, self.code_or_file, self.fnname)
   reply = dbg.cmd_factory.make_custom_operation_message(self.sequence, quote_plus(result))
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
 """ Fetch the completions in the debug console
 """
 def __init__(self, seq, thread_id, frame_id, act_tok):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.act_tok = act_tok
 def do_it(self, dbg):
  """Resolve the frame, compute completions for act_tok and send them."""
  try:
   frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
   body = pydevd_console.get_completions(frame, self.act_tok)
   reply = dbg.cmd_factory.make_send_console_message(self.sequence, body)
   dbg.writer.add_command(reply)
  except:
   err = get_exception_traceback_str()
   reply = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + err)
   dbg.writer.add_command(reply)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
 """Execute an expression via the debug console and reply with its XML value."""
 def __init__(self, seq, thread_id, frame_id, expression):
  self.sequence = seq
  self.thread_id = thread_id
  self.frame_id = frame_id
  self.expression = expression
 def do_it(self, dbg):
  """ Converts request into python variable """
  try:
   try:
    #don't trace new threads created by console command
    disable_trace_thread_modules()
    result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
    xml = "<xml>"
    xml += pydevd_xml.var_to_xml(result, "")
    xml += "</xml>"
    cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
    dbg.writer.add_command(cmd)
   except:
    exc = get_exception_traceback_str()
    sys.stderr.write('%s\n' % (exc,))
    cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
    dbg.writer.add_command(cmd)
  finally:
   # always re-enable tracing and flush output, even on error
   enable_trace_thread_modules()
   sys.stderr.flush()
   sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
 """Return the live thread whose pydevd id matches `thread_id`, else None.

 Matches either the exact id or a composite id ending in '|<tid>'.
 """
 try:
  # there was a deadlock here when I did not remove the tracing function when thread was dead
  alive = threading.enumerate()
  for candidate in alive:
   tid = get_thread_id(candidate)
   if thread_id == tid or thread_id.endswith('|' + tid):
    return candidate
  sys.stderr.write("Could not find thread %s\n" % thread_id)
  sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in alive])
  sys.stderr.flush()
 except:
  traceback.print_exc()
 return None
|
|
#!/usr/bin/env python
# PythonJS to Dart Translator
# by Brett Hartshorn - copyright 2013
# License: "New BSD"
import sys
import ast
import pythonjs
class TransformSuperCalls( ast.NodeVisitor ):
 """Rewrite `SomeClass.method(...)` calls into `SomeClass.__method(...)` for
 the given class names (the Dart backend emits static `__name` impls)."""
 def __init__(self, node, class_names):
  self._class_names = class_names
  self.visit(node)
 def visit_Call(self, node):
  func = node.func
  targets_known_class = (
   isinstance(func, ast.Attribute)
   and isinstance(func.value, ast.Name)
   and func.value.id in self._class_names
  )
  if targets_known_class:
   func.attr = '__' + func.attr
class CollectNames(ast.NodeVisitor):
 """Accumulate every ast.Name node encountered while visiting a tree."""
 def __init__(self):
  self._names = []
 def visit_Name(self, node):
  self._names.append(node)
def collect_names(node):
 """Return the list of all ast.Name nodes reachable from `node`."""
 collector = CollectNames()
 collector.visit(node)
 return collector._names
class DartGenerator( pythonjs.JSGenerator ):
 def __init__(self, requirejs=False, insert_runtime=False):
  # NOTE(review): the requirejs/insert_runtime arguments are accepted but
  # discarded -- the base class is always initialized with False/False.
  # Confirm whether forwarding them was intended.
  pythonjs.JSGenerator.__init__(self, requirejs=False, insert_runtime=False)
  self._classes = dict()  # class name -> ast.ClassDef node
  self._class_props = dict()  # class name -> set of property names
  self._raw_dict = False  # when True, visit_Dict emits a bare map literal
def visit_With(self, node):
s = []
for b in node.body:
a = self.visit(b)
a = a.replace('\\n', '\n')
a = a.strip()[1:-2] # strip `"x";` to `x`
s.append( a )
return '\n'.join(s)
def _visit_subscript_ellipsis(self, node):
name = self.visit(node.value)
return '%s.$wrapped' %name
def visit_List(self, node):
return 'new list([%s])' % ', '.join(map(self.visit, node.elts))
def visit_Dict(self, node):
a = []
for i in range( len(node.keys) ):
k = self.visit( node.keys[ i ] )
v = self.visit( node.values[i] )
a.append( '%s:%s'%(k,v) )
b = ','.join( a )
if self._raw_dict:
return '{%s}' %b
else:
return 'new dict( {%s} )' %b
 def visit_ClassDef(self, node):
  """Translate a Python class into Dart source.

  Two modes:
  - `@dart.extends` decorator: single inheritance via Dart `extends`,
   `self` rewritten to `this`, dunder methods mapped to Dart operators.
  - default: methods are emitted as static `__name` implementations plus
   thin instance wrappers, and bases become Dart `implements` interfaces
   (inherited wrappers are copied in at the bottom).
  Mutates the AST in place (renames, _prefix markers) and records the
  class in self._classes / self._class_props for later subclasses.
  """
  node._parents = set()
  out = []
  extends = False ## Dart has no support for multiple inheritance!
  props = set(['$wrapped'])
  bases = set()
  base_classes = set()
  self._classes[ node.name ] = node
  self._class_props[ node.name ] = props
  for decor in node.decorator_list: ## class decorators
   if isinstance(decor, ast.Call):
    # a plain call decorator declares extra property names
    props.update( [self.visit(a) for a in decor.args] )
   elif isinstance(decor, ast.Attribute) and isinstance(decor.value, ast.Name) and decor.value.id == 'dart':
    if decor.attr == 'extends':
     extends = True
     props.add('$wrapped')
     for name_node in collect_names( node ):
      if name_node.id == 'self':
       name_node.id = 'this'
    else:
     raise SyntaxError
  # gather base-class names, their properties and their transitive parents
  for base in node.bases:
   n = self.visit(base)
   if n == 'object':
    continue
   node._parents.add( n )
   bases.add( n )
   if n in self._class_props:
    props.update( self._class_props[n] )
    base_classes.add( self._classes[n] )
   else: ## special case - subclassing a builtin like `list`
    continue
   for p in self._classes[ n ]._parents:
    bases.add( p )
    props.update( self._class_props[p] )
    base_classes.add( self._classes[p] )
  if bases:
   if extends:
    assert len(bases) == 1
    out.append('class %s extends %s {'%(node.name, ','.join(bases)))
   else:
    #if bases[0] == 'object':
    # out.append('class %s {' %node.name)
    #else:
    out.append('class %s implements %s {'%(node.name, ', '.join(bases)))
  else:
   out.append('class %s {' %node.name)
  self.push()
  # declare all collected properties as untyped fields
  for p in props:
   out.append(self.indent()+ 'var %s;'%p)
  method_names = set()
  for b in node.body:
   if isinstance(b, ast.With):
    out.append( self.visit(b) )
   elif isinstance(b, ast.FunctionDef) and len(b.decorator_list): ##getter/setters
    for name_node in collect_names( b ):
     if name_node.id == 'self':
      name_node.id = 'this'
    b.args.args = b.args.args[1:]
    out.append( self.visit(b) )
   elif extends:
    # extends-mode: drop `self`, map dunders onto Dart operator syntax
    if isinstance(b, ast.FunctionDef):
     b.args.args = b.args.args[1:]
     if b.name == node.name:
      args = [self.visit(a) for a in b.args.args]
      args = ','.join(args)
      out.append(
       self.indent()+'%s(%s) : super() { this.__init__(%s); }'%(node.name, args, args)
      )
      b.name = '__init__'
     elif b.name == '__getitem__':
      b.name = ''
      b._prefix = 'operator []'
     elif b.name == '__setitem__':
      b.name = ''
      b._prefix = 'void operator []='
     elif b.name == '__add__':
      b.name = ''
      b._prefix = 'operator +'
     elif b.name == '__iadd__':
      b.name = ''
      b._prefix = 'void operator +='
     elif b.name == '__sub__':
      b.name = ''
      b._prefix = 'operator -'
     elif b.name == '__mul__':
      b.name = ''
      b._prefix = 'operator *'
     elif b.name == '__div__':
      b.name = ''
      b._prefix = 'operator /'
     elif b.name == '__or__':
      b.name = ''
      b._prefix = 'operator |'
     elif b.name == '__xor__':
      b.name = ''
      b._prefix = 'operator ^'
     line = self.visit(b)
     out.append( line )
   elif isinstance(b, ast.FunctionDef) and b.name == node.name:
    # constructor in static mode: emit static __init__ plus a Dart
    # constructor that forwards `this` and all arguments
    args, kwargs = self.get_args_kwargs_from_funcdef(b, skip_self=True)
    kwargs_init = ['%s:%s' %(x.split(':')[0], x.split(':')[0]) for x in kwargs]
    #args = [self.visit(a) for a in b.args.args][1:]
    #args = ','.join(args)
    b._prefix = 'static void'
    b.name = '__init__'
    out.append( self.visit(b) )
    if args:
     args = ','.join(args)
     if kwargs:
      out.append(
       self.indent()+'%s(%s, {%s}) {%s.__init__(this,%s,%s);}'%(node.name, args, ','.join(kwargs), node.name, args, ','.join(kwargs_init))
      )
     else:
      out.append(
       self.indent()+'%s(%s) {%s.__init__(this,%s);}'%(node.name, args, node.name, args)
      )
    elif kwargs:
     out.append(
      self.indent()+'%s( {%s} ) {%s.__init__(this,%s);}'%(node.name, ','.join(kwargs), node.name, ','.join(kwargs_init))
     )
    else:
     out.append(
      self.indent()+'%s() {%s.__init__(this);}'%(node.name, node.name)
     )
   elif isinstance(b, ast.FunctionDef):
    # regular method in static mode: wrapper (operator or named) that
    # delegates to a static `__name(this, ...)` implementation
    method_names.add( b.name )
    TransformSuperCalls( b, bases )
    operator = False
    if b.name == '__getitem__':
     operator = 'operator []'
    elif b.name == '__setitem__':
     operator = 'operator []='
    elif b.name == '__add__':
     operator = 'operator +'
    elif b.name == '__sub__':
     operator = 'operator -'
    elif b.name == '__mul__':
     operator = 'operator *'
    elif b.name == '__div__':
     operator = 'operator /'
    elif b.name == '__and__':
     operator = 'operator &'
    elif b.name == '__or__':
     operator = 'operator |'
    elif b.name == '__xor__':
     operator = 'operator ^'
    elif b.name == '__lshift__':
     operator = 'operator <<'
    elif b.name == '__rshift__':
     operator = 'operator >>'
    args = [self.visit(a) for a in b.args.args][1:]
    args = ','.join(args)
    if operator and args:
     out.append(self.indent()+ '%s(%s) { return %s.__%s(this,%s); }'%(operator, args, node.name, b.name, args) )
    elif operator:
     out.append(self.indent()+ '%s() { return %s.__%s(this); }'%(operator, node.name, b.name) )
    elif args:
     out.append(self.indent()+ '%s(%s) { return %s.__%s(this,%s); }'%(b.name, args, node.name, b.name, args) )
    else:
     out.append(self.indent()+ '%s() { return %s.__%s(this); }'%(b.name, node.name, b.name) )
    b._prefix = 'static'
    name = b.name
    b.name = '__%s'%name
    out.append( self.visit(b) )
    b.name = name
   else:
    line = self.visit(b)
    if line.startswith('var '):
     out.append( self.indent()+line )
    else:
     out.append( line )
  # implements-mode inheritance: copy wrappers for base methods that were
  # not overridden, dispatching to the base's static implementations
  if not extends and base_classes:
   for bnode in base_classes:
    for b in bnode.body:
     if isinstance(b, ast.FunctionDef):
      if b.name == '__init__': continue
      if b.name in method_names: continue
      args = [self.visit(a) for a in b.args.args][1:]
      args = ','.join(args)
      if args:
       out.append(self.indent()+ '%s(%s) { return %s.__%s(this,%s); }'%(b.name, args, bnode.name, b.name, args) )
      else:
       out.append(self.indent()+ '%s() { return %s.__%s(this); }'%(b.name, bnode.name, b.name) )
  self.pull()
  out.append('}')
  return '\n'.join(out)
def get_args_kwargs_from_funcdef(self, node, skip_self=False):
args = []
kwargs = []
if skip_self: nargs = node.args.args[1:]
else: nargs = node.args.args
offset = len(nargs) - len(node.args.defaults)
for i, arg in enumerate(nargs):
a = arg.id
dindex = i - offset
if dindex >= 0 and node.args.defaults:
default_value = self.visit( node.args.defaults[dindex] )
kwargs.append( '%s:%s' %(a, default_value) )
else:
args.append( a )
return args, kwargs
def _visit_for_prep_iter_helper(self, node, out, iter_name):
out.append(
#self.indent() + 'if (%s is dict) { %s = %s.keys(); }' %(iter_name, iter_name, iter_name)
self.indent() + 'if (%s is dict) %s = %s.keys();' %(iter_name, iter_name, iter_name)
)
def visit_Expr(self, node):
s = self.visit(node.value)
if isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name) and node.value.func.id == 'JS':
if s.endswith('}') and 'return' in s.split(' '):
pass
elif not s.endswith(';'):
s += ';'
elif not s.endswith(';'):
s += ';'
return s
def visit_Print(self, node):
args = [self.visit(e) for e in node.values]
if len(args) > 1:
s = 'print([%s]);' % ', '.join(args)
else:
s = 'print(%s);' % ', '.join(args)
return s
def visit_Assign(self, node):
assert len(node.targets) == 1
target = node.targets[0]
if isinstance(target, ast.Tuple):
#raise NotImplementedError
elts = [self.visit(e) for e in target.elts]
if self.indent():
return '%s = %s' % (','.join(elts), self.visit(node.value))
else:
return 'var %s = %s' % (','.join(elts), self.visit(node.value))
else:
target = self.visit(target)
value = self.visit(node.value)
if self.indent():
code = '%s = %s;' % (target, value)
else:
code = 'var %s = %s;' % (target, value)
return code
 def _visit_function(self, node):
  """Emit a Dart function/method from a FunctionDef.

  Decorators understood: @property (getter), @<x>.setter (setter) and
  @__typedef__(name=Type) for typed parameters; anything else raises
  SyntaxError. Defaulted parameters become Dart named parameters
  ({a:1}); a parameter named __variable_args__<name> expands into 16
  optional `_vararg_N` slots collected into a list at runtime.
  """
  getter = False
  setter = False
  args_typedefs = {}
  for decor in node.decorator_list:
   if isinstance(decor, ast.Name) and decor.id == 'property':
    getter = True
   elif isinstance(decor, ast.Attribute) and isinstance(decor.value, ast.Name) and decor.attr == 'setter':
    setter = True
   elif isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == '__typedef__':
    for key in decor.keywords:
     args_typedefs[ key.arg ] = key.value.id
   else:
    raise SyntaxError
  args = [] #self.visit(node.args)
  oargs = []
  # index of the first parameter that carries a default value
  offset = len(node.args.args) - len(node.args.defaults)
  varargs = False
  varargs_name = None
  for i, arg in enumerate(node.args.args):
   a = arg.id
   if a in args_typedefs:
    # prepend the declared Dart type: `Type name`
    a = '%s %s' %(args_typedefs[a], a)
   dindex = i - offset
   if a.startswith('__variable_args__'):
    varargs_name = a.split('__')[-1]
    varargs = ['_vararg_%s'%n for n in range(16) ]
    args.append( '[%s]'%','.join(varargs) )
   elif dindex >= 0 and node.args.defaults:
    default_value = self.visit( node.args.defaults[dindex] )
    oargs.append( '%s:%s' %(a, default_value) )
   else:
    args.append( a )
  if oargs:
   #args.append( '[%s]' % ','.join(oargs) )
   args.append( '{%s}' % ','.join(oargs) )
  buffer = self.indent()
  # _prefix is set by visit_ClassDef (e.g. 'static', 'operator +')
  if hasattr(node,'_prefix'): buffer += node._prefix + ' '
  if getter:
   buffer += 'get %s {\n' % node.name
  elif setter:
   buffer += 'set %s(%s) {\n' % (node.name, ', '.join(args))
  else:
   buffer += '%s(%s) {\n' % (node.name, ', '.join(args))
  self.push()
  if varargs:
   # collect the non-null optional slots back into a single list
   buffer += 'var %s = new list([]);\n' %varargs_name
   for i,n in enumerate(varargs):
    buffer += 'if (%s != null) %s.append(%s);\n' %(n, varargs_name, n)
  body = list()
  for child in node.body:
   # bare string statements (docstrings) are dropped
   if isinstance(child, ast.Str):
    continue
   else:
    body.append( self.indent() + self.visit(child) )
  buffer += '\n'.join(body)
  self.pull()
  buffer += '\n%s}\n' %self.indent()
  return buffer
 # Dart has no separate identity operator, so Python `is`/`is not` are
 # lowered to value (in)equality; `!=` maps directly.
 def visit_Is(self, node):
  return '=='
 def visit_IsNot(self, node):
  return '!='
 def visit_NotEq(self, node):
  return '!='
def _visit_call_helper(self, node):
if node.args:
args = [self.visit(e) for e in node.args]
args = ', '.join([e for e in args if e])
else:
args = ''
if isinstance(node.func, ast.Name) and node.func.id == 'range' and len(node.args)==2:
func = '__range2'
else:
func = self.visit(node.func)
if node.keywords:
kwargs = ','.join( ['%s:%s'%(x.arg, self.visit(x.value)) for x in node.keywords] )
if args:
return '%s(%s, %s)' %( func, ','.join(args), kwargs )
else:
return '%s( %s )' %( func, kwargs )
else:
return '%s(%s)' % (func, args)
def _visit_call_helper_var(self, node):
args = [ self.visit(a) for a in node.args ]
if self._function_stack:
fnode = self._function_stack[-1]
rem = []
for arg in args:
if arg in fnode._local_vars:
rem.append( arg )
else:
fnode._local_vars.add( arg )
for arg in rem:
args.remove( arg )
out = []
if args:
out.append( 'var ' + ','.join(args) )
if node.keywords:
for key in node.keywords:
out.append( '%s %s' %(key.value.id, key.arg) )
return ';'.join(out)
def _visit_call_helper_list(self, node):
name = self.visit(node.func)
if node.args:
args = [self.visit(e) for e in node.args]
args = ', '.join([e for e in args if e])
else:
args = '[]' ## the dart list builtin requires an argument
return '%s(%s)' % (name, args)
def _visit_call_helper_numpy_array(self, node):
    """Translate a `numpy.array(...)` call into a Dart vector constructor.

    A literal of exactly four elements with a SIMD-mappable dtype becomes a
    direct Dart SIMD constructor (e.g. `Float32x4`); otherwise the runtime
    `float32vec` wrapper is used.
    """
    # dtypes that map directly onto a Dart SIMD type.
    simd = {
        'float32': 'Float32x4'
    }
    arg_name = args = None
    direct = False
    if isinstance(node.args[0], ast.Name):
        # Form: numpy.array(some_variable, dtype=...).
        arg_name = node.args[0].id
    else:
        # Form: numpy.array([...], dtype=...) - visit each literal element.
        args = ','.join([self.visit(a) for a in node.args[0].elts])
        if len(node.args[0].elts) == 4:  ## simple rule: if there are 4 items, its a direct SIMD type
            direct = True
    if node.keywords:
        for key in node.keywords:
            if key.arg == 'dtype':
                if isinstance(key.value, ast.Attribute) and key.value.attr in simd:
                    if arg_name:
                        return 'new float32vec( %s )' % arg_name
                    elif direct:
                        return 'new %s(%s)' % (simd[key.value.attr], args)
                    else:
                        return 'new float32vec( [%s] )' % args
                else:
                    raise NotImplementedError('numpy.array requires dtype is given')
    # NOTE(review): implicitly returns None when there are no keywords or no
    # 'dtype' keyword at all -- confirm callers tolerate a None result.
def _visit_call_helper_instanceof(self, node):
args = map(self.visit, node.args)
if len(args) == 2:
if args[1] == 'Number':
args[1] = 'num'
return '%s is %s' %tuple(args)
else:
raise SyntaxError( args )
def visit_ExceptHandler(self, node):
    """Render an `except` handler body as newline-joined Dart statements."""
    rendered = [self.visit(child) for child in node.body]
    return '\n'.join(rendered)
def visit_Compare(self, node):
    """Translate a comparison expression into Dart source.

    Operators listed in `specials` are emitted as calls to runtime helper
    functions (e.g. `__lt__(a, b)`) instead of infix operators; all other
    operators are emitted infix, with the left operand parenthesized.
    """
    # Comparison operators that must go through runtime helper functions.
    specials = {
        '<': '__lt__',
        '>': '__gt__',
        '<=': '__lte__',
        '>=': '__gte__'
    }
    comp = []
    if len(node.ops) == 0:
        # No operators: just parenthesize the left operand.
        comp.append('(')
        comp.append(self.visit(node.left))
        comp.append(')')
    else:
        if self.visit(node.ops[0]) in specials:
            # Helper-call form embeds the left operand itself (below),
            # so no leading parenthesized operand is emitted here.
            pass
        else:
            comp.append('(')
            comp.append(self.visit(node.left))
            comp.append(')')
    for i in range(len(node.ops)):
        op = self.visit(node.ops[i])
        if op in specials:
            # NOTE(review): node.left is reused for every link of a chained
            # comparison; for `a < b < c` this visits `a` twice rather than
            # using the previous comparator -- presumably chained special
            # comparisons are not expected in translated input. Confirm.
            comp.append(specials[op] + '(%s,' % self.visit(node.left))
        else:
            comp.append(op)
        if isinstance(node.comparators[i], ast.BinOp):
            # Parenthesize compound right-hand sides to preserve precedence.
            comp.append('(')
            comp.append(self.visit(node.comparators[i]))
            comp.append(')')
        else:
            comp.append(self.visit(node.comparators[i]))
        if op in specials:
            # Close the helper-function call opened above.
            comp.append(')')
    return ' '.join(comp)
def main(script):
    """Parse Python source text and return its Dart translation."""
    tree = ast.parse(script)
    generator = DartGenerator()
    return generator.visit(tree)
def command():
    """CLI entry point: translate the given .py files (or stdin) to Dart.

    Any command-line arguments ending in '.py' are read and concatenated;
    with no script arguments, the source is read from stdin. The resulting
    Dart source is printed to stdout.
    """
    scripts = [arg for arg in sys.argv[1:] if arg.endswith('.py')]
    if len(scripts):
        chunks = [open(script, 'rb').read() for script in scripts]
        data = '\n'.join(chunks)
    else:
        data = sys.stdin.read()
    js = main(data)
    print(js)
if __name__ == '__main__':
    command()
|
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on collections.
All functions here should be agnostic of how CollectionModel objects are
stored in the database. In particular, the various query methods should
delegate to the Collection model class. This will enable the collection
storage model to be changed without affecting this module and others above it.
"""
import collections
import copy
import logging
import os
from constants import constants
from core.domain import activity_services
from core.domain import collection_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
# Storage-model classes are resolved through the platform registry so the
# underlying storage implementation can be swapped without touching this
# module.
(collection_models, user_models) = models.Registry.import_models([
    models.NAMES.collection, models.NAMES.user])
datastore_services = models.Registry.import_datastore_services()
memcache_services = models.Registry.import_memcache_services()
# This takes additional 'title' and 'category' parameters.
CMD_CREATE_NEW = 'create_new'
# Name for the collection search index.
SEARCH_INDEX_COLLECTIONS = 'collections'
# The maximum number of iterations allowed for populating the results of a
# search query.
MAX_ITERATIONS = 10
def _migrate_collection_contents_to_latest_schema(
        versioned_collection_contents):
    """Migrates collection contents, step by step, up to the latest schema.

    Performs a sequential, in-place update of the collection structure based
    on the schema version of the input dict, mirroring the exploration
    migration process in exp_services. When a new collection schema is
    introduced, a conversion step must be added here.

    Args:
        versioned_collection_contents: A dict with two keys:
            - schema_version: int. The schema version for the collection.
            - collection_contents: dict. The dict comprising the collection
              contents.

    Raises:
        Exception: The schema version of the collection is outside of what is
            supported at present.
    """
    current_version = versioned_collection_contents['schema_version']
    latest_version = feconf.CURRENT_COLLECTION_SCHEMA_VERSION
    if current_version < 1 or current_version > latest_version:
        raise Exception(
            'Sorry, we can only process v1-v%d collection schemas at '
            'present.' % latest_version)
    # Apply one conversion step at a time until the contents are current.
    while current_version < latest_version:
        collection_domain.Collection.update_collection_contents_from_model(
            versioned_collection_contents, current_version)
        current_version += 1
# Repository GET methods.
def _get_collection_memcache_key(collection_id, version=None):
"""Returns a memcache key for the collection.
Args:
collection_id: str. ID of the collection.
version: int. Schema version of the collection.
Returns:
str. The memcache key of the collection.
"""
if version:
return 'collection-version:%s:%s' % (collection_id, version)
else:
return 'collection:%s' % collection_id
def get_collection_from_model(collection_model):
    """Returns a Collection domain object given a collection model loaded
    from the datastore.

    Args:
        collection_model: CollectionModel. The collection model loaded from the
            datastore.

    Returns:
        Collection. A Collection domain object corresponding to the given
        collection model.
    """
    # Ensure the original collection model does not get altered.
    versioned_collection_contents = {
        'schema_version': collection_model.schema_version,
        'collection_contents':
            copy.deepcopy(collection_model.collection_contents)
    }
    # If collection is in version 2, copy nodes data to collection contents.
    # (v2 models stored nodes in a separate `nodes` field rather than inside
    # `collection_contents`.)
    if collection_model.schema_version == 2:
        versioned_collection_contents['collection_contents'] = {
            'nodes': copy.deepcopy(collection_model.nodes)
        }
    # Migrate the collection if it is not using the latest schema version.
    if (collection_model.schema_version !=
            feconf.CURRENT_COLLECTION_SCHEMA_VERSION):
        _migrate_collection_contents_to_latest_schema(
            versioned_collection_contents)
    # Rebuild the domain object from the (possibly migrated) contents dict.
    return collection_domain.Collection(
        collection_model.id, collection_model.title,
        collection_model.category, collection_model.objective,
        collection_model.language_code, collection_model.tags,
        versioned_collection_contents['schema_version'], [
            collection_domain.CollectionNode.from_dict(collection_node_dict)
            for collection_node_dict in
            versioned_collection_contents['collection_contents']['nodes']
        ],
        collection_model.version, collection_model.created_on,
        collection_model.last_updated)
def get_collection_summary_from_model(collection_summary_model):
    """Converts a collection summary model into its domain object.

    Args:
        collection_summary_model: CollectionSummaryModel.

    Returns:
        CollectionSummary.
    """
    model = collection_summary_model
    return collection_domain.CollectionSummary(
        model.id, model.title, model.category, model.objective,
        model.language_code, model.tags, model.status,
        model.community_owned, model.owner_ids, model.editor_ids,
        model.viewer_ids, model.contributor_ids,
        model.contributors_summary, model.version, model.node_count,
        model.collection_model_created_on,
        model.collection_model_last_updated
    )
def get_collection_by_id(collection_id, strict=True, version=None):
    """Returns a domain object representing a collection.

    The memcache is consulted first; on a miss the collection is loaded from
    the datastore and the cache is populated.

    Args:
        collection_id: str. ID of the collection.
        strict: bool. Whether to fail noisily if no collection with the given
            id exists in the datastore.
        version: int or None. The version number of the collection to be
            retrieved. If it is None, the latest version will be retrieved.

    Returns:
        Collection or None. The domain object representing a collection with
        the given id, or None if it does not exist.
    """
    memcache_key = _get_collection_memcache_key(
        collection_id, version=version)
    cached_collection = memcache_services.get_multi(
        [memcache_key]).get(memcache_key)
    if cached_collection is not None:
        return cached_collection
    collection_model = collection_models.CollectionModel.get(
        collection_id, strict=strict, version=version)
    if not collection_model:
        return None
    collection = get_collection_from_model(collection_model)
    memcache_services.set_multi({memcache_key: collection})
    return collection
def get_collection_summary_by_id(collection_id):
    """Returns a domain object representing a collection summary.

    Args:
        collection_id: str. ID of the collection summary.

    Returns:
        CollectionSummary or None. The collection summary domain object for
        the collection with the given id, or None if no summary model exists.
    """
    # TODO(msl): Maybe use memcache similarly to get_collection_by_id.
    summary_model = collection_models.CollectionSummaryModel.get(
        collection_id, strict=False)
    if not summary_model:
        return None
    return get_collection_summary_from_model(summary_model)
def get_multiple_collections_by_id(collection_ids, strict=True):
    """Returns a dict of domain objects representing collections with the
    given ids as keys.

    Collections found in the memcache are returned directly; the rest are
    loaded from the datastore and cached.

    Args:
        collection_ids: list(str). A list of collection ids of collections to
            be retrieved.
        strict: bool. Whether to fail noisily if no collection with a given id
            exists in the datastore.

    Returns:
        A dict of domain objects representing collections with the given ids
        as keys.

    Raises:
        ValueError: 'strict' is True, and one or more of the given collection
            ids are invalid.
    """
    collection_ids = set(collection_ids)
    result = {}
    memcache_keys = [_get_collection_memcache_key(i) for i in collection_ids]
    cache_result = memcache_services.get_multi(memcache_keys)
    # Use .values()/.items() rather than the Python-2-only itervalues()/
    # iterkeys() so this module also runs under Python 3.
    for collection_obj in cache_result.values():
        result[collection_obj.id] = collection_obj
    uncached = [cid for cid in collection_ids if cid not in result]
    db_collection_models = collection_models.CollectionModel.get_multi(
        uncached)
    db_results_dict = {}
    not_found = []
    for cid, model in zip(uncached, db_collection_models):
        if model:
            db_results_dict[cid] = get_collection_from_model(model)
        else:
            logging.info('Tried to fetch collection with id %s, but no such '
                         'collection exists in the datastore' % cid)
            not_found.append(cid)
    if strict and not_found:
        raise ValueError(
            'Couldn\'t find collections with the following ids:\n%s'
            % '\n'.join(not_found))
    # Entries are only added to db_results_dict when a model exists, so no
    # None-filtering is needed before caching.
    if db_results_dict:
        memcache_services.set_multi(db_results_dict)
    result.update(db_results_dict)
    return result
def get_collection_and_collection_rights_by_id(collection_id):
    """Fetches a collection and its rights object in one datastore call.

    Args:
        collection_id: str. Id of the collection.

    Returns:
        tuple(Collection|None, CollectionRights|None). The collection and
        collection rights domain object, respectively.
    """
    collection_and_rights = (
        datastore_services.fetch_multiple_entities_by_ids_and_models(
            [
                ('CollectionModel', [collection_id]),
                ('CollectionRightsModel', [collection_id])
            ]))
    collection_model = collection_and_rights[0][0]
    rights_model = collection_and_rights[1][0]
    collection = (
        get_collection_from_model(collection_model)
        if collection_model is not None else None)
    collection_rights = None
    if rights_model is not None:
        collection_rights = rights_manager.get_activity_rights_from_model(
            rights_model, constants.ACTIVITY_TYPE_COLLECTION)
    return (collection, collection_rights)
def get_new_collection_id():
    """Generates a fresh, unused collection id.

    Returns:
        str. A new collection id.
    """
    new_id = collection_models.CollectionModel.get_new_id('')
    return new_id
# Query methods.
def get_collection_titles_and_categories(collection_ids):
    """Returns collection titles and categories for the given ids.

    Args:
        collection_ids: list(str). IDs of the collections whose titles and
            categories are to be retrieved.

    Returns:
        A dict with collection ids as keys. The corresponding values are
        dicts with the keys 'title' and 'category'. Any invalid
        collection_ids will not be included in the return dict; no error
        will be raised.
    """
    result = {}
    for model in collection_models.CollectionModel.get_multi(collection_ids):
        if not model:
            logging.error('Could not find collection corresponding to id')
            continue
        collection = get_collection_from_model(model)
        result[collection.id] = {
            'title': collection.title,
            'category': collection.category,
        }
    return result
def get_completed_exploration_ids(user_id, collection_id):
    """Returns a list of explorations the user has completed within the context
    of the provided collection.

    Args:
        user_id: str. ID of the given user.
        collection_id: str. ID of the collection.

    Returns:
        list(str). A list of exploration ids that the user with the given
        user id has completed within the context of the provided collection with
        the given collection id. The list is empty if the user has not yet
        completed any explorations within the collection, or if either the
        collection and/or user do not exist.

        A progress model isn't added until the first exploration of a collection
        is completed, so, if a model is missing, there isn't enough information
        to infer whether that means the collection doesn't exist, the user
        doesn't exist, or if they just haven't made any progress in that
        collection yet. Thus, we just assume the user and collection exist for
        the sake of this call, so it returns an empty list, indicating that no
        progress has yet been made.
    """
    progress_model = user_models.CollectionProgressModel.get(
        user_id, collection_id)
    # Missing model is treated as "no progress yet" (see docstring).
    return progress_model.completed_explorations if progress_model else []
def get_explorations_completed_in_collections(user_id, collection_ids):
    """Returns the ids of the explorations completed in each of the collections.

    Args:
        user_id: str. ID of the given user.
        collection_ids: list(str). IDs of the collections.

    Returns:
        list(list(str)). List of the exploration ids completed in each
        collection; an empty list for collections with no progress model.
    """
    progress_models = user_models.CollectionProgressModel.get_multi(
        user_id, collection_ids)
    return [
        model.completed_explorations if model else []
        for model in progress_models
    ]
def get_valid_completed_exploration_ids(user_id, collection):
    """Returns the user's completed exploration ids that still belong to the
    current version of the given collection.

    Args:
        user_id: str. ID of the given user.
        collection: Collection.

    Returns:
        list(str). The subset of get_completed_exploration_ids whose
        explorations are nodes of the current collection version.
    """
    completed_ids = get_completed_exploration_ids(user_id, collection.id)
    valid_ids = []
    for exp_id in completed_ids:
        # Explorations removed from the collection since completion are
        # filtered out.
        if collection.get_node(exp_id):
            valid_ids.append(exp_id)
    return valid_ids
def get_next_exploration_id_to_complete_by_user(user_id, collection_id):
    """Returns the first exploration ID in the specified collection that the
    given user has not yet attempted.

    Args:
        user_id: str. ID of the user.
        collection_id: str. ID of the collection.

    Returns:
        str. The first exploration ID in the specified collection that the
        given user has not completed; the collection's initial exploration if
        the user has yet to complete any explorations in the collection.
    """
    completed_ids = get_completed_exploration_ids(user_id, collection_id)
    collection = get_collection_by_id(collection_id)
    if not completed_ids:
        # The user has yet to complete any explorations inside the collection.
        return collection.first_exploration_id
    return collection.get_next_exploration_id(completed_ids)
def record_played_exploration_in_collection_context(
        user_id, collection_id, exploration_id):
    """Marks an exploration as played by a user within a collection context.

    Args:
        user_id: str. ID of the given user.
        collection_id: str. ID of the given collection.
        exploration_id: str. ID of the given exploration.
    """
    progress_model = user_models.CollectionProgressModel.get_or_create(
        user_id, collection_id)
    if exploration_id in progress_model.completed_explorations:
        # Already recorded; avoid duplicate entries and a redundant write.
        return
    progress_model.completed_explorations.append(exploration_id)
    progress_model.put()
def _get_collection_summary_dicts_from_models(collection_summary_models):
    """Builds a dict of collection summary domain objects, keyed by id.

    Args:
        collection_summary_models: iterable(CollectionSummaryModel). An
            iterable of CollectionSummaryModel instances.

    Returns:
        dict. Maps each summary's id to its CollectionSummary domain object.
    """
    return {
        summary.id: summary
        for summary in (
            get_collection_summary_from_model(model)
            for model in collection_summary_models)
    }
def get_collection_summaries_matching_ids(collection_ids):
    """Given a list of collection ids, return a list with the corresponding
    summary domain objects (or None if the corresponding summary does not
    exist).

    Args:
        collection_ids: A list of collection ids.

    Returns:
        list(CollectionSummary|None). One entry per input id, in order.
    """
    summaries = []
    for model in collection_models.CollectionSummaryModel.get_multi(
            collection_ids):
        if model:
            summaries.append(get_collection_summary_from_model(model))
        else:
            summaries.append(None)
    return summaries
# TODO(bhenning): Update this function to support also matching the query to
# explorations contained within this collection. Introduce tests to verify this
# behavior.
def get_collection_ids_matching_query(query_string, cursor=None):
    """Returns a list with all collection ids matching the given search query
    string, as well as a search cursor for future fetches.

    Args:
        query_string: str. The search query string.
        cursor: str or None. Cursor indicating where, in the list of
            collections, to start the search from.

    Returns:
        2-tuple of (returned_collection_ids, search_cursor), where:
            returned_collection_ids : list(str). A list with all collection ids
                matching the given search query string, as well as a search
                cursor for future fetches. The list contains exactly
                feconf.SEARCH_RESULTS_PAGE_SIZE results if there are at least
                that many, otherwise it contains all remaining results. (If this
                behaviour does not occur, an error will be logged.)
            search_cursor: str. Search cursor for future fetches.
    """
    returned_collection_ids = []
    search_cursor = cursor
    # Keep fetching pages until a full page of results has been accumulated,
    # the results are exhausted, or MAX_ITERATIONS is reached.
    for _ in range(MAX_ITERATIONS):
        remaining_to_fetch = feconf.SEARCH_RESULTS_PAGE_SIZE - len(
            returned_collection_ids)
        collection_ids, search_cursor = search_services.search_collections(
            query_string, remaining_to_fetch, cursor=search_cursor)
        # Collection model cannot be None as we are fetching the collection ids
        # through query and there cannot be a collection id for which there is
        # no collection.
        for ind, _ in enumerate(
                collection_models.CollectionSummaryModel.get_multi(
                    collection_ids)):
            returned_collection_ids.append(collection_ids[ind])
        # The number of collections in a page is always lesser or equal to
        # feconf.SEARCH_RESULTS_PAGE_SIZE.
        if len(returned_collection_ids) == feconf.SEARCH_RESULTS_PAGE_SIZE or (
                search_cursor is None):
            break
    return (returned_collection_ids, search_cursor)
# Repository SAVE and DELETE methods.
def apply_change_list(collection_id, change_list):
    """Applies a changelist to a pristine collection and returns the result.

    Args:
        collection_id: str. ID of the given collection.
        change_list: list(dict). A change list to be applied to the given
            collection. Each entry is a dict that represents a
            CollectionChange object.

    Returns:
        Collection. The resulting collection domain object.

    Raises:
        Exception: Re-raises (after logging) any error encountered while
            applying a change.
    """
    collection = get_collection_by_id(collection_id)
    try:
        changes = [collection_domain.CollectionChange(change_dict)
                   for change_dict in change_list]
        # Dispatch each change to the corresponding Collection mutator.
        for change in changes:
            if change.cmd == collection_domain.CMD_ADD_COLLECTION_NODE:
                collection.add_node(change.exploration_id)
            elif change.cmd == collection_domain.CMD_DELETE_COLLECTION_NODE:
                collection.delete_node(change.exploration_id)
            elif change.cmd == collection_domain.CMD_SWAP_COLLECTION_NODES:
                collection.swap_nodes(change.first_index, change.second_index)
            elif change.cmd == collection_domain.CMD_EDIT_COLLECTION_PROPERTY:
                if (change.property_name ==
                        collection_domain.COLLECTION_PROPERTY_TITLE):
                    collection.update_title(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_CATEGORY):
                    collection.update_category(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_OBJECTIVE):
                    collection.update_objective(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_LANGUAGE_CODE):
                    collection.update_language_code(change.new_value)
                elif (change.property_name ==
                      collection_domain.COLLECTION_PROPERTY_TAGS):
                    collection.update_tags(change.new_value)
            elif (
                    change.cmd ==
                    collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
                # Loading the collection model from the datastore into an
                # Collection domain object automatically converts it to use the
                # latest schema version. As a result, simply resaving the
                # collection is sufficient to apply the schema migration.
                continue
        return collection
    except Exception as e:
        # Log enough context to reproduce the failure, then re-raise.
        logging.error(
            '%s %s %s %s' % (
                e.__class__.__name__, e, collection_id, change_list)
        )
        raise
def validate_exps_in_collection_are_public(collection):
    """Validates that explorations in a given collection are public.

    Args:
        collection: Collection. Collection to be validated.

    Raises:
        ValidationError: The collection contains at least one private
            exploration.
    """
    for exploration_id in collection.exploration_ids:
        if not rights_manager.is_exploration_private(exploration_id):
            continue
        raise utils.ValidationError(
            'Cannot reference a private exploration within a public '
            'collection, exploration ID: %s' % exploration_id)
def _save_collection(committer_id, collection, commit_message, change_list):
    """Validates a collection and commits it to persistent storage. If
    successful, increments the version number of the incoming collection domain
    object by 1.

    Args:
        committer_id: str. ID of the given committer.
        collection: Collection. The collection domain object to be saved.
        commit_message: str. The commit message.
        change_list: list(dict). List of changes applied to a collection. Each
            entry in change_list is a dict that represents a CollectionChange.

    Raises:
        ValidationError: An invalid exploration was referenced in the
            collection.
        Exception: The collection model and the incoming collection domain
            object have different version numbers.
    """
    if not change_list:
        raise Exception(
            'Unexpected error: received an invalid change list when trying to '
            'save collection %s: %s' % (collection.id, change_list))
    collection_rights = rights_manager.get_collection_rights(collection.id)
    # Private collections are validated leniently; public ones strictly.
    if collection_rights.status != rights_manager.ACTIVITY_STATUS_PRIVATE:
        collection.validate(strict=True)
    else:
        collection.validate(strict=False)
    # Validate that all explorations referenced by the collection exist.
    exp_ids = collection.exploration_ids
    exp_summaries = (
        exp_fetchers.get_exploration_summaries_matching_ids(exp_ids))
    exp_summaries_dict = {
        exp_id: exp_summaries[ind] for (ind, exp_id) in enumerate(exp_ids)
    }
    for collection_node in collection.nodes:
        if not exp_summaries_dict[collection_node.exploration_id]:
            raise utils.ValidationError(
                'Expected collection to only reference valid explorations, '
                'but found an exploration with ID: %s (was it deleted?)' %
                collection_node.exploration_id)
    # Ensure no explorations are being added that are 'below' the public status
    # of this collection. If the collection is private, it can have both
    # private and public explorations. If it's public, it can only have public
    # explorations.
    # TODO(bhenning): Ensure the latter is enforced above when trying to
    # publish a collection.
    if rights_manager.is_collection_public(collection.id):
        validate_exps_in_collection_are_public(collection)
    # Collection model cannot be none as we are passing the collection as a
    # parameter and also this function is called by update_collection which only
    # works if the collection is put into the datastore.
    collection_model = collection_models.CollectionModel.get(
        collection.id, strict=False)
    # Reject version skew in either direction before writing.
    if collection.version > collection_model.version:
        raise Exception(
            'Unexpected error: trying to update version %s of collection '
            'from version %s. Please reload the page and try again.'
            % (collection_model.version, collection.version))
    elif collection.version < collection_model.version:
        raise Exception(
            'Trying to update version %s of collection from version %s, '
            'which is too old. Please reload the page and try again.'
            % (collection_model.version, collection.version))
    # Copy the domain object's fields onto the storage model and commit.
    collection_model.category = collection.category
    collection_model.title = collection.title
    collection_model.objective = collection.objective
    collection_model.language_code = collection.language_code
    collection_model.tags = collection.tags
    collection_model.schema_version = collection.schema_version
    collection_model.collection_contents = {
        'nodes': [
            collection_node.to_dict() for collection_node in collection.nodes
        ]
    }
    collection_model.node_count = len(collection_model.nodes)
    collection_model.commit(committer_id, commit_message, change_list)
    # Invalidate the cache and refresh the search index for this collection.
    memcache_services.delete(_get_collection_memcache_key(collection.id))
    index_collections_given_ids([collection.id])
    collection.version += 1
def _create_collection(committer_id, collection, commit_message, commit_cmds):
    """Creates a new collection, and ensures that rights for a new collection
    are saved first. This is because _save_collection() depends on the rights
    object being present to tell it whether to do strict validation or not.

    Args:
        committer_id: str. ID of the committer.
        collection: Collection. collection domain object.
        commit_message: str. A description of changes made to the collection.
        commit_cmds: list(dict). A list of change commands made to the given
            collection.
    """
    # This line is needed because otherwise a rights object will be created,
    # but the creation of an collection object will fail.
    collection.validate(strict=False)
    rights_manager.create_new_collection_rights(collection.id, committer_id)
    collection_contents = {
        'nodes': [node.to_dict() for node in collection.nodes]
    }
    model = collection_models.CollectionModel(
        id=collection.id,
        category=collection.category,
        title=collection.title,
        objective=collection.objective,
        language_code=collection.language_code,
        tags=collection.tags,
        schema_version=collection.schema_version,
        collection_contents=collection_contents,
    )
    model.commit(committer_id, commit_message, commit_cmds)
    collection.version += 1
    create_collection_summary(collection.id, committer_id)
def save_new_collection(committer_id, collection):
    """Saves a new collection.

    Args:
        committer_id: str. ID of the committer.
        collection: Collection. Collection to be saved.
    """
    commit_message = (
        'New collection created with title \'%s\'.' % collection.title)
    commit_cmds = [{
        'cmd': CMD_CREATE_NEW,
        'title': collection.title,
        'category': collection.category,
    }]
    _create_collection(committer_id, collection, commit_message, commit_cmds)
def delete_collection(committer_id, collection_id, force_deletion=False):
    """Deletes the collection with the given collection_id.

    IMPORTANT: Callers of this function should ensure that committer_id has
    permissions to delete this collection, prior to calling this function.

    Args:
        committer_id: str. ID of the committer.
        collection_id: str. ID of the collection to be deleted.
        force_deletion: bool. If true, the collection and its history are fully
            deleted and are unrecoverable. Otherwise, the collection and all
            its history are marked as deleted, but the corresponding models are
            still retained in the datastore. This last option is the preferred
            one.
    """
    # Delete the rights model first, then the collection model itself.
    collection_rights_model = collection_models.CollectionRightsModel.get(
        collection_id)
    collection_rights_model.delete(
        committer_id, '', force_deletion=force_deletion)
    collection_model = collection_models.CollectionModel.get(collection_id)
    collection_model.delete(
        committer_id, feconf.COMMIT_MESSAGE_COLLECTION_DELETED,
        force_deletion=force_deletion)
    # This must come after the collection is retrieved. Otherwise the memcache
    # key will be reinstated.
    collection_memcache_key = _get_collection_memcache_key(collection_id)
    memcache_services.delete(collection_memcache_key)
    # Delete the collection from search.
    search_services.delete_collections_from_search_index([collection_id])
    # Delete the summary of the collection (regardless of whether
    # force_deletion is True or not).
    delete_collection_summary(collection_id)
    # Remove the collection from the featured activity list, if necessary.
    activity_services.remove_featured_activity(
        constants.ACTIVITY_TYPE_COLLECTION, collection_id)
def get_collection_snapshots_metadata(collection_id):
    """Returns the snapshots for this collection, as dicts.

    Args:
        collection_id: str. The id of the collection in question.

    Returns:
        list of dicts, each representing a recent snapshot. Each dict has the
        following keys: committer_id, commit_message, commit_cmds, commit_type,
        created_on_ms, version_number. The version numbers are consecutive and
        in ascending order. There are collection.version_number items in the
        returned list.
    """
    collection = get_collection_by_id(collection_id)
    # One snapshot per version, 1..current inclusive.
    version_nums = range(1, collection.version + 1)
    return collection_models.CollectionModel.get_snapshots_metadata(
        collection_id, version_nums)
def publish_collection_and_update_user_profiles(committer, collection_id):
    """Publish the given collection (via rights_manager.publish_collection)
    and stamp first_contribution_msec for each of its contributors.

    It is the responsibility of the caller to check that the collection is
    valid prior to publication.

    Args:
        committer: UserActionsInfo. UserActionsInfo object for the committer.
        collection_id: str. ID of the collection to be published.
    """
    rights_manager.publish_collection(committer, collection_id)
    now_msec = utils.get_current_time_in_millisecs()
    summary = get_collection_summary_by_id(collection_id)
    for contributor_id in summary.contributor_ids:
        user_services.update_first_contribution_msec_if_not_set(
            contributor_id, now_msec)
def update_collection(
        committer_id, collection_id, change_list, commit_message):
    """Apply a list of changes to a collection and commit the result.

    Args:
        committer_id: str. The id of the user who is performing the update
            action.
        collection_id: str. The collection id.
        change_list: list(dict). Each entry represents a CollectionChange
            object; changes are applied in sequence to produce the resulting
            collection.
        commit_message: str or None. A description of changes made to the
            collection. Required for published collections; may be None for
            unpublished ones.

    Raises:
        ValueError. The collection is public but no commit message was given.
    """
    if rights_manager.is_collection_public(collection_id) and (
            not commit_message):
        raise ValueError(
            'Collection is public so expected a commit message but '
            'received none.')
    updated_collection = apply_change_list(collection_id, change_list)
    _save_collection(
        committer_id, updated_collection, commit_message, change_list)
    update_collection_summary(updated_collection.id, committer_id)
    # Migration-bot commits and edits to private collections do not count as
    # a user's first contribution.
    counts_as_contribution = (
        not rights_manager.is_collection_private(updated_collection.id)
        and committer_id != feconf.MIGRATION_BOT_USER_ID)
    if counts_as_contribution:
        user_services.update_first_contribution_msec_if_not_set(
            committer_id, utils.get_current_time_in_millisecs())
def create_collection_summary(collection_id, contributor_id_to_add):
    """Compute and persist the summary of the given collection.

    Args:
        collection_id: str. ID of the collection.
        contributor_id_to_add: str. ID of the contributor to be added to the
            collection summary.
    """
    summary = compute_summary_of_collection(
        get_collection_by_id(collection_id), contributor_id_to_add)
    save_collection_summary(summary)
def update_collection_summary(collection_id, contributor_id_to_add):
    """Refresh the summary of a collection.

    Recomputing and re-saving the summary is identical to creating it, so
    this simply delegates to create_collection_summary().

    Args:
        collection_id: str. ID of the collection.
        contributor_id_to_add: str. ID of the contributor to be added to the
            collection summary.
    """
    create_collection_summary(collection_id, contributor_id_to_add)
def compute_summary_of_collection(collection, contributor_id_to_add):
    """Create a CollectionSummary domain object for a given Collection domain
    object and return it.
    Args:
        collection: Collection. The domain object.
        contributor_id_to_add: str. ID of the contributor to be added to the
            collection summary, or None for a revert/non-positive commit.
    Returns:
        CollectionSummary. The computed summary for the given collection.
    """
    collection_rights = collection_models.CollectionRightsModel.get_by_id(
        collection.id)
    collection_summary_model = (
        collection_models.CollectionSummaryModel.get_by_id(collection.id))
    # Update the contributor id list if necessary (contributors
    # defined as humans who have made a positive (i.e. not just
    # a revert) change to an collection's content).
    # Start from the existing summary's contributor data, if any.
    if collection_summary_model:
        contributor_ids = collection_summary_model.contributor_ids
        contributors_summary = collection_summary_model.contributors_summary
    else:
        contributor_ids = []
        contributors_summary = {}
    # Only human (non-system) contributors are recorded, and only once each.
    if (contributor_id_to_add is not None and
            contributor_id_to_add not in constants.SYSTEM_USER_IDS and
            contributor_id_to_add not in contributor_ids):
        contributor_ids.append(contributor_id_to_add)
    if contributor_id_to_add not in constants.SYSTEM_USER_IDS:
        if contributor_id_to_add is None:
            # Revert commit or other non-positive commit.
            # Recompute from scratch, since a revert can remove commits that
            # were previously counted.
            contributors_summary = compute_collection_contributors_summary(
                collection.id)
        else:
            # Positive commit: increment this contributor's commit count.
            if contributor_id_to_add in contributors_summary:
                contributors_summary[contributor_id_to_add] += 1
            else:
                contributors_summary[contributor_id_to_add] = 1
    collection_model_last_updated = collection.last_updated
    collection_model_created_on = collection.created_on
    collection_model_node_count = len(collection.nodes)
    collection_summary = collection_domain.CollectionSummary(
        collection.id, collection.title, collection.category,
        collection.objective, collection.language_code, collection.tags,
        collection_rights.status, collection_rights.community_owned,
        collection_rights.owner_ids, collection_rights.editor_ids,
        collection_rights.viewer_ids, contributor_ids, contributors_summary,
        collection.version, collection_model_node_count,
        collection_model_created_on,
        collection_model_last_updated
    )
    return collection_summary
def compute_collection_contributors_summary(collection_id):
    """Computes the contributors' summary for a given collection.
    Args:
        collection_id: str. ID of the collection.
    Returns:
        A dict whose keys are user_ids and whose values are the number of
        (non-revert) commits made to the given collection by that user_id.
        This does not count commits which have since been reverted.
    """
    snapshots_metadata = get_collection_snapshots_metadata(collection_id)
    current_version = len(snapshots_metadata)
    contributors_summary = collections.defaultdict(int)
    # Walk the version history backwards from the latest version. When a
    # revert is encountered, jump straight to the version that was reverted
    # to, so commits that were later undone are never counted.
    while True:
        # snapshots_metadata is in ascending version order, so version v is
        # at index v - 1.
        snapshot_metadata = snapshots_metadata[current_version - 1]
        committer_id = snapshot_metadata['committer_id']
        is_revert = (snapshot_metadata['commit_type'] == 'revert')
        if not is_revert and committer_id not in constants.SYSTEM_USER_IDS:
            contributors_summary[committer_id] += 1
        if current_version == 1:
            break
        if is_revert:
            # A revert's first commit cmd records the target version.
            current_version = snapshot_metadata['commit_cmds'][0][
                'version_number']
        else:
            current_version -= 1
    return contributors_summary
def save_collection_summary(collection_summary):
    """Persist a CollectionSummary domain object as a CollectionSummaryModel
    entity in the datastore.

    Args:
        collection_summary: CollectionSummary. The collection summary object
            to be saved in the datastore.
    """
    # The model fields mirror the domain object's attributes one-to-one.
    field_names = (
        'title', 'category', 'objective', 'language_code', 'tags', 'status',
        'community_owned', 'owner_ids', 'editor_ids', 'viewer_ids',
        'contributor_ids', 'contributors_summary', 'version', 'node_count',
        'collection_model_last_updated', 'collection_model_created_on')
    model_kwargs = {
        name: getattr(collection_summary, name) for name in field_names}
    collection_models.CollectionSummaryModel(
        id=collection_summary.id, **model_kwargs).put()
def delete_collection_summary(collection_id):
    """Delete the summary model of the given collection.

    Args:
        collection_id: str. ID of the collection whose collection summary is
            to be deleted.
    """
    summary_model = collection_models.CollectionSummaryModel.get(
        collection_id)
    summary_model.delete()
def save_new_collection_from_yaml(committer_id, yaml_content, collection_id):
    """Deserialize a YAML string into a collection and persist it.

    Args:
        committer_id: str. ID of the committer.
        yaml_content: str. The yaml content string specifying a collection.
        collection_id: str. ID of the saved collection.
    Returns:
        Collection. The domain object.
    """
    collection = collection_domain.Collection.from_yaml(
        collection_id, yaml_content)
    commit_cmds = [{
        'cmd': CMD_CREATE_NEW,
        'title': collection.title,
        'category': collection.category,
    }]
    _create_collection(
        committer_id, collection,
        'New collection created from YAML file with title \'%s\'.'
        % collection.title,
        commit_cmds)
    return collection
def delete_demo(collection_id):
    """Delete a single demo collection, if it exists.

    Args:
        collection_id: str. ID of the demo collection to be deleted.

    Raises:
        Exception. The given id is not a valid demo collection id.
    """
    if not collection_domain.Collection.is_demo_collection_id(collection_id):
        raise Exception('Invalid demo collection id %s' % collection_id)
    collection = get_collection_by_id(collection_id, strict=False)
    if not collection:
        logging.info('Collection with id %s was not deleted, because it '
                     'does not exist.' % collection_id)
        return
    delete_collection(
        feconf.SYSTEM_COMMITTER_ID, collection_id, force_deletion=True)
def load_demo(collection_id):
    """Load (or reload) a demo collection and its demo explorations.

    The resulting collection will have version 2 (one for its initial
    creation and one for its subsequent modification).

    Args:
        collection_id: str. ID of the collection to be loaded.
    """
    delete_demo(collection_id)
    yaml_path = os.path.join(
        feconf.SAMPLE_COLLECTIONS_DIR,
        feconf.DEMO_COLLECTIONS[collection_id])
    collection = save_new_collection_from_yaml(
        feconf.SYSTEM_COMMITTER_ID, utils.get_file_contents(yaml_path),
        collection_id)
    publish_collection_and_update_user_profiles(
        user_services.get_system_user(), collection_id)
    index_collections_given_ids([collection_id])
    # Load every demo exploration referenced by the collection that is not
    # already present in the datastore.
    for node in collection.nodes:
        if exp_fetchers.get_exploration_by_id(
                node.exploration_id, strict=False) is None:
            exp_services.load_demo(node.exploration_id)
    logging.info('Collection with id %s was loaded.' % collection_id)
def index_collections_given_ids(collection_ids):
    """Add the given collections to the search index.

    Args:
        collection_ids: list(str). List of collection ids whose collections
            are to be indexed. Ids with no matching summary are skipped.
    """
    summaries = get_collection_summaries_matching_ids(collection_ids)
    search_services.index_collection_summaries(
        [summary for summary in summaries if summary is not None])
|
|
from functools import wraps
from contextlib import AbstractContextManager, ExitStack, contextmanager
from abc import ABC, abstractmethod
from enum import Enum
from operator import *
from funklib.core.prelude import flip, const
from functools import wraps,partial
from collections import namedtuple
def from_context(cm):
    """Enter the given context manager and return the value it yields.

    The context is exited immediately after the value is captured.
    """
    with cm as value:
        return value
class MatchFailure(Exception):
    """Exception raised in case of a pattern match failure.

    Attributes:
        matched: the value that failed to match (may be None).
        pattern: the pattern it was tested against (may be None).
    """
    def __init__(self, matched=None, pattern=None):
        self.matched = matched
        self.pattern = pattern
    def __repr__(self):
        # Bug fix: the arguments were swapped, so repr() displayed the
        # matched value under "pattern=" and the pattern under "matched=".
        return "MatchFailure(pattern={}, matched={})".format(
            self.pattern, self.matched
        )
    def __str__(self):
        return "MatchFailure: value {!r} does not match pattern {!r}".format(self.matched, self.pattern)
class MatchSuccess(Exception):
    """Control-flow exception signalling that a match case succeeded."""
    pass
class matchstatus(Enum):
    """Lifecycle states of a match attempt."""
    pending = 0
    failed = 1
    succeeded = 2
class match:
    """Context-manager driver for pattern matching.

    Usage: ``with match(value) as m:`` followed by one ``with m.case(...):``
    block per alternative. A case that succeeds raises MatchSuccess, which
    __exit__ swallows (skipping the remaining cases); if no case fired,
    __exit__ raises MatchFailure.
    """
    def __init__(self, value):
        self._value = value
        # self._tried = 0
        # self._actives = []
        # self._status = matchstatus.pending
    @property
    def value(self):
        # The value being matched, read-only.
        return self._value
    @contextmanager
    def subcases(self):
        # Nested matcher: success of any inner case propagates as
        # MatchSuccess; failure of the whole inner match is silenced so the
        # next outer case is tried.
        try:
            with match(self._value) as m:
                yield m
            raise MatchSuccess
        except MatchFailure:
            return
    @contextmanager
    def case(self, pattern=None):
        """Creates a case context.
        If an extractor is provided, binds an extractor context to the 'as' clause.
        Silence MatchFailure exceptions and raise MatchSuccess if all goes okay."""
        try:
            if pattern:
                yield pattern.of(self._value)
            else:
                yield None
            raise MatchSuccess
        except MatchFailure:
            return
    @contextmanager
    def ignore(self):
        """Equivalent to self.case(ignore),
        introduce a context without binding anything."""
        yield None
        raise MatchSuccess
    def __enter__(self):
        return self
    def __exit__(self, t, ex, tb):
        # MatchSuccess is the normal control-flow signal: suppress it.
        if t is MatchSuccess:
            return True
        # Leaving the block with no exception means no case fired.
        if ex is None:
            raise MatchFailure("No pattern matches value {!r}".format(self._value))
    def __repr__(self):
        return "Match({})".format(self._value)
class Pattern(ABC):
    """Abstract base class for match patterns.

    Subclasses implement __match__, which returns the extracted value(s) or
    raises MatchFailure.
    """
    def __call__(self, x):
        # Calling a pattern is shorthand for matching against it.
        return self.__match__(x)
    @abstractmethod
    def __match__(self, x):
        """Try and match its argument
        and return a value or a tuple of values, or raise MatchFailure"""
        pass
    @contextmanager
    def of(self, x):
        # Context-manager form used by match.case(): binds the match result
        # to the `as` target of the with-statement.
        yield self.__match__(x)
class ClassPattern(ABC):
    """Abstract base for patterns whose __match__ is a classmethod
    (the class itself acts as the pattern, no instance needed)."""
    @classmethod
    @abstractmethod
    def __match__(cls, x):
        """Try and match its argument
        and return a value or a tuple of values, or raise MatchFailure"""
        pass
    @classmethod
    @contextmanager
    def of(cls, x):
        # Context-manager form used by match.case().
        yield cls.__match__(x)
class StaticPattern(ABC):
    """Abstract base for patterns whose __match__ is a staticmethod."""
    @staticmethod
    @abstractmethod
    def __match__(x):
        """Try and match its argument
        and return a value or a tuple of values, or raise MatchFailure"""
        pass
    @classmethod
    @contextmanager
    def of(cls, x):
        # Context-manager form used by match.case().
        yield cls.__match__(x)
@contextmanager
def match_except(*exceptions):
    """Context manager (and, via ContextDecorator, decorator) that converts
    the given exception types into MatchFailure.

    :param exceptions: exception classes to be transformed into a match
        failure
    """
    try:
        yield
    except exceptions as original:
        raise MatchFailure() from original
class Key(Pattern):
    """Pattern that match a gettable object that contains a given key,
    exposing the value associated with that key"""
    def __init__(self, key):
        self.key = key
    # match_except(...) returns a contextmanager object, which doubles as a
    # decorator (contextlib.ContextDecorator): KeyError/TypeError raised by
    # the subscript below is converted into MatchFailure.
    @match_except(KeyError, TypeError)
    def __match__(self, x):
        return x[self.key]
class Keys(Pattern):
    """Pattern that match a mapping which includes certain keys"""
    def __init__(self, keys):
        # keys: iterable of keys that must all be present.
        self.keys = keys
    # Missing keys / unsubscriptable values become MatchFailure.
    @match_except(KeyError, TypeError)
    def __match__(self, x):
        # Exposes the values in the same order as self.keys.
        return tuple(x[k] for k in self.keys)
class Attr(Pattern):
    """Pattern that match an object which has a specified attribute,
    exposing that attribute"""
    def __init__(self, attribute):
        # attribute: name (str) of the attribute to extract.
        self.attribute = attribute
    # A missing attribute becomes MatchFailure.
    @match_except(AttributeError)
    def __match__(self, x):
        return getattr(x, self.attribute)
class Attrs(Pattern):
    """Pattern that match an object which has all specified attributes,
    exposing all those attributes"""
    def __init__(self, *attributes):
        # attributes: attribute names; all must exist on the matched object.
        self.attributes = attributes
    # Any missing attribute becomes MatchFailure.
    @match_except(AttributeError)
    def __match__(self, x):
        # Exposes the values in the same order as self.attributes.
        return tuple(getattr(x, attr) for attr in self.attributes)
class Any(Pattern):
    """Wildcard pattern: matches every value, exposing it unchanged."""
    def __match__(self, value):
        return value
# Bug fix: this was declared with `def` instead of `class`, so the nested
# __init__/__match__ were never attached to anything and `pattern(...)`
# returned None (making `ignore` below None as well).
class pattern(Pattern):
    """Adapter that turns a plain function into a Pattern.

    The wrapped function implements the match directly: it returns the
    match result, or raises MatchFailure.
    """
    def __init__(self, func):
        self.pattern = func
    def __match__(self, x):
        return self.pattern(x)
# Wildcard that matches anything and binds None (discards the matched value).
_ignore = const(None)
ignore = pattern(_ignore)
def ismatch(value, pattern):
    """Return True if *pattern* matches *value*, False otherwise."""
    try:
        pattern.__match__(value)
    except MatchFailure:
        return False
    return True
# A str subclass used to create distinguishable sentinel values.
class Symbol(str): pass
# Sentinel meaning "no default supplied" (distinct from an explicit None).
_NoDefault = Symbol("NoDefault")
def getmatch(value, pattern, default=_NoDefault):
    """Evaluate *pattern* against *value* and return the match result.

    On failure, return *default* if one was supplied; otherwise re-raise
    the MatchFailure.
    """
    try:
        return pattern.__match__(value)
    except MatchFailure:
        if default is _NoDefault:
            raise
        return default
def predicate_method(f):
    """Wrap a boolean-returning method so that a truthy result returns the
    tested argument and a falsy result raises MatchFailure."""
    @wraps(f)
    def matcher(self, candidate):
        if not f(self, candidate):
            raise MatchFailure()
        return candidate
    return matcher
def predicate_classmethod(f):
    """Classmethod analogue of predicate_method: truthy -> return the
    argument, falsy -> raise MatchFailure."""
    @wraps(f)
    def matcher(cls, candidate):
        if not f(cls, candidate):
            raise MatchFailure
        return candidate
    return matcher
def predicate_function(f):
    """Function analogue of predicate_method: truthy -> return the argument,
    falsy -> raise MatchFailure."""
    @wraps(f)
    def matcher(candidate):
        if not f(candidate):
            raise MatchFailure
        return candidate
    return matcher
class Predicate(Pattern):
    """Base class for 'predicate' objects implementing the match protocol:
    the matched value itself is exposed when the predicate holds."""
    def __init__(self, predicate):
        self.predicate = predicate
    def __match__(self, candidate):
        if not self.predicate(candidate):
            raise MatchFailure(matched=candidate, pattern=self)
        return candidate
    def __repr__(self):
        return "Predicate({})".format(self.predicate)
class Is(Predicate):
    """Predicate matching a value by identity (``x is identity``)."""
    def __init__(self, identity):
        self.identity = identity
        self.predicate = partial(is_, identity)
    def __match__(self, candidate):
        if candidate is self.identity:
            return candidate
        raise MatchFailure(matched=candidate, pattern=self)
class Equal(Predicate):
    """Predicate matching any value equal (``==``) to the reference value."""
    def __init__(self, value):
        self.equal = value
        self.predicate = partial(eq, value)
    # Bug fix: the @predicate_method wrapper was removed. It re-tested the
    # *returned value* for truthiness, so matching a falsy value such as 0
    # or "" raised MatchFailure even though the values were equal. The
    # method already raises on mismatch, so no wrapper is needed.
    def __match__(self, x):
        if x == self.equal:
            return x
        else:
            raise MatchFailure(matched=x, pattern=self)
class In(Predicate):
    """Predicate matching any value contained in the given iterable."""
    def __init__(self, iterable):
        self.container = iterable
        self.predicate = partial(contains, iterable)
    # Bug fix: the @predicate_method wrapper was removed. It re-tested the
    # *returned value* for truthiness, so falsy members (0, "", None)
    # raised MatchFailure even when present in the container.
    def __match__(self, x):
        if x in self.container:
            return x
        else:
            raise MatchFailure(matched=x, pattern=self)
class Compose(Pattern):
    """Pattern combiner applying its patterns in chain (right-to-left, like
    function composition); the whole match fails if any link fails."""
    def __init__(self, *patterns):
        self.patterns = patterns
    def __match__(self, x):
        result = x
        for pat in reversed(self.patterns):
            result = getmatch(result, pat)
        return result
class AsPredicate(Pattern):
    """Pattern wrapper that only *tests* its sub-pattern: on success the
    original value is exposed instead of the sub-pattern's extraction."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __match__(self, x):
        if not ismatch(x, self.pattern):
            raise MatchFailure(matched=x, pattern=self.pattern)
        return x
# Result record pairing the original value with its sub-pattern's match.
WithMatch = namedtuple("WithMatch", ("value", "match"))
class With(Pattern):
    """Pattern that matches like its sub-pattern but also keeps the original
    value, exposing a WithMatch(value, match) pair."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __match__(self, x):
        return WithMatch(value=x, match=getmatch(x, self.pattern))
class All(Predicate):
    """Predicate combiner matching values accepted by *every* sub-predicate."""
    def __init__(self, *predicates):
        def _all(x):
            return all(ismatch(x, p) for p in predicates)
        self.predicates = predicates
        self.predicate = _all
    @predicate_method
    def __match__(self, x):
        return all(ismatch(x, p) for p in self.predicates)
class AnyOf(Predicate):
    """Predicate combiner matching values accepted by at least one
    sub-predicate."""
    def __init__(self, *predicates):
        def _any(x):
            return any(ismatch(x, p) for p in predicates)
        self.predicates = predicates
        self.predicate = _any
    @predicate_method
    def __match__(self, x):
        return any(ismatch(x, p) for p in self.predicates)
class OneOf(Predicate):
    """Predicates combiner that match a value which is matched by one and
    only one subpredicate."""
    def __init__(self, *predicates):
        def _oneof(x):
            # Count how many sub-predicates accept x; exactly one must.
            return sum(1 for p in predicates if ismatch(x, p)) == 1
        self.predicates = predicates
        self.predicate = _oneof
    # Bug fix (two defects): __match__ built the 2-tuple
    # (partial(ismatch, x), self.predicates) — a lost `map` call — so its
    # length was always 2 and nothing ever matched. And even the intended
    # len(tuple(map(...))) counted *all* predicates rather than the
    # successful ones, so "exactly one match" was never actually tested.
    @predicate_method
    def __match__(self, x):
        return sum(1 for p in self.predicates if ismatch(x, p)) == 1
class Type(Predicate):
    """Predicate that match a value by its type"""
    def __init__(self, t):
        # t: a type (or tuple of types) accepted by isinstance.
        self.type = t
        # flip swaps isinstance's arguments so the value is supplied last.
        self.predicate = partial(flip(isinstance), t)
    # predicate_method converts the boolean below into "return x on True,
    # raise MatchFailure on False".
    @predicate_method
    def __match__(self, x):
        return isinstance(x, self.type)
class Many(Pattern):
    """Pattern applying several sub-patterns to the *same* value, exposing a
    tuple of their results; fails if any sub-pattern fails."""
    def __init__(self, patterns):
        self.patterns = patterns
    def __match__(self, x):
        return tuple(getmatch(x, p) for p in self.patterns)
|
|
from base64 import b64encode
import json
import logging
import os
import re
import shutil
import sys
import requests
from requests.compat import urljoin
import wandb
try:
    from IPython.core.getipython import get_ipython
    from IPython.core.magic import line_cell_magic, Magics, magics_class
    from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
    from IPython.display import display
except ImportError:
    # IPython is unavailable (e.g. Python 2.7): warn once and install inert
    # stand-ins so this module can still be imported. The decorator
    # replacements return lambdas producing None, so the magic machinery
    # below degrades to a no-op.
    wandb.termwarn("ipython is not supported in python 2.7, upgrade to 3.x")
    class Magics(object):
        pass
    def magics_class(*args, **kwargs):
        return lambda *args, **kwargs: None
    def magic_arguments(*args, **kwargs):
        return lambda *args, **kwargs: None
    def argument(*args, **kwargs):
        return lambda *args, **kwargs: None
    def line_cell_magic(*args, **kwargs):
        return lambda *args, **kwargs: None
logger = logging.getLogger(__name__)
# Module-level handle to the IFrame created by the %%wandb cell magic, if any.
__IFrame = None
def maybe_display():
    """Display a run if the user added cell magic and we have run"""
    if __IFrame is None:
        return False
    return __IFrame.maybe_display()
def quiet():
    """Return the --quiet option of the active cell magic, or False if no
    magic IFrame is active."""
    if __IFrame is None:
        return False
    return __IFrame.opts.get("quiet")
class IFrame(object):
    """Renders a wandb resource (run, project, ...) as HTML inside Jupyter."""

    def __init__(self, path=None, opts=None):
        # path: wandb path string of the resource to render, or None to use
        # the current run.
        self.path = path
        self.api = wandb.Api()
        self.opts = opts or {}
        self.displayed = False
        self.height = self.opts.get("height", 420)

    def maybe_display(self) -> bool:
        """Display this object once, if a path or an active run exists.

        Returns whether the object has been displayed.
        """
        if not self.displayed and (self.path or wandb.run):
            display(self)
        return self.displayed

    def _repr_html_(self):
        """Return the HTML rendering, or a plain error message string."""
        try:
            self.displayed = True
            if self.opts.get("workspace", False):
                if self.path is None and wandb.run:
                    self.path = wandb.run.path
            # Fix: renamed local `object` (it shadowed the builtin).
            if isinstance(self.path, str):
                resource = self.api.from_path(self.path)
            else:
                resource = wandb.run
            if resource is None:
                if wandb.Api().api_key is None:
                    return "You must be logged in to render wandb in jupyter, run `wandb.login()`"
                else:
                    # Fall back to the default entity's auto-named project.
                    resource = self.api.project(
                        "/".join(
                            [
                                wandb.Api().default_entity,
                                wandb.util.auto_project_name(None),
                            ]
                        )
                    )
            return resource.to_html(self.height, hidden=False)
        except wandb.Error as e:
            return "Can't display wandb interface<br/>{}".format(e)
@magics_class
class WandBMagics(Magics):
    """IPython magics class providing the %wandb / %%wandb magic."""
    def __init__(self, shell, require_interaction=False):
        super(WandBMagics, self).__init__(shell)
        # Parsed magic arguments, filled in on each invocation.
        self.options = {}
    @magic_arguments()
    @argument(
        "path",
        default=None,
        nargs="?",
        help="A path to a resource you want to display, defaults to wandb.run.path",
    )
    @argument(
        "-w",
        "--workspace",
        default=False,
        action="store_true",
        help="Display the entire run project workspace",
    )
    @argument(
        "-q",
        "--quiet",
        default=False,
        action="store_true",
        help="Display the minimal amount of output",
    )
    @argument(
        "-h",
        "--height",
        default=420,
        type=int,
        help="The height of the iframe in pixels",
    )
    @line_cell_magic
    def wandb(self, line, cell=None):
        """Display wandb resources in jupyter. This can be used as cell or line magic.
        %wandb USERNAME/PROJECT/runs/RUN_ID
        ---
        %%wandb -h 1024
        with wandb.init() as run:
            run.log({"loss": 1})
        """
        # Record options
        args = parse_argstring(self.wandb, line)
        self.options["height"] = args.height
        self.options["workspace"] = args.workspace
        self.options["quiet"] = args.quiet
        iframe = IFrame(args.path, opts=self.options)
        displayed = iframe.maybe_display()
        if cell is not None:
            if not displayed:
                # Store the IFrame globally and attempt to display if we have a run
                # The IFrame is reset to None after the cell body runs.
                cell = (
                    f"wandb.jupyter.__IFrame = wandb.jupyter.IFrame(opts={self.options})\n"
                    + cell
                    + "\nwandb.jupyter.__IFrame = None"
                )
            get_ipython().run_cell(cell)
def notebook_metadata_from_jupyter_servers_and_kernel_id():
    """Query the running jupyter servers for the session matching our kernel
    and return {"root", "path", "name"} for its notebook, or None.

    Raises:
        ValueError: if a candidate server is password protected (its session
            API cannot be queried).
    """
    servers, kernel_id = jupyter_servers_and_kernel_id()
    for s in servers:
        if s.get("password"):
            raise ValueError("Can't query password protected kernel")
        res = requests.get(
            urljoin(s["url"], "api/sessions"), params={"token": s.get("token", "")}
        ).json()
        for nn in res:
            # TODO: wandb/client#400 found a case where res returned an array of
            # strings...
            if isinstance(nn, dict) and nn.get("kernel") and "notebook" in nn:
                if nn["kernel"]["id"] == kernel_id:
                    return {
                        "root": s.get("root_dir", s.get("notebook_dir", os.getcwd())),
                        "path": nn["notebook"]["path"],
                        "name": nn["notebook"]["name"],
                    }
    return None
def notebook_metadata(silent):
    """Attempts to query jupyter for the path and name of the notebook file.
    This can handle many different jupyter environments, specifically:
    1. Colab
    2. Kaggle
    3. JupyterLab
    4. Notebooks
    5. Other?

    Args:
        silent: bool. Suppress the error log when detection fails.

    Returns:
        dict with keys "root", "path", "name", or {} on failure.
    """
    error_message = (
        "Failed to detect the name of this notebook, you can set it manually with "
        "the WANDB_NOTEBOOK_NAME environment variable to enable code saving."
    )
    try:
        # In colab we can request the most recent contents
        ipynb = attempt_colab_load_ipynb()
        if ipynb:
            ret = {
                "root": "/content",
                "path": ipynb["metadata"]["colab"]["name"],
                "name": ipynb["metadata"]["colab"]["name"],
            }
            # Prefer the jupyter server's path when it can be queried.
            try:
                jupyter_metadata = (
                    notebook_metadata_from_jupyter_servers_and_kernel_id()
                )
            except RuntimeError:
                pass
            else:
                # NOTE(review): if the call above returned None, this
                # subscript raises and falls through to the outer except,
                # returning {} — confirm that is intended.
                ret["path"] = jupyter_metadata["path"]
            return ret
        if wandb.util._is_kaggle():
            # In kaggle we can request the most recent contents
            ipynb = attempt_kaggle_load_ipynb()
            if ipynb:
                return {
                    "root": "/kaggle/working",
                    "path": ipynb["metadata"]["name"],
                    "name": ipynb["metadata"]["name"],
                }
        jupyter_metadata = notebook_metadata_from_jupyter_servers_and_kernel_id()
        if jupyter_metadata:
            return jupyter_metadata
        if not silent:
            logger.error(error_message)
        return {}
    except Exception:
        # TODO: report this exception
        # TODO: Fix issue this is not the logger initialized in in wandb.init()
        # since logger is not attached, outputs to notebook
        if not silent:
            logger.error(error_message)
        return {}
def jupyter_servers_and_kernel_id():
    """Return (running_jupyter_servers, current_kernel_id) so the notebook
    name can be queried; ([], None) when detection fails."""
    try:
        import ipykernel
        connection_file = ipykernel.connect.get_connection_file()
        kernel_id = re.search("kernel-(.*).json", connection_file).group(1)
        # We're either in jupyterlab or a notebook; prefer the newer
        # jupyter_server package, then fall back to notebook.
        servers = []
        for module_name in ("jupyter_server.serverapp", "notebook.notebookapp"):
            app = wandb.util.get_module(module_name)
            if app is not None:
                servers.extend(list(app.list_running_servers()))
        return servers, kernel_id
    except (AttributeError, ValueError, ImportError):
        return [], None
def attempt_colab_load_ipynb():
    """Return the current notebook dict via the colab API, or None when not
    running in colab or nothing comes back."""
    colab = wandb.util.get_module("google.colab")
    if not colab:
        return None
    # This isn't thread safe, never call in a thread
    response = colab._message.blocking_request("get_ipynb", timeout_sec=5)
    return response["ipynb"] if response else None
def attempt_kaggle_load_ipynb():
    """Return the current Kaggle notebook as a parsed dict, or None."""
    kaggle = wandb.util.get_module("kaggle_session")
    if not kaggle:
        return None
    try:
        client = kaggle.UserSessionClient()
        parsed = json.loads(client.get_exportable_ipynb()["source"])
        # TODO: couldn't find a way to get the name of the notebook...
        parsed["metadata"]["name"] = "kaggle.ipynb"
        return parsed
    except Exception:
        logger.exception("Unable to load kaggle notebook")
        return None
def attempt_colab_login(app_url):
    """This renders an iframe to wandb in the hopes it posts back an api key"""
    from google.colab import output
    from google.colab._message import MessageError
    from IPython import display
    # The injected JS loads Postmate, opens a hidden iframe to the wandb
    # authorize page, and resolves window._wandbApiKey with the key (or
    # rejects after a 5s timeout).
    display.display(
        display.Javascript(
            """
        window._wandbApiKey = new Promise((resolve, reject) => {
            function loadScript(url) {
            return new Promise(function(resolve, reject) {
                let newScript = document.createElement("script");
                newScript.onerror = reject;
                newScript.onload = resolve;
                document.body.appendChild(newScript);
                newScript.src = url;
            });
            }
            loadScript("https://cdn.jsdelivr.net/npm/postmate/build/postmate.min.js").then(() => {
            const iframe = document.createElement('iframe')
            iframe.style.cssText = "width:0;height:0;border:none"
            document.body.appendChild(iframe)
            const handshake = new Postmate({
                container: iframe,
                url: '%s/authorize'
            });
            const timeout = setTimeout(() => reject("Couldn't auto authenticate"), 5000)
            handshake.then(function(child) {
                child.on('authorize', data => {
                    clearTimeout(timeout)
                    resolve(data)
                });
            });
            })
        });
    """  # noqa: E501
            % app_url.replace("http:", "https:")
        )
    )
    try:
        # Blocks until the promise above settles; MessageError means no key.
        return output.eval_js("_wandbApiKey")
    except MessageError:
        return None
class Notebook(object):
    """Captures notebook state (cell display outputs, the .ipynb file, and
    session history) so wandb can save code alongside a run."""
    def __init__(self, settings):
        # Maps execution_count -> list of rich display outputs for that cell.
        self.outputs = {}
        self.settings = settings
        self.shell = get_ipython()
    def save_display(self, exc_count, data_with_metadata):
        """Record one cell's rich display output under its execution count."""
        self.outputs[exc_count] = self.outputs.get(exc_count, [])
        # byte values such as images need to be encoded in base64
        # otherwise nbformat.v4.new_output will throw a NotebookValidationError
        data = data_with_metadata["data"]
        b64_data = {}
        for key in data:
            val = data[key]
            if isinstance(val, bytes):
                b64_data[key] = b64encode(val).decode("utf-8")
            else:
                b64_data[key] = val
        self.outputs[exc_count].append(
            {"data": b64_data, "metadata": data_with_metadata["metadata"]}
        )
    def probe_ipynb(self):
        """Return notebook as dict or None."""
        # Try, in order: the path jupyter reported, colab, then kaggle.
        relpath = self.settings._jupyter_path
        if relpath:
            if os.path.exists(relpath):
                with open(relpath, "r") as json_file:
                    data = json.load(json_file)
                    return data
        colab_ipynb = attempt_colab_load_ipynb()
        if colab_ipynb:
            return colab_ipynb
        kaggle_ipynb = attempt_kaggle_load_ipynb()
        if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0:
            return kaggle_ipynb
        return
    def save_ipynb(self):
        """Copy the current notebook file into the tmp code dir.

        Returns True if a notebook was saved, False otherwise.
        """
        if not self.settings.save_code:
            logger.info("not saving jupyter notebook")
            return False
        relpath = self.settings._jupyter_path
        logger.info("looking for notebook: %s", relpath)
        if relpath:
            if os.path.exists(relpath):
                shutil.copy(
                    relpath,
                    os.path.join(
                        self.settings._tmp_code_dir, os.path.basename(relpath)
                    ),
                )
                return True
        # TODO: likely only save if the code has changed
        colab_ipynb = attempt_colab_load_ipynb()
        if colab_ipynb:
            with open(
                os.path.join(
                    self.settings._tmp_code_dir,
                    colab_ipynb["metadata"]["colab"]["name"],
                ),
                "w",
                encoding="utf-8",
            ) as f:
                f.write(json.dumps(colab_ipynb))
            return True
        kaggle_ipynb = attempt_kaggle_load_ipynb()
        if kaggle_ipynb and len(kaggle_ipynb["cells"]) > 0:
            with open(
                os.path.join(
                    self.settings._tmp_code_dir, kaggle_ipynb["metadata"]["name"]
                ),
                "w",
                encoding="utf-8",
            ) as f:
                f.write(json.dumps(kaggle_ipynb))
            return True
        return False
    def save_history(self):
        """This saves all cell executions in the current session as a new notebook"""
        try:
            from nbformat import write, v4, validator
        except ImportError:
            logger.error("Run pip install nbformat to save notebook history")
            return
        # TODO: some tests didn't patch ipython properly?
        if self.shell is None:
            return
        cells = []
        hist = list(self.shell.history_manager.get_range(output=True))
        if len(hist) <= 1 or not self.settings.save_code:
            logger.info("not saving jupyter history")
            return
        try:
            # Rebuild each executed cell, attaching stdout and any rich
            # display outputs recorded via save_display().
            for _, execution_count, exc in hist:
                if exc[1]:
                    # TODO: capture stderr?
                    outputs = [
                        v4.new_output(output_type="stream", name="stdout", text=exc[1])
                    ]
                else:
                    outputs = []
                if self.outputs.get(execution_count):
                    for out in self.outputs[execution_count]:
                        outputs.append(
                            v4.new_output(
                                output_type="display_data",
                                data=out["data"],
                                metadata=out["metadata"] or {},
                            )
                        )
                cells.append(
                    v4.new_code_cell(
                        execution_count=execution_count, source=exc[0], outputs=outputs
                    )
                )
            if hasattr(self.shell, "kernel"):
                language_info = self.shell.kernel.language_info
            else:
                language_info = {"name": "python", "version": sys.version}
            logger.info("saving %i cells to _session_history.ipynb", len(cells))
            nb = v4.new_notebook(
                cells=cells,
                metadata={
                    "kernelspec": {
                        "display_name": "Python %i" % sys.version_info[0],
                        "name": "python%i" % sys.version_info[0],
                        "language": "python",
                    },
                    "language_info": language_info,
                },
            )
            state_path = os.path.join("code", "_session_history.ipynb")
            wandb.run._set_config_wandb("session_history", state_path)
            wandb.util.mkdir_exists_ok(os.path.join(wandb.run.dir, "code"))
            # Write the assembled notebook both to the tmp code dir and the
            # run directory.
            with open(
                os.path.join(self.settings._tmp_code_dir, "_session_history.ipynb"),
                "w",
                encoding="utf-8",
            ) as f:
                write(nb, f, version=4)
            with open(
                os.path.join(wandb.run.dir, state_path), "w", encoding="utf-8"
            ) as f:
                write(nb, f, version=4)
        except (OSError, validator.NotebookValidationError) as e:
            logger.error("Unable to save ipython session history:\n%s", e)
            pass
|
|
from __future__ import division, print_function, absolute_import
import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from .._hessian_update_strategy import BFGS
from ..optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport
# Human-readable explanations keyed by the solver's termination status code.
TERMINATION_MESSAGES = {
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`xtol` termination condition is satisfied.",
    3: "`callback` function requested termination."
}
class HessianLinearOperator(object):
    """Build LinearOperator from hessp.

    Given a Hessian-vector-product callable ``hessp(x, p, *args)``, calling
    the instance at a point ``x`` returns an n-by-n LinearOperator whose
    matvec evaluates ``hessp`` at that point.
    """
    def __init__(self, hessp, n):
        self.hessp = hessp
        self.n = n

    def __call__(self, x, *args):
        def hessian_vector_product(p):
            return self.hessp(x, p, *args)
        shape = (self.n, self.n)
        return LinearOperator(shape, matvec=hessian_vector_product)
class LagrangianHessian(object):
    """The Hessian of the Lagrangian as LinearOperator.

    The Lagrangian is computed as the objective function plus all the
    constraints multiplied with some numbers (Lagrange multipliers).
    """
    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        # Evaluate both Hessians once; the returned operator applies their
        # sum lazily, one matvec at a time.
        H_objective = self.objective_hess(x)
        H_constraints = self.constraints_hess(x, v_eq, v_ineq)

        def lagrangian_matvec(p):
            return H_objective.dot(p) + H_constraints.dot(p)

        return LinearOperator((self.n, self.n), lagrangian_matvec)
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    """Refresh ``state`` (an OptimizeResult) after one SQP iteration.

    Evaluation counters and trust-region bookkeeping are always updated;
    solution-dependent fields (x, fun, grad, multipliers, constraint values,
    optimality, violation) are refreshed only for accepted steps.
    Mutates and returns ``state``.
    """
    state.nit += 1
    # Objective counters; njev tracks gradient evaluations (objective.ngev).
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    # Per-constraint counters. Constraints not wrapped in a VectorFunction
    # (e.g. linear ones) report 0 since they need no user-function calls.
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient: grad f + sum_i J_i^T v_i
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation over all bounds (lb <= c <= ub).
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])
    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]
    return state
def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    """Refresh ``state`` after an interior-point iteration.

    Delegates the common bookkeeping to ``update_state_sqp`` and then
    records the barrier subproblem parameters. Returns the mutated state.
    """
    state = update_state_sqp(
        state, x, last_iteration_failed, objective, prepared_constraints,
        start_time, tr_radius, constr_penalty, cg_info)
    state.barrier_tolerance = barrier_tolerance
    state.barrier_parameter = barrier_parameter
    return state
def _minimize_trustregion_constr(fun, x0, args, grad,
                                 hess, hessp, bounds, constraints,
                                 xtol=1e-8, gtol=1e-8,
                                 barrier_tol=1e-8,
                                 sparse_jacobian=None,
                                 callback=None, maxiter=1000,
                                 verbose=0, finite_diff_rel_step=None,
                                 initial_constr_penalty=1.0, initial_tr_radius=1.0,
                                 initial_barrier_parameter=0.1,
                                 initial_barrier_tolerance=0.1,
                                 factorization_method=None,
                                 disp=False):
    """Minimize a scalar function subject to constraints.
    Parameters
    ----------
    gtol : float, optional
        Tolerance for termination by the norm of the Lagrangian gradient.
        The algorithm will terminate when both the infinity norm (i.e., max
        abs value) of the Lagrangian gradient and the constraint violation
        are smaller than ``gtol``. Default is 1e-8.
    xtol : float, optional
        Tolerance for termination by the change of the independent variable.
        The algorithm will terminate when ``tr_radius < xtol``, where
        ``tr_radius`` is the radius of the trust region used in the algorithm.
        Default is 1e-8.
    barrier_tol : float, optional
        Threshold on the barrier parameter for the algorithm termination.
        When inequality constraints are present, the algorithm will terminate
        only when the barrier parameter is less than `barrier_tol`.
        Default is 1e-8.
    sparse_jacobian : {bool, None}, optional
        Determines how to represent Jacobians of the constraints. If bool,
        then Jacobians of all the constraints will be converted to the
        corresponding format. If None (default), then Jacobians won't be
        converted, but the algorithm can proceed only if they all have the
        same format.
    initial_tr_radius: float, optional
        Initial trust radius. The trust radius gives the maximum distance
        between solution points in consecutive iterations. It reflects the
        trust the algorithm puts in the local approximation of the optimization
        problem. For an accurate local approximation the trust-region should be
        large and for an  approximation valid only close to the current point it
        should be a small one. The trust radius is automatically updated throughout
        the optimization process, with ``initial_tr_radius`` being its initial value.
        Default is 1 (recommended in [1]_, p. 19).
    initial_constr_penalty : float, optional
        Initial constraints penalty parameter. The penalty parameter is used for
        balancing the requirements of decreasing the objective function
        and satisfying the constraints. It is used for defining the merit function:
        ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
        where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
        the constraints. The merit function is used for accepting or rejecting
        trial points and ``constr_penalty`` weights the two conflicting goals
        of reducing objective function and constraints. The penalty is automatically
        updated throughout the optimization process, with
        ``initial_constr_penalty`` being its  initial value. Default is 1
        (recommended in [1]_, p 19).
    initial_barrier_parameter, initial_barrier_tolerance: float, optional
        Initial barrier parameter and initial tolerance for the barrier subproblem.
        Both are used only when inequality constraints are present. For dealing with
        optimization problems ``min_x f(x)`` subject to inequality constraints
        ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
        ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
        constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
        is solved for decreasing values of ``barrier_parameter`` and with decreasing
        tolerances for the termination, starting with ``initial_barrier_parameter``
        for the barrier parameter and ``initial_barrier_tolerance`` for the
        barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
        Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
        with the same prefactor.
    factorization_method : string or None, optional
        Method to factorize the Jacobian of the constraints. Use None (default)
        for the auto selection or one of:
        - 'NormalEquation' (requires scikit-sparse)
        - 'AugmentedSystem'
        - 'QRFactorization'
        - 'SVDFactorization'
        The methods 'NormalEquation' and 'AugmentedSystem' can be used only
        with sparse constraints. The projections required by the algorithm
        will be computed using, respectively, the normal equation and the
        augmented system approaches explained in [1]_. 'NormalEquation'
        computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
        performs the LU factorization of an augmented system. They usually
        provide similar results. 'AugmentedSystem' is used by default for
        sparse matrices.
        The methods 'QRFactorization' and 'SVDFactorization' can be used
        only with dense constraints. They compute the required projections
        using, respectively, QR and SVD factorizations. The 'SVDFactorization'
        method can cope with Jacobian matrices with deficient row rank and will
        be used whenever other factorization methods fail (which may imply the
        conversion of sparse matrices to a dense format when required).
        By default, 'QRFactorization' is used for dense matrices.
    finite_diff_rel_step : None or array_like, optional
        Relative step size for the finite difference approximation.
    maxiter : int, optional
        Maximum number of algorithm iterations. Default is 1000.
    verbose : {0, 1, 2}, optional
        Level of algorithm's verbosity:
        * 0 (default) : work silently.
        * 1 : display a termination report.
        * 2 : display progress during iterations.
        * 3 : display progress during iterations (more complete report).
    disp : bool, optional
        If True (default), then `verbose` will be set to 1 if it was 0.
    Returns
    -------
    `OptimizeResult` with the fields documented below. Note the following:
    1. All values corresponding to the constraints are ordered as they
    were passed to the solver. And values corresponding to `bounds`
    constraints are put *after* other constraints.
    2. All numbers of function, Jacobian or Hessian evaluations correspond
    to numbers of actual Python function calls. It means, for example,
    that if a Jacobian is estimated by finite differences, then the
    number of Jacobian evaluations will be zero and the number of
    function evaluations will be incremented by all calls during the
    finite difference estimation.
    x : ndarray, shape (n,)
        Solution found.
    optimality : float
        Infinity norm of the Lagrangian gradient at the solution.
    constr_violation : float
        Maximum constraint violation at the solution.
    fun : float
        Objective function at the solution.
    grad : ndarray, shape (n,)
        Gradient of the objective function at the solution.
    lagrangian_grad : ndarray, shape (n,)
        Gradient of the Lagrangian function at the solution.
    nit : int
        Total number of iterations.
    nfev : integer
        Number of the objective function evaluations.
    njev : integer
        Number of the objective function gradient evaluations.
    nhev : integer
        Number of the objective function Hessian evaluations.
    cg_niter : int
        Total number of the conjugate gradient method iterations.
    method : {'equality_constrained_sqp', 'tr_interior_point'}
        Optimization method used.
    constr : list of ndarray
        List of constraint values at the solution.
    jac : list of {ndarray, sparse matrix}
        List of the Jacobian matrices of the constraints at the solution.
    v : list of ndarray
        List of the Lagrange multipliers for the constraints at the solution.
        For an inequality constraint a positive multiplier means that the upper
        bound is active, a negative multiplier means that the lower bound is
        active and if a multiplier is zero it means the constraint is not
        active.
    constr_nfev : list of int
        Number of constraint evaluations for each of the constraints.
    constr_njev : list of int
        Number of Jacobian matrix evaluations for each of the constraints.
    constr_nhev : list of int
        Number of Hessian evaluations for each of the constraints.
    tr_radius : float
        Radius of the trust region at the last iteration.
    constr_penalty : float
        Penalty parameter at the last iteration, see `initial_constr_penalty`.
    barrier_tolerance : float
        Tolerance for the barrier subproblem at the last iteration.
        Only for problems with inequality constraints.
    barrier_parameter : float
        Barrier parameter at the last iteration. Only for problems
        with inequality constraints.
    execution_time : float
        Total execution time.
    message : str
        Termination message.
    status : {0, 1, 2, 3}
        Termination status:
        * 0 : The maximum number of function evaluations is exceeded.
        * 1 : `gtol` termination condition is satisfied.
        * 2 : `xtol` termination condition is satisfied.
        * 3 : `callback` function requested termination.
    cg_stop_cond : int
        Reason for CG subproblem termination at the last iteration:
        * 0 : CG subproblem not evaluated.
        * 1 : Iteration limit was reached.
        * 2 : Reached the trust-region boundary.
        * 3 : Negative curvature detected.
        * 4 : Tolerance was satisfied.
    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    x0 = np.atleast_1d(x0).astype(float)
    n_vars = np.size(x0)
    # No explicit Hessian: wrap hessp as a LinearOperator factory, otherwise
    # fall back to a BFGS quasi-Newton approximation.
    if hess is None:
        if callable(hessp):
            hess = HessianLinearOperator(hessp, n_vars)
        else:
            hess = BFGS()
    if disp and verbose == 0:
        verbose = 1
    if bounds is not None:
        finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
                                           bounds.keep_feasible, n_vars)
    else:
        finite_diff_bounds = (-np.inf, np.inf)
    # Define Objective Function
    objective = ScalarFunction(fun, x0, args, grad, hess,
                               finite_diff_rel_step, finite_diff_bounds)
    # Put constraints in list format when needed.
    if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
        constraints = [constraints]
    # Prepare constraints.
    prepared_constraints = [
        PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
        for c in constraints]
    # Check that all constraints are either sparse or dense.
    n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
    # NOTE(review): the message below says "True of False" (sic); it is a
    # runtime string, so it is left unchanged here.
    if 0 < n_sparse < len(prepared_constraints):
        raise ValueError("All constraints must have the same kind of the "
                         "Jacobian --- either all sparse or all dense. "
                         "You can set the sparsity globally by setting "
                         "`sparse_jacobian` to either True of False.")
    if prepared_constraints:
        sparse_jacobian = n_sparse > 0
    if bounds is not None:
        if sparse_jacobian is None:
            sparse_jacobian = True
        prepared_constraints.append(PreparedConstraint(bounds, x0,
                                                       sparse_jacobian))
    # Concatenate initial constraints to the canonical form.
    c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
        n_vars, prepared_constraints, sparse_jacobian)
    # Prepare all canonical constraints and concatenate it into one.
    canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
                     for c in prepared_constraints]
    if len(canonical_all) == 0:
        canonical = CanonicalConstraint.empty(n_vars)
    elif len(canonical_all) == 1:
        canonical = canonical_all[0]
    else:
        canonical = CanonicalConstraint.concatenate(canonical_all,
                                                    sparse_jacobian)
    # Generate the Hessian of the Lagrangian.
    lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
    # Choose appropriate method
    if canonical.n_ineq == 0:
        method = 'equality_constrained_sqp'
    else:
        method = 'tr_interior_point'
    # Construct OptimizeResult
    state = OptimizeResult(
        nit=0, nfev=0, njev=0, nhev=0,
        cg_niter=0, cg_stop_cond=0,
        fun=objective.f, grad=objective.g,
        lagrangian_grad=np.copy(objective.g),
        constr=[c.fun.f for c in prepared_constraints],
        jac=[c.fun.J for c in prepared_constraints],
        constr_nfev=[0 for c in prepared_constraints],
        constr_njev=[0 for c in prepared_constraints],
        constr_nhev=[0 for c in prepared_constraints],
        v=[c.fun.v for c in prepared_constraints],
        method=method)
    # Start counting
    start_time = time.time()
    # Define stop criteria
    if method == 'equality_constrained_sqp':
        def stop_criteria(state, x, last_iteration_failed,
                          optimality, constr_violation,
                          tr_radius, constr_penalty, cg_info):
            state = update_state_sqp(state, x, last_iteration_failed,
                                     objective, prepared_constraints,
                                     start_time, tr_radius, constr_penalty,
                                     cg_info)
            if verbose == 2:
                BasicReport.print_iteration(state.nit,
                                            state.nfev,
                                            state.cg_niter,
                                            state.fun,
                                            state.tr_radius,
                                            state.optimality,
                                            state.constr_violation)
            elif verbose > 2:
                SQPReport.print_iteration(state.nit,
                                          state.nfev,
                                          state.cg_niter,
                                          state.fun,
                                          state.tr_radius,
                                          state.optimality,
                                          state.constr_violation,
                                          state.constr_penalty,
                                          state.cg_stop_cond)
            # None means "keep iterating"; codes 0-3 map to TERMINATION_MESSAGES.
            state.status = None
            state.niter = state.nit # Alias for callback (backward-compatibility)
            if callback is not None and callback(np.copy(state.x), state):
                state.status = 3
            elif state.optimality < gtol and state.constr_violation < gtol:
                state.status = 1
            elif state.tr_radius < xtol:
                state.status = 2
            elif state.nit >= maxiter:
                state.status = 0
            return state.status in (0, 1, 2, 3)
    elif method == 'tr_interior_point':
        def stop_criteria(state, x, last_iteration_failed, tr_radius,
                          constr_penalty, cg_info, barrier_parameter,
                          barrier_tolerance):
            state = update_state_ip(state, x, last_iteration_failed,
                                    objective, prepared_constraints,
                                    start_time, tr_radius, constr_penalty,
                                    cg_info, barrier_parameter, barrier_tolerance)
            if verbose == 2:
                BasicReport.print_iteration(state.nit,
                                            state.nfev,
                                            state.cg_niter,
                                            state.fun,
                                            state.tr_radius,
                                            state.optimality,
                                            state.constr_violation)
            elif verbose > 2:
                IPReport.print_iteration(state.nit,
                                         state.nfev,
                                         state.cg_niter,
                                         state.fun,
                                         state.tr_radius,
                                         state.optimality,
                                         state.constr_violation,
                                         state.constr_penalty,
                                         state.barrier_parameter,
                                         state.cg_stop_cond)
            # None means "keep iterating"; codes 0-3 map to TERMINATION_MESSAGES.
            state.status = None
            state.niter = state.nit # Alias for callback (backward compatibility)
            if callback is not None and callback(np.copy(state.x), state):
                state.status = 3
            elif state.optimality < gtol and state.constr_violation < gtol:
                state.status = 1
            elif (state.tr_radius < xtol
                  and state.barrier_parameter < barrier_tol):
                state.status = 2
            elif state.nit >= maxiter:
                state.status = 0
            return state.status in (0, 1, 2, 3)
    if verbose == 2:
        BasicReport.print_header()
    elif verbose > 2:
        if method == 'equality_constrained_sqp':
            SQPReport.print_header()
        elif method == 'tr_interior_point':
            IPReport.print_header()
    # Call inferior function to do the optimization
    if method == 'equality_constrained_sqp':
        def fun_and_constr(x):
            f = objective.fun(x)
            c_eq, _ = canonical.fun(x)
            return f, c_eq
        def grad_and_jac(x):
            g = objective.grad(x)
            J_eq, _ = canonical.jac(x)
            return g, J_eq
        _, result = equality_constrained_sqp(
            fun_and_constr, grad_and_jac, lagrangian_hess,
            x0, objective.f, objective.g,
            c_eq0, J_eq0,
            stop_criteria, state,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)
    elif method == 'tr_interior_point':
        _, result = tr_interior_point(
            objective.fun, objective.grad, lagrangian_hess,
            n_vars, canonical.n_ineq, canonical.n_eq,
            canonical.fun, canonical.jac,
            x0, objective.f, objective.g,
            c_ineq0, J_ineq0, c_eq0, J_eq0,
            stop_criteria,
            canonical.keep_feasible,
            xtol, state, initial_barrier_parameter,
            initial_barrier_tolerance,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)
    # Status 3 occurs when the callback function requests termination,
    # this is assumed to not be a success.
    result.success = True if result.status in (1, 2) else False
    result.message = TERMINATION_MESSAGES[result.status]
    # Alias (for backward compatibility with 1.1.0)
    result.niter = result.nit
    if verbose == 2:
        BasicReport.print_footer()
    elif verbose > 2:
        if method == 'equality_constrained_sqp':
            SQPReport.print_footer()
        elif method == 'tr_interior_point':
            IPReport.print_footer()
    if verbose >= 1:
        print(result.message)
        print("Number of iterations: {}, function evaluations: {}, "
              "CG iterations: {}, optimality: {:.2e}, "
              "constraint violation: {:.2e}, execution time: {:4.2} s."
              .format(result.nit, result.nfev, result.cg_niter,
                      result.optimality, result.constr_violation,
                      result.execution_time))
    return result
|
|
"""Support for Z-Wave door locks."""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.lock import DOMAIN, LockDevice
from homeassistant.components import zwave
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Extra state-attribute keys exposed on the lock entity.
ATTR_NOTIFICATION = 'notification'
ATTR_LOCK_STATUS = 'lock_status'
# Service-call data keys.
ATTR_CODE_SLOT = 'code_slot'
ATTR_USERCODE = 'usercode'
CONFIG_ADVANCED = 'Advanced'
# Usercode management services registered by this platform.
SERVICE_SET_USERCODE = 'set_usercode'
SERVICE_GET_USERCODE = 'get_usercode'
SERVICE_CLEAR_USERCODE = 'clear_usercode'
POLYCONTROL = 0x10E
DANALOCK_V2_BTZE = 0x2
POLYCONTROL_DANALOCK_V2_BTZE_LOCK = (POLYCONTROL, DANALOCK_V2_BTZE)
# Workaround bit flags combined per device in DEVICE_MAPPINGS.
WORKAROUND_V2BTZE = 1
WORKAROUND_DEVICE_STATE = 2
WORKAROUND_TRACK_MESSAGE = 4
WORKAROUND_ALARM_TYPE = 8
# (manufacturer_id, product_id) -> workaround flag mask.
DEVICE_MAPPINGS = {
    POLYCONTROL_DANALOCK_V2_BTZE_LOCK: WORKAROUND_V2BTZE,
    # Kwikset 914TRL ZW500
    (0x0090, 0x440): WORKAROUND_DEVICE_STATE,
    (0x0090, 0x446): WORKAROUND_DEVICE_STATE,
    # Yale YRD210, Yale YRD240
    (0x0129, 0x0209): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
    (0x0129, 0xAA00): WORKAROUND_DEVICE_STATE,
    # Yale YRL220/YRD220
    (0x0129, 0x0000): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
    # Yale YRD120
    (0x0129, 0x0800): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
    # Yale YRD220 (as reported by adrum in PR #17386)
    (0x0109, 0x0000): WORKAROUND_DEVICE_STATE | WORKAROUND_ALARM_TYPE,
    # Schlage BE469
    (0x003B, 0x5044): WORKAROUND_DEVICE_STATE | WORKAROUND_TRACK_MESSAGE,
    # Schlage FE599NX
    (0x003B, 0x504C): WORKAROUND_DEVICE_STATE,
}
# Z-Wave access-control notification codes -> human-readable event text.
LOCK_NOTIFICATION = {
    '1': 'Manual Lock',
    '2': 'Manual Unlock',
    '5': 'Keypad Lock',
    '6': 'Keypad Unlock',
    '11': 'Lock Jammed',
    '254': 'Unknown Event'
}
NOTIFICATION_RF_LOCK = '3'
NOTIFICATION_RF_UNLOCK = '4'
LOCK_NOTIFICATION[NOTIFICATION_RF_LOCK] = 'RF Lock'
LOCK_NOTIFICATION[NOTIFICATION_RF_UNLOCK] = 'RF Unlock'
# Z-Wave alarm-type codes -> status text (some are prefixes completed with
# the alarm level, e.g. a user or slot number).
LOCK_ALARM_TYPE = {
    '9': 'Deadbolt Jammed',
    '16': 'Unlocked by Bluetooth ',
    '18': 'Locked with Keypad by user ',
    '19': 'Unlocked with Keypad by user ',
    '21': 'Manually Locked ',
    '22': 'Manually Unlocked ',
    '27': 'Auto re-lock',
    '33': 'User deleted: ',
    '112': 'Master code changed or User added: ',
    '113': 'Duplicate Pin-code: ',
    '130': 'RF module, power restored',
    '144': 'Unlocked by NFC Tag or Card by user ',
    '161': 'Tamper Alarm: ',
    '167': 'Low Battery',
    '168': 'Critical Battery Level',
    '169': 'Battery too low to operate'
}
ALARM_RF_LOCK = '24'
ALARM_RF_UNLOCK = '25'
LOCK_ALARM_TYPE[ALARM_RF_LOCK] = 'Locked by RF'
LOCK_ALARM_TYPE[ALARM_RF_UNLOCK] = 'Unlocked by RF'
# Alarm-level suffixes for manual-lock (type 21) and tamper (type 161) alarms.
MANUAL_LOCK_ALARM_LEVEL = {
    '1': 'by Key Cylinder or Inside thumb turn',
    '2': 'by Touch function (lock and leave)'
}
TAMPER_ALARM_LEVEL = {
    '1': 'Too many keypresses',
    '2': 'Cover removed'
}
# Notification / alarm-type code -> locked (True) or unlocked (False).
LOCK_STATUS = {
    '1': True,
    '2': False,
    '3': True,
    '4': False,
    '5': True,
    '6': False,
    '9': False,
    '18': True,
    '19': False,
    '21': True,
    '22': False,
    '24': True,
    '25': False,
    '27': True
}
# Alarm types whose status text is completed by appending the raw alarm level.
ALARM_TYPE_STD = [
    '18',
    '19',
    '33',
    '112',
    '113',
    '144'
]
# Voluptuous schemas validating the service-call payloads.
SET_USERCODE_SCHEMA = vol.Schema({
    vol.Required(zwave.const.ATTR_NODE_ID): vol.Coerce(int),
    vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
    vol.Required(ATTR_USERCODE): cv.string,
})
GET_USERCODE_SCHEMA = vol.Schema({
    vol.Required(zwave.const.ATTR_NODE_ID): vol.Coerce(int),
    vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
})
CLEAR_USERCODE_SCHEMA = vol.Schema({
    vol.Required(zwave.const.ATTR_NODE_ID): vol.Coerce(int),
    vol.Required(ATTR_CODE_SLOT): vol.Coerce(int),
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Old method of setting up Z-Wave locks.

    Setup is handled through config entries (``async_setup_entry``),
    so this legacy hook is intentionally a no-op.
    """
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Z-Wave Lock from Config Entry.

    Connects the dispatcher signal that adds discovered locks and registers
    the usercode services (set/get/clear) on the lock domain.
    """
    @callback
    def async_add_lock(lock):
        """Add Z-Wave Lock."""
        async_add_entities([lock])
    async_dispatcher_connect(hass, 'zwave_new_lock', async_add_lock)
    network = hass.data[zwave.const.DATA_NETWORK]

    def set_usercode(service):
        """Set the usercode to index X on the lock."""
        node_id = service.data.get(zwave.const.ATTR_NODE_ID)
        lock_node = network.nodes[node_id]
        code_slot = service.data.get(ATTR_CODE_SLOT)
        usercode = service.data.get(ATTR_USERCODE)
        for value in lock_node.get_values(
                class_id=zwave.const.COMMAND_CLASS_USER_CODE).values():
            if value.index != code_slot:
                continue
            # The current slot content length defines the maximum code length.
            if len(str(usercode)) < 4:
                _LOGGER.error("Invalid code provided: (%s) "
                              "usercode must be atleast 4 and at most"
                              " %s digits",
                              usercode, len(value.data))
                break
            value.data = str(usercode)
            break

    def get_usercode(service):
        """Get a usercode at index X on the lock."""
        node_id = service.data.get(zwave.const.ATTR_NODE_ID)
        lock_node = network.nodes[node_id]
        code_slot = service.data.get(ATTR_CODE_SLOT)
        for value in lock_node.get_values(
                class_id=zwave.const.COMMAND_CLASS_USER_CODE).values():
            if value.index != code_slot:
                continue
            _LOGGER.info("Usercode at slot %s is: %s", value.index, value.data)
            break

    def clear_usercode(service):
        """Clear the usercode at slot X on the lock."""
        node_id = service.data.get(zwave.const.ATTR_NODE_ID)
        lock_node = network.nodes[node_id]
        code_slot = service.data.get(ATTR_CODE_SLOT)
        for value in lock_node.get_values(
                class_id=zwave.const.COMMAND_CLASS_USER_CODE).values():
            if value.index != code_slot:
                continue
            # Overwrite the slot with NUL characters of the same length to
            # wipe it. (Replaces a manual append loop that also mutated the
            # loop variable with a dead `i += 1`.)
            data = '\0' * len(value.data)
            _LOGGER.debug('Data to clear lock: %s', data)
            value.data = data
            _LOGGER.info("Usercode at slot %s is cleared", value.index)
            break

    hass.services.async_register(
        DOMAIN, SERVICE_SET_USERCODE, set_usercode,
        schema=SET_USERCODE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_GET_USERCODE, get_usercode,
        schema=GET_USERCODE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_CLEAR_USERCODE, clear_usercode,
        schema=CLEAR_USERCODE_SCHEMA)
def get_device(node, values, **kwargs):
    """Create Z-Wave entity device.

    ``node`` and the extra keyword arguments are accepted for factory
    signature compatibility but are not used here.
    """
    return ZwaveLock(values)
class ZwaveLock(zwave.ZWaveDeviceEntity, LockDevice):
    """Representation of a Z-Wave Lock."""
    def __init__(self, values):
        """Initialize the Z-Wave lock device."""
        zwave.ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        # Locked state: True/False, or None until first update.
        self._state = None
        # Last access-control notification text (LOCK_NOTIFICATION).
        self._notification = None
        # Last alarm-derived status text (LOCK_ALARM_TYPE).
        self._lock_status = None
        self._v2btze = None
        self._state_workaround = False
        self._track_message_workaround = False
        self._previous_message = None
        self._alarm_type_workaround = False
        # Enable appropriate workaround flags for our device
        # Make sure that we have values for the key before converting to int
        if (self.node.manufacturer_id.strip() and
                self.node.product_id.strip()):
            specific_sensor_key = (int(self.node.manufacturer_id, 16),
                                   int(self.node.product_id, 16))
            if specific_sensor_key in DEVICE_MAPPINGS:
                workaround = DEVICE_MAPPINGS[specific_sensor_key]
                if workaround & WORKAROUND_V2BTZE:
                    self._v2btze = 1
                    _LOGGER.debug("Polycontrol Danalock v2 BTZE "
                                  "workaround enabled")
                if workaround & WORKAROUND_DEVICE_STATE:
                    self._state_workaround = True
                    _LOGGER.debug(
                        "Notification device state workaround enabled")
                if workaround & WORKAROUND_TRACK_MESSAGE:
                    self._track_message_workaround = True
                    _LOGGER.debug("Message tracking workaround enabled")
                if workaround & WORKAROUND_ALARM_TYPE:
                    self._alarm_type_workaround = True
                    _LOGGER.debug(
                        "Alarm Type device state workaround enabled")
        self.update_properties()
    def update_properties(self):
        """Handle data changes for node values.

        Derives locked state, notification text and lock status from the
        primary value, access-control notifications and alarm type/level,
        applying per-device workarounds enabled in __init__.
        """
        self._state = self.values.primary.data
        _LOGGER.debug("lock state set to %s", self._state)
        if self.values.access_control:
            notification_data = self.values.access_control.data
            self._notification = LOCK_NOTIFICATION.get(str(notification_data))
            # Some devices report state reliably only via notifications.
            if self._state_workaround:
                self._state = LOCK_STATUS.get(str(notification_data))
                _LOGGER.debug("workaround: lock state set to %s", self._state)
            if self._v2btze:
                if self.values.v2btze_advanced and \
                        self.values.v2btze_advanced.data == CONFIG_ADVANCED:
                    self._state = LOCK_STATUS.get(str(notification_data))
                    _LOGGER.debug(
                        "Lock state set from Access Control value and is %s, "
                        "get=%s", str(notification_data), self.state)
        if self._track_message_workaround:
            # NOTE(review): assumes byte 5 of lastReceivedMessage is the
            # command class of the last message -- confirm against openzwave.
            this_message = self.node.stats['lastReceivedMessage'][5]
            if this_message == zwave.const.COMMAND_CLASS_DOOR_LOCK:
                self._state = self.values.primary.data
                _LOGGER.debug("set state to %s based on message tracking",
                              self._state)
                if self._previous_message == \
                        zwave.const.COMMAND_CLASS_DOOR_LOCK:
                    if self._state:
                        self._notification = \
                            LOCK_NOTIFICATION[NOTIFICATION_RF_LOCK]
                        self._lock_status = \
                            LOCK_ALARM_TYPE[ALARM_RF_LOCK]
                    else:
                        self._notification = \
                            LOCK_NOTIFICATION[NOTIFICATION_RF_UNLOCK]
                        self._lock_status = \
                            LOCK_ALARM_TYPE[ALARM_RF_UNLOCK]
                    return
            self._previous_message = this_message
        if not self.values.alarm_type:
            return
        alarm_type = self.values.alarm_type.data
        if self.values.alarm_level:
            alarm_level = self.values.alarm_level.data
        else:
            alarm_level = None
        if not alarm_type:
            return
        # NOTE(review): alarm_type is compared as int (21, 161) but looked up
        # via str() -- presumably the value reports an integer; confirm.
        if self._alarm_type_workaround:
            self._state = LOCK_STATUS.get(str(alarm_type))
            _LOGGER.debug("workaround: lock state set to %s -- alarm type: %s",
                          self._state, str(alarm_type))
        if alarm_type == 21:
            self._lock_status = '{}{}'.format(
                LOCK_ALARM_TYPE.get(str(alarm_type)),
                MANUAL_LOCK_ALARM_LEVEL.get(str(alarm_level)))
            return
        if str(alarm_type) in ALARM_TYPE_STD:
            self._lock_status = '{}{}'.format(
                LOCK_ALARM_TYPE.get(str(alarm_type)), str(alarm_level))
            return
        if alarm_type == 161:
            self._lock_status = '{}{}'.format(
                LOCK_ALARM_TYPE.get(str(alarm_type)),
                TAMPER_ALARM_LEVEL.get(str(alarm_level)))
            return
        if alarm_type != 0:
            self._lock_status = LOCK_ALARM_TYPE.get(str(alarm_type))
            return
    @property
    def is_locked(self):
        """Return true if device is locked."""
        return self._state
    def lock(self, **kwargs):
        """Lock the device."""
        self.values.primary.data = True
    def unlock(self, **kwargs):
        """Unlock the device."""
        self.values.primary.data = False
    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        data = super().device_state_attributes
        if self._notification:
            data[ATTR_NOTIFICATION] = self._notification
        if self._lock_status:
            data[ATTR_LOCK_STATUS] = self._lock_status
        return data
|
|
"""
Base class for Level Editor
You should write your own LevelEditor class inheriting this.
Refer LevelEditor.py for example.
"""
from direct.showbase.DirectObject import *
from direct.directtools.DirectUtil import *
from direct.gui.DirectGui import *
from .CurveEditor import *
from .FileMgr import *
from .ActionMgr import *
from .MayaConverter import *
class LevelEditorBase(DirectObject):
""" Base Class for Panda3D LevelEditor """
    def __init__(self):
        """Set up editor bookkeeping, sub-managers and editing-mode constants."""
        #loadPrcFileData('startup', 'window-type none')
        # Currently opened scene file path (None until loaded/saved).
        self.currentFile = None
        # True when the scene has unsaved changes.
        self.fNeedToSave = False
        # (eventName, handler[, extraArgs]) tuples registered in initialize().
        self.actionEvents = []
        #self.objectMgr = ObjectMgr(self)
        # NOTE(review): self.objectMgr is used elsewhere (e.g. initialize) but
        # is commented out here -- presumably the subclass creates it; confirm.
        self.curveEditor = CurveEditor(self)
        self.fileMgr = FileMgr(self)
        self.actionMgr = ActionMgr()
        # True while a mouse drag is moving the camera (alt/modifier held).
        self.fMoveCamera = False
        self.NPParent = render
        # define your own config file in inherited class
        self.settingsFile = None
        # you can show/hide specific properties by using propertiesMask and this mode
        self.BASE_MODE = BitMask32.bit(0)
        self.CREATE_CURVE_MODE = BitMask32.bit(2)
        self.EDIT_CURVE_MODE = BitMask32.bit(3)
        self.ANIM_MODE = BitMask32.bit(4)
        self.GRAPH_EDITOR = False
        self.mode = self.BASE_MODE
        self.preMode = None
    def initialize(self):
        """Register DIRECT/editor events and build the status readout.

        You should call this in your __init__ method of the inherited
        LevelEditor class.
        """
        # specify what obj can be 'selected' as objects
        base.direct.selected.addTag('OBJRoot')
        self.actionEvents.extend([
            # Node path events
            ('DIRECT-select', self.select),
            ('DIRECT-delete', self.handleDelete),
            ('DIRECT-preDeselectAll', self.deselectAll),
            ('DIRECT_deselectAll', self.deselectAllCB),
            ('preRemoveNodePath', self.removeNodePathHook),
            ('DIRECT_deselectedNodePath', self.deselectAllCB),
            ('DIRECT_selectedNodePath_fMulti_fTag_fLEPane', self.selectedNodePathHook),
            ('DIRECT_deselectAll', self.deselectAll),
            ('LE-Undo', self.actionMgr.undo),
            ('LE-Redo', self.actionMgr.redo),
            ('LE-Duplicate', self.objectMgr.duplicateSelected),
            ('DIRECT_manipulateObjectCleanup', self.cleanUpManipulating),
            ('LE-MakeLive', self.objectMgr.makeSelectedLive),
            ('LE-NewScene', self.ui.onNew),
            ('LE-SaveScene', self.ui.onSave),
            ('LE-OpenScene', self.ui.onOpen),
            ('LE-Quit', self.ui.quit),
            ('DIRECT-mouse1', self.handleMouse1),
            ('DIRECT-mouse1Up', self.handleMouse1Up),
            ('DIRECT-mouse2', self.handleMouse2),
            ('DIRECT-mouse2Up', self.handleMouse2Up),
            ('DIRECT-mouse3', self.handleMouse3),
            ('DIRECT-mouse3Up', self.handleMouse3Up),
            ('DIRECT-toggleWidgetVis', self.toggleWidget),
            ])
        # Add all the action events (3-tuples carry extraArgs for accept()).
        for event in self.actionEvents:
            if len(event) == 3:
                self.accept(event[0], event[1], event[2])
            else:
                self.accept(event[0], event[1])
        # editor state text display such as edit mode
        self.statusReadout = OnscreenText(
            pos = (-1.2, 0.9), bg=Vec4(1,1,1,1),
            scale = 0.05, align = TextNode.ALeft,
            mayChange = 1, font = TextNode.getDefaultFont())
        self.statusReadout.setText("")
        # Make sure readout is never lit or drawn in wireframe
        useDirectRenderStyle(self.statusReadout)
        self.statusReadout.reparentTo(hidden)
        self.statusLines = []
        # Periodically expire old status lines.
        taskMgr.doMethodLater(5, self.updateStatusReadoutTimeouts, 'updateStatus')
        self.loadSettings()
        self.reset()
def setTitleWithFilename(self, filename=""):
title = self.ui.appname
if filename != "":
filenameshort = os.path.basename(filename)
title = title + " (%s)"%filenameshort
self.ui.SetLabel(title)
    def removeNodePathHook(self, nodePath):
        """Deselect and unregister an object about to be removed from the scene."""
        if nodePath is None:
            return
        base.direct.deselect(nodePath)
        self.objectMgr.removeObjectByNodePath(nodePath)
        if base.direct.selected.last is not None and nodePath == base.direct.selected.last:
            # if base.direct.selected.last is referring to this
            # removed obj, clear the reference
            # (__builtins__ is a module at top level but a dict inside
            # imported modules, hence the two code paths)
            if (hasattr(__builtins__,'last')):
                __builtins__.last = None
            else:
                __builtins__['last'] = None
            base.direct.selected.last = None
    def toggleWidget(self):
        """Toggle the manipulation widget, unless the current object is immovable."""
        if self.objectMgr.currNodePath:
            obj = self.objectMgr.findObjectByNodePath(self.objectMgr.currNodePath)
            if obj and not obj[OG.OBJ_DEF].movable:
                return
        base.direct.toggleWidgetVis()
def handleMouse1(self, modifiers):
if base.direct.fAlt or modifiers == 4:
self.fMoveCamera = True
return
if self.mode == self.CREATE_CURVE_MODE :
self.curveEditor.createCurve()
    def handleMouse1Up(self):
        """Left-mouse-release handler; always ends any camera drag."""
        self.fMoveCamera = False
def handleMouse2(self, modifiers):
if base.direct.fAlt or modifiers == 4:
self.fMoveCamera = True
return
    def handleMouse2Up(self):
        """Middle-mouse-release handler; always ends any camera drag."""
        self.fMoveCamera = False
def handleMouse3(self, modifiers):
if base.direct.fAlt or modifiers == 4:
self.fMoveCamera = True
return
self.ui.onRightDown()
    def handleMouse3Up(self):
        """Right-mouse-release handler; always ends any camera drag."""
        self.fMoveCamera = False
def handleDelete(self):
oldSelectedNPs = base.direct.selected.getSelectedAsList()
oldUIDs = []
for oldNP in oldSelectedNPs:
obj = self.objectMgr.findObjectByNodePath(oldNP)
if obj:
oldUIDs.append(obj[OG.OBJ_UID])
action = ActionDeleteObj(self)
self.actionMgr.push(action)
action()
for uid in oldUIDs:
self.ui.sceneGraphUI.delete(uid)
## reply = wx.MessageBox("Do you want to delete selected?", "Delete?",
## wx.YES_NO | wx.ICON_QUESTION)
## if reply == wx.YES:
## base.direct.removeAllSelected()
## else:
## # need to reset COA
## dnp = base.direct.selected.last
## # Update camera controls coa to this point
## # Coa2Camera = Coa2Dnp * Dnp2Camera
## mCoa2Camera = dnp.mCoa2Dnp * dnp.getMat(base.direct.camera)
## row = mCoa2Camera.getRow(3)
## coa = Vec3(row[0], row[1], row[2])
## base.direct.cameraControl.updateCoa(coa)
def cleanUpManipulating(self, selectedNPs):
for np in selectedNPs:
obj = self.objectMgr.findObjectByNodePath(np)
if obj:
action = ActionTransformObj(self, obj[OG.OBJ_UID], Mat4(np.getMat()))
self.actionMgr.push(action)
action()
def select(self, nodePath, fMultiSelect=0, fSelectTag=1, fResetAncestry=1, fLEPane=0, fUndo=1):
if fUndo:
# Select tagged object if present
if fSelectTag:
for tag in base.direct.selected.tagList:
if nodePath.hasNetTag(tag):
nodePath = nodePath.findNetTag(tag)
break
action = ActionSelectObj(self, nodePath, fMultiSelect)
self.actionMgr.push(action)
action()
else:
base.direct.selectCB(nodePath, fMultiSelect, fSelectTag, fResetAncestry, fLEPane, fUndo)
def selectedNodePathHook(self, nodePath, fMultiSelect = 0, fSelectTag = 1, fLEPane = 0):
# handle unpickable nodepath
if nodePath.getName() in base.direct.iRay.unpickable:
base.direct.deselect(nodePath)
return
if fMultiSelect == 0 and fLEPane == 0:
oldSelectedNPs = base.direct.selected.getSelectedAsList()
for oldNP in oldSelectedNPs:
obj = self.objectMgr.findObjectByNodePath(oldNP)
if obj:
self.ui.sceneGraphUI.deSelect(obj[OG.OBJ_UID])
self.objectMgr.selectObject(nodePath, fLEPane)
self.ui.buildContextMenu(nodePath)
if self.mode == self.EDIT_CURVE_MODE:
taskMgr.add(self.curveEditor.editCurve, "modify")
self.curveEditor.accept("DIRECT-enter", self.curveEditor.onBaseMode)
def deselectAll(self, np=None):
if len(base.direct.selected.getSelectedAsList()) ==0:
return
action = ActionDeselectAll(self)
self.actionMgr.push(action)
action()
def deselectAllCB(self, dnp=None):
self.objectMgr.deselectAll()
    def reset(self):
        """Reset the editor to an empty scene.

        If there are unsaved changes, prompts the user to save first and
        aborts the reset when that save is declined by returning False.
        Then clears selection, resets every manager, restores the default
        four-viewport camera setup, and clears the window title.
        """
        if self.fNeedToSave:
            reply = wx.MessageBox("Do you want to save current scene?", "Save?",
                                  wx.YES_NO | wx.ICON_QUESTION)
            if reply == wx.YES:
                result = self.ui.onSave()
                if result == False:
                    # Save was cancelled or failed; keep the current scene.
                    return
        base.direct.deselectAll()
        base.direct.selected.last = None
        self.ui.reset()
        self.objectMgr.reset()
        self.animMgr.reset()
        self.actionMgr.reset()
        # Default camera placement: perspective view looks at the origin,
        # the three orthographic views sit far out on their axes.
        self.ui.perspView.camera.setPos(-19, -19, 19)
        self.ui.perspView.camera.lookAt(Point3(0, 0, 0))
        self.ui.leftView.camera.setPos(600, 0, 0)
        self.ui.frontView.camera.setPos(0, -600, 0)
        self.ui.topView.camera.setPos(0, 0, 600)
        self.resetOrthoCam(self.ui.topView)
        self.resetOrthoCam(self.ui.frontView)
        self.resetOrthoCam(self.ui.leftView)
        self.fNeedToSave = False
        self.setTitleWithFilename()
def resetOrthoCam(self, view):
base.direct.drList[base.camList.index(NodePath(view.camNode))].orthoFactor = 0.1
x = view.ClientSize.GetWidth() * 0.1
y = view.ClientSize.GetHeight() * 0.1
view.camLens.setFilmSize(x, y)
def save(self):
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
if self.currentFile:
self.fileMgr.saveToFile(self.currentFile)
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
def saveAs(self, fileName):
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
self.fileMgr.saveToFile(fileName)
self.currentFile = fileName
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
def load(self, fileName):
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
self.reset()
self.fileMgr.loadFromFile(fileName)
self.currentFile = fileName
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
def saveSettings(self):
if self.settingsFile is None:
return
try:
f = open(self.settingsFile, 'w')
f.write('gridSize\n%f\n'%self.ui.perspView.grid.gridSize)
f.write('gridSpacing\n%f\n'%self.ui.perspView.grid.gridSpacing)
f.write('hotKey\n%s\n'%base.direct.hotKeyMap)
f.close()
except:
pass
def loadSettings(self):
if self.settingsFile is None:
return
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_WAIT))
try:
f = open(self.settingsFile, 'r')
configLines = f.readlines()
f.close()
gridSize = 100.0
gridSpacing = 5.0
for i in range(0, len(configLines)):
line = configLines[i]
i = i + 1
if line.startswith('gridSize'):
gridSize = float(configLines[i])
elif line.startswith('gridSpacing'):
gridSpacing = float(configLines[i])
elif line.startswith('hotKey'):
customHotKeyMap = eval(configLines[i])
customHotKeyDict = {}
for hotKey in customHotKeyMap.keys():
desc = customHotKeyMap[hotKey]
customHotKeyDict[desc[1]] = hotKey
overriddenKeys = []
for key in base.direct.hotKeyMap.keys():
desc = base.direct.hotKeyMap[key]
if desc[1] in customHotKeyDict.keys():
overriddenKeys.append(key)
for key in overriddenKeys:
del base.direct.hotKeyMap[key]
base.direct.hotKeyMap.update(customHotKeyMap)
self.ui.updateGrids(gridSize, gridSpacing)
self.ui.updateMenu()
except:
pass
self.ui.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
def convertMaya(self, modelname, callBack, obj=None, isAnim=False):
if obj and isAnim:
mayaConverter = MayaConverter(self.ui, self, modelname, callBack, obj, isAnim)
else:
reply = wx.MessageBox("Is it an animation file?", "Animation?",
wx.YES_NO | wx.ICON_QUESTION)
if reply == wx.YES:
mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, True)
else:
mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, False)
mayaConverter.Show()
def convertFromMaya(self, modelname, callBack):
mayaConverter = MayaConverter(self.ui, self, modelname, callBack, None, False)
mayaConverter.Show()
def exportToMaya(self, mayaFileName):
exportRootNP = render
self.exportToMayaCB(mayaFileName, exportRootNP)
def exportToMayaCB(self, mayaFileName, exportRootNP):
bamFileName = mayaFileName + ".bam"
if base.direct.selected.last:
obj = self.objectMgr.findObjectByNodePath(base.direct.selected.last)
if obj:
exportRootNP = obj[OG.OBJ_NP]
exportRootNP.writeBamFile(bamFileName)
mayaConverter = MayaConverter(self.ui, self, mayaFileName, None, None, False, FROM_BAM_TO_MAYA)
mayaConverter.Show()
def updateStatusReadout(self, status, color=None):
if status:
# add new status line, first check to see if it already exists
alreadyExists = False
for currLine in self.statusLines:
if (status == currLine[1]):
alreadyExists = True
break
if (alreadyExists == False):
time = globalClock.getRealTime() + 15
self.statusLines.append([time,status,color])
# update display of new status lines
self.statusReadout.reparentTo(aspect2d)
statusText = ""
lastColor = None
for currLine in self.statusLines:
statusText += currLine[1] + '\n'
lastColor = currLine[2]
self.statusReadout.setText(statusText)
if (lastColor):
self.statusReadout.textNode.setCardColor(
lastColor[0], lastColor[1], lastColor[2], lastColor[3])
self.statusReadout.textNode.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
else:
self.statusReadout.textNode.setCardColor(1,1,1,1)
self.statusReadout.textNode.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
def updateStatusReadoutTimeouts(self,task=None):
removalList = []
for currLine in self.statusLines:
if (globalClock.getRealTime() >= currLine[0]):
removalList.append(currLine)
for currRemoval in removalList:
self.statusLines.remove(currRemoval)
self.updateStatusReadout(None)
# perform doMethodLater again after delay
# This crashes when CTRL-C'ing, so this is a cheap hack.
#return 2
from direct.task import Task
return Task.again
def propMeetsReq(self, typeName, parentNP):
if self.ui.parentToSelectedMenuItem.IsChecked():
if base.direct.selected.last:
parent = base.le.objectMgr.findObjectByNodePath(base.direct.selected.last)
if parent:
parentNP[0] = parent[OG.OBJ_NP]
else:
parentNP[0] = None
return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.