gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2013 Philip N. Klein
""" A module for working with images in matrix format.
An image is represented by two matrices, representing points and colors.
The points matrix has row labels {'x', 'y', 'u'} and column labels (0,0) through (w, h), inclusive,
where (w, h) are the width and height of the original image
The colors matrix has row labels {'r', 'g', 'b'} and column labels (0,0) through (w-1, h-1).
The column labels for these matrices represent "lattice points" on the original image.
For pixel (i,j) in the original image, the (i,j) column in the colors matrix represents
the pixel color and the (i,j), (i+1, j), (i+1, j+1), and (i, j+1) columns in the points
matrix represent the boundary of the pixel region
"""
import atexit
import collections
import collections.abc
import math
import numbers
import os
import tempfile
import webbrowser

import mat
import png
# Round color coordinate to nearest int and clamp to [0, 255]
def _color_int(col):
return max(min(round(col), 255), 0)
# utility conversions, between boxed pixel and flat pixel formats
# the png library uses flat, we use boxed.
def _boxed2flat(row):
return [_color_int(x) for box in row for x in box]
def _flat2boxed(row):
# Note we skip every 4th element, thus eliminating the alpha channel
return [tuple(row[i:i+3]) for i in range(0, len(row), 4)]
## Image conversions
def isgray(image):
    """Return True if *image* is grayscale, False if it is RGB.

    A grayscale image stores one number per pixel; a color image stores a
    3-element (r, g, b) sequence per pixel.  Only the first pixel is
    inspected.

    Raises:
        TypeError: if the first pixel is neither a number nor a
            length-3 iterable.
    """
    col = image[0][0]
    if isinstance(col, numbers.Number):
        return True
    # collections.Iterable was deprecated in 3.3 and removed in Python 3.10;
    # the ABC lives in collections.abc.
    elif isinstance(col, collections.abc.Iterable) and len(col) == 3:
        return False
    else:
        raise TypeError('Unrecognized image type')
def color2gray(image):
    """Convert an RGB image to grayscale.

    Uses the HDTV (BT.709) luma weights, as per
    https://en.wikipedia.org/wiki/Grayscale
    """
    weights = (0.2126, 0.7152, 0.0722)
    return [[int(sum(w * channel for w, channel in zip(weights, pixel)))
             for pixel in row]
            for row in image]
def gray2color(image):
    """Convert a grayscale image to RGB by replicating each value across all
    three channels."""
    return [[(value,) * 3 for value in row] for row in image]
#extracting and combining color channels
def rgbsplit(image):
    """ Converts an RGB image to a 3-element list of grayscale images, one for each color channel"""
    channels = []
    for channel_index in (0, 1, 2):
        channels.append([[pixel[channel_index] for pixel in row]
                         for row in image])
    return channels
def rgpsplice(R, G, B):
    """Recombine three equal-sized grayscale channel images into one RGB image."""
    height = len(R)
    width = len(R[0])
    return [[(R[y][x], G[y][x], B[y][x]) for x in range(width)]
            for y in range(height)]
## To and from files
def file2image(path):
    """ Reads an image into a list of lists of pixel values (tuples with
    three values). This is a color image. """
    # asRGBA forces a uniform 4-channel (RGBA) stream regardless of the
    # source PNG's own color mode; _flat2boxed then drops the alpha.
    width, height, rows, meta = png.Reader(filename=path).asRGBA()
    return [_flat2boxed(row) for row in rows]
def image2file(image, path):
    """ Writes an image in list of lists format to a file. Will work with
    either color or grayscale. """
    # The png writer wants RGB triples, so promote grayscale input first.
    img = gray2color(image) if isgray(image) else image
    writer = png.Writer(width=len(image[0]), height=len(image))
    with open(path, 'wb') as f:
        writer.write(f, [_boxed2flat(r) for r in img])
## Display functions
def image2display(image, browser=None):
    """ Stores an image in a temporary location and displays it on screen
    using a web browser. """
    # Write the image to a temp PNG, then wrap it in a minimal HTML page
    # so the browser can display it.
    path = _create_temp('.png')
    image2file(image, path)
    hpath = _create_temp('.html')
    with open(hpath, 'w') as h:
        h.writelines(["<html><body><img src='file://%s'/></body></html>" % path])
    openinbrowser('file://%s' % hpath, browser)
    # Block until the user confirms, so the atexit cleanup does not delete
    # the temp files before the browser has loaded them.
    print("Hit Enter once the image is displayed.... ", end="")
    input()
# Module-level default browser name; None means "use the system default".
_browser = None
def setbrowser(browser=None):
    """ Registers the given browser and saves it as the module default,
    controlling which browser is used to display plots.

    The argument is anything acceptable to webbrowser.get(): a predefined
    name such as 'firefox' or 'opera', or a literal command line containing
    '%s' (substituted with the URL), for example 'google-chrome %s' or
    'cmd "start iexplore.exe %s"'.  Passing None resets the default to the
    system browser.  See the webbrowser documentation for details.

    Note: Safari does not reliably work with the webbrowser module,
    so we recommend using a different browser.
    """
    global _browser
    if browser is None:
        # Reset to the system default.
        _browser = None
    else:
        webbrowser.register(browser, None, webbrowser.get(browser))
        _browser = browser
def getbrowser():
    """ Returns the module's default browser """
    # Whatever setbrowser() last stored; None means the system default.
    return _browser
def openinbrowser(url, browser=None):
    """Open *url* in *browser*, falling back to the module default browser."""
    chosen = _browser if browser is None else browser
    webbrowser.get(chosen).open(url)
# Create a temporary file that will be removed at exit
# Returns a path to the file
def _create_temp(suffix='', prefix='tmp', dir=None):
    """Create a temporary file removed at interpreter exit; return its path."""
    handle, path = tempfile.mkstemp(suffix, prefix, dir)
    # Only the path is needed; close the low-level descriptor right away.
    os.close(handle)
    _remove_at_exit(path)
    return path
# Register a file to be removed at exit
def _remove_at_exit(path):
    # Defer deletion to interpreter exit so the browser can read the file
    # for as long as the process is alive.
    atexit.register(os.remove, path)
def file2mat(path, row_labels = ('x', 'y', 'u')):
    """input: a filepath to an image in .png format
    output: the pair (points, matrix) of matrices representing the image."""
    # Thin wrapper: decode the PNG to list-of-lists, then matrixify it.
    return image2mat(file2image(path), row_labels)
def image2mat(img, row_labels=('x', 'y', 'u')):
    """ input: an image in list-of-lists format
        output: a pair (points, colors) of matrices representing the image.
    Note: The input list-of-lists can consist of either numbers in [0, 255]
        for grayscale or 3-tuples of integers representing the rgb color
        coordinates.
    """
    h = len(img)
    w = len(img[0])
    rx, ry, ru = row_labels
    # Points live on the (w+1) x (h+1) lattice of pixel corners.
    ptsD = (set(row_labels), {(x, y) for x in range(w + 1) for y in range(h + 1)})
    ptsF = {}
    # Colors are per-pixel, so their domain is w x h.
    colorsD = ({'r', 'g', 'b'}, {(x, y) for x in range(w) for y in range(h)})
    colorsF = {}
    for y in range(h + 1):
        for x in range(w + 1):
            pt = (x, y)
            ptsF[(rx, pt)] = x
            ptsF[(ry, pt)] = y
            ptsF[(ru, pt)] = 1  # homogeneous coordinate, always 1
            if x < w and y < h:
                col = img[y][x]
                # Accept any numeric grayscale value (not only int),
                # consistent with the numbers.Number test used by isgray().
                if isinstance(col, numbers.Number):
                    red, green, blue = col, col, col
                else:
                    red, green, blue = col
                colorsF[('r', pt)] = red
                colorsF[('g', pt)] = green
                colorsF[('b', pt)] = blue
    return mat.Mat(ptsD, ptsF), mat.Mat(colorsD, colorsF)
def mat2display(pts, colors, row_labels = ('x', 'y', 'u'),
                scale=1, xscale=None, yscale = None, xmin=0, ymin=0, xmax=None, ymax=None,
                crosshairs=False, browser=None):
    """ input: matrix pts and matrix colors representing an image
        result: Displays the image in a web browser
        Optional arguments:
        row_labels - A collection specifying the points matrix row labels,
        in order. The first element of this collection is considered the x
        coordinate, the second is the y coordinate, and the third is the u
        coordinate, which is assumed to be 1 for all points.
        scale - The display scale, in pixels per image coordinate unit
        xscale, yscale - in case you want to scale differently in x and y
        xmin, ymin, xmax, ymax - The region of the image to display. These can
        be set to None to use the minimum/maximum value of the coordinate
        instead of a fixed value.
        crosshairs - Setting this to true displays a crosshairs at (0, 0) in
        image coordinates
        browser - A browser string to be passed to webbrowser.get().
        Overrides the module default, if any has been set.
    """
    if xscale == None: xscale = scale
    if yscale == None: yscale = scale
    rx, ry, ru = row_labels
    # Missing bounds default to the extreme x / y values found in pts.
    if ymin is None:
        ymin = min(v for (k, v) in pts.f.items() if k[0] == ry)
    if xmin is None:
        xmin = min(v for (k, v) in pts.f.items() if k[0] == rx)
    if ymax is None:
        ymax = max(v for (k, v) in pts.f.items() if k[0] == ry)
    if xmax is None:
        xmax = max(v for (k, v) in pts.f.items() if k[0] == rx)
    # Include (0, 0) in the region
    if crosshairs:
        ymin = min(ymin, 0)
        xmin = min(xmin, 0)
        ymax = max(ymax, 0)
        xmax = max(xmax, 0)
    # Render the image as an SVG document inside a temporary HTML file.
    hpath = _create_temp('.html')
    with open(hpath, 'w') as h:
        h.writelines(
            ['<!DOCTYPE html>\n',
             '<head> <title>image</title> </head>\n',
             '<body>\n',
             '<svg height="%s" width="%s" xmlns="http://www.w3.org/2000/svg">\n' % ((ymax-ymin) * yscale, (xmax-xmin) * xscale),
             '<g transform="scale(%s) translate(%s, %s) ">\n' % (scale, -xmin, -ymin)])
        pixels = sorted(colors.D[1])
        # (Mx, My) is the largest pixel column label (bottom-right pixel).
        Mx, My = pixels[-1]
        # go through the quads, writing each one to canvas
        for l in pixels:
            lx, ly = l
            r = _color_int(colors[('r', l)])
            g = _color_int(colors[('g', l)])
            b = _color_int(colors[('b', l)])
            # NOTE(review): the trailing +1 makes mx/my land two lattice
            # columns right/below the pixel for interior pixels -- confirm
            # this matches the points-matrix layout in the module docstring.
            mx = min(lx+1, Mx)+1
            my = min(ly+1, My)+1
            # coords of corners
            x0 = pts[(rx, l)]
            y0 = pts[(ry, l)]
            x1 = pts[(rx, (mx, ly))]
            y1 = pts[(ry, (mx, ly))]
            x2 = pts[(rx, (mx, my))]
            y2 = pts[(ry, (mx, my))]
            x3 = pts[(rx, (lx, my))]
            y3 = pts[(ry, (lx, my))]
            h.writelines(['<polygon points="%s, %s %s, %s, %s, %s %s, %s" fill="rgb(%s, %s, %s)" stroke="none" />\n'
                          % (x0, y0, x1, y1, x2, y2, x3, y3, r, g, b)])
        # Draw crosshairs centered at (0, 0)
        if crosshairs:
            h.writelines(
                ['<line x1="-50" y1="0" x2="50" y2="0" stroke="black" />\n',
                 '<line x1="0" y1="-50" x2="0" y2="50" stroke="black" />\n',
                 '<circle cx="0" cy="0" r="50" style="stroke: black; fill: none;" />\n'])
        h.writelines(['</g></svg>\n', '</body>\n', '</html>\n'])
    # Show the page and block so the atexit cleanup does not remove the
    # temp file before the browser has loaded it.
    openinbrowser('file://%s' % hpath, browser)
    print("Hit Enter once the image is displayed.... ", end="")
    input()
| |
import argparse
import os
from multiprocessing.pool import ThreadPool
import numpy as np
import piecewisecrf.helpers.io as io
def load_data(predictions_file, labels_file):
    '''
    Loads prediction and label data into numpy arrays

    Parameters
    ----------
    predictions_file: str
        Path to the prediction file
    labels_file: str
        Path to the label file

    Returns
    -------
    ret_val: tuple
        labels array, predictions array
    '''
    # Note the dtypes: labels are stored as uint8, predictions as int16.
    labels = io.load_nparray_from_bin_file(labels_file, np.uint8)
    predictions = io.load_nparray_from_bin_file(predictions_file, np.int16)
    # Returned in (labels, predictions) order -- the reverse of the arguments.
    return labels, predictions
def get_filenames(predictions_dir, labels_dir, class_ids):
    '''
    Gets ground truth and prediction file names

    Parameters
    ----------
    predictions_dir: str
        Path to the directory containing predicted images
    labels_dir: str
        Path to the directory containing ground truth files
    class_ids: list
        List of possible class ids

    Returns
    -------
    ret_val: tuple
        Five parallel sequences: indices, base file names, label file
        paths, prediction file paths, and class_ids repeated once per
        file -- suitable for zip()-ing into per-image work items.
    '''
    base_names = sorted(os.listdir(labels_dir))
    # Sort both listings so labels and predictions pair up by file name.
    label_file_names = [os.path.join(labels_dir, f) for f in base_names]
    prediction_file_names = sorted(os.path.join(predictions_dir, f)
                                   for f in os.listdir(predictions_dir))
    return (range(len(label_file_names)), base_names, label_file_names,
            prediction_file_names, [class_ids] * len(label_file_names))
def evaluate_image(predicted, true):
    '''
    Calculate true positives, false positives and false negatives for one image

    Parameters
    ----------
    predicted: numpy array
        One-hot predicted labels, shape [c, h, w]
    true: numpy array
        One-hot ground truth, shape [c+1, h, w]; the extra final slice
        marks "don't care" pixels excluded from the statistics

    Returns
    -------
    array: numpy array
        Stacked [tp, fp, fn] rows, one column per class -- shape [3, c]
    '''
    c, h, w = predicted.shape
    assert true.shape == (c + 1, h, w)
    # Keep only pixels not flagged "don't care" by the last slice.
    valid = np.where(1 - true[-1].flatten())[0]
    pred2d = np.int32(predicted.reshape(c, h * w)[:, valid])
    true2d = np.int32(true[:-1].reshape(c, h * w)[:, valid])
    tp = (pred2d * true2d).sum(axis=1)
    fp = (pred2d * (1 - true2d)).sum(axis=1)
    fn = ((1 - pred2d) * true2d).sum(axis=1)
    return np.stack([tp, fp, fn])  # [3,c]
def run(zipped):
    '''
    Runs evaluation for one image.

    zipped is an (index, file name, label path, prediction path, class_ids)
    tuple as produced by get_filenames(); returns the per-image statistics
    with a leading singleton axis so results can be concatenated.
    '''
    _, _, label_path, prediction_path, class_ids = zipped
    labels, predictions = load_data(prediction_path, label_path)
    # One boolean slice per class id; no slice for the negative class.
    predicted = np.vstack([(predictions == cid)[None] for cid in class_ids])
    true = np.vstack([(labels == cid)[None] for cid in class_ids])
    # Append a slice marking "don't care" labels (ids above the largest id).
    true = np.vstack([true, (labels > class_ids[-1])[None]])
    return evaluate_image(predicted, true)[None]
def evaluate_segmentation(predictions_dir, labels_dir, dataset):
    '''
    Runs evaluation for all images

    Parameters
    ----------
    predictions_dir: str
        Directory containing prediction files
    labels_dir: str
        Directory containing label files
    dataset: Dataset
        Dataset for which evaluation is being done

    Returns
    -------
    ret_val: list
        [per-class iou, per-pixel accuracy (tp / (tp + fp)),
        per-image segmentation statistics, per-class precision,
        per-class recall, mean iou in percent]
    '''
    class_ids = dataset.classes
    work_items = zip(*get_filenames(predictions_dir, labels_dir, class_ids))
    # Evaluate images in parallel; each result is a [1, 3, c] array.
    # Use the pool as a context manager so its worker threads are released.
    with ThreadPool() as pool:
        segmentation_stats = pool.map(run, work_items)
    segmentation_stats = np.concatenate(segmentation_stats, axis=0)
    # Sum the per-image [tp, fp, fn] rows over all images -> [3, c].
    aggregated_stats_class = np.float32(np.sum(segmentation_stats, axis=0))
    tp = aggregated_stats_class[0]
    fp = aggregated_stats_class[1]
    fn = aggregated_stats_class[2]
    recall_per_class = tp / (tp + fn)  # [c]
    precision_per_class = tp / (tp + fp)  # [c]
    per_class_iou = tp / (tp + fp + fn)  # [c]
    # Classes absent from both prediction and ground truth give 0/0 -> 0.
    per_class_iou = np.nan_to_num(per_class_iou)
    aggregated_stats_all = np.sum(aggregated_stats_class, axis=1)  # [3]
    per_pixel_accuracy = aggregated_stats_all[0] / (aggregated_stats_all[0] + aggregated_stats_all[1])
    iou = 100.0 * per_class_iou.mean()
    return [per_class_iou, per_pixel_accuracy, segmentation_stats,
            precision_per_class, recall_per_class, iou]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Evaluates semantic segmentation")
    parser.add_argument('predictions_dir', type=str,
                        help="Directory containing predicted files")
    parser.add_argument('labels_dir', type=str,
                        help="Directory containing ground truth files")
    possible_datasets = ['cityscapes', 'kitti']
    parser.add_argument('dataset_name', type=str, choices=possible_datasets,
                        help='Name of the dataset used for evaluation')
    args = parser.parse_args()
    # Import lazily so only the chosen dataset's dependencies are needed.
    if args.dataset_name == possible_datasets[0]:
        from piecewisecrf.datasets.cityscapes.cityscapes import CityscapesDataset
        dataset = CityscapesDataset()
    elif args.dataset_name == possible_datasets[1]:
        from piecewisecrf.datasets.kitti.kitti import KittiDataset
        dataset = KittiDataset()
    results = evaluate_segmentation(args.predictions_dir, args.labels_dir, dataset)
    # results layout: [per-class iou, pixel accuracy, raw per-image stats,
    # per-class precision, per-class recall, mean iou (already in percent)].
    print('IoU = {:.2f}'.format(results[5]))
    print('Pixel Accuracy = {:.2f}'.format(100 * results[1]))
    print('Recall = {:.2f}'.format(100 * results[4].mean()))
    # NOTE(review): printed without the {:.2f} formatting used above --
    # confirm whether the full precision output is intended.
    print('Precision = {}'.format(100 * results[3].mean()))
    print('Per class IOU = {}'.format(100.0 * results[0]))
| |
'''Test the project collaboration route. With views.'''
from django.contrib.auth.models import User, Permission
from django.test import TestCase, Client
from django.test.testcases import SimpleTestCase, TransactionTestCase
from django.urls import reverse_lazy
from .models import Project, Task, Milestone, TaskComment, GetTaskAssignedUser, TaskProfile, PROJECT_VISIBLE_PRIVATE, PROJECT_VISIBLE_VISIBLE, TASK_PROFILE_PRIORITY_RESPONSIBLE_FULL
from reversion.models import Version
from ich_bau.profiles.models import GetUserNoticationsQ
from .test_svn_wrapper_consts import get_TEST_REPO_SVN_FILE
import shutil, tempfile, os
from project.repo_wrapper import *
# Test fixture credentials and literals shared by the test cases below.
TEST_ADMIN_USER_NAME = 'test_admin_user'
TEST_ADMIN_USER_EMAIL = 'test_admin_user@nothere.com'
TEST_ADMIN_USER_PW = 'test_admin_user_pw'
TEST_WORKER_USER_NAME = 'test_worker_user'
TEST_WORKER_USER_EMAIL = 'test_worker_user@nothere.com'
TEST_WORKER_USER_PW = 'test_worker_user_pw'
TEST_SELF_WORKER_USER_NAME = 'test_self_worker_user'
TEST_SELF_WORKER_USER_EMAIL = 'test_self_worker_user@nothere.com'
TEST_SELF_WORKER_USER_PW = 'test_self_worker_user_pw'
TEST_PROJECT_FULLNAME = 'TEST PROJECT FOR COLLABORATION #1 FULL NAME'
TEST_TASK_FULLNAME = 'TEST TASK FOR COLLABORATION #1 FULL NAME'
TEST_PROJECT_DESCRIPTION_1 = 'Project for collaboration'
# Deliberately misspelled; the test later edits it to the corrected text.
TEST_TASK_FIRST_COMMENT = 'Hloo!'
TEST_TASK_FIRST_COMMENT_2 = 'Hello!'
# Fixed: the original 'I''m here!' is implicit string concatenation in
# Python (yielding "Im here!"), not an escaped apostrophe.  Doubling a
# quote to escape it is a Pascal/SQL idiom, not a Python one.
TEST_TASK_SECOND_COMMENT = "I'm here!"
class Project_Collaboration_View_Test_Client(TestCase):
    """End-to-end collaboration scenario driven through the HTTP views.

    One long transactional script: an admin creates a private project,
    invites a worker, a third user self-joins, tasks and comments are
    created and edited, and notification delivery is verified at every
    step.  The steps are order-dependent (database ids 1/2/3 are asserted
    literally), so the whole flow is kept in a single test method.
    """

    def test_Project_Collaboration(self):
        # --- admin user: create account, log in, create the project -----
        if not User.objects.filter( username = TEST_ADMIN_USER_NAME ).exists():
            test_admin_user = User.objects.create_user( username = TEST_ADMIN_USER_NAME, password = TEST_ADMIN_USER_PW )
        c_a = Client()
        response = c_a.login( username = TEST_ADMIN_USER_NAME, password = TEST_ADMIN_USER_PW )
        self.assertTrue( response )
        self.assertEqual( Project.objects.count(), 0 )
        # create new project with post
        response = c_a.post( reverse_lazy('project:project_add'), { 'fullname' : TEST_PROJECT_FULLNAME, 'description' : TEST_PROJECT_DESCRIPTION_1, } )
        # forbidden
        self.assertEqual( response.status_code, 403 )
        self.assertEqual( Project.objects.count(), 0 )
        # need to add the permissions
        add_project_permission = Permission.objects.get(codename='add_project')
        test_admin_user.user_permissions.add( add_project_permission )
        # create new project with post
        response = c_a.post( reverse_lazy('project:project_add'), { 'fullname' : TEST_PROJECT_FULLNAME, 'private_type' : PROJECT_VISIBLE_PRIVATE, 'description' : TEST_PROJECT_DESCRIPTION_1, }, follow = True )
        # we are redirected to new project page
        self.assertContains(response, '<i class="fa fa-lock"></i>', status_code=200 )
        # check project is created
        self.assertEqual( Project.objects.count(), 1 )
        test_project_1 = Project.objects.get(id=1)
        self.assertEqual( test_project_1.is_member(test_admin_user), True )
        # --- admin invites a worker; worker accepts ---------------------
        if not User.objects.filter( username = TEST_WORKER_USER_NAME ).exists():
            test_worker_user = User.objects.create_user( username = TEST_WORKER_USER_NAME, password = TEST_WORKER_USER_PW )
        self.assertEqual( test_project_1.is_member(test_worker_user), False )
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 0 )
        c_w = Client()
        response = c_w.login( username = TEST_WORKER_USER_NAME, password = TEST_WORKER_USER_PW )
        self.assertTrue( response )
        # add new user
        response = c_a.post( reverse_lazy('project:member_add', args = (test_project_1.id,) ), { 'member_profile' : test_worker_user.profile.id } )
        # we are redirected to project page
        self.assertEqual( response.status_code, 302 )
        # worker is still not a member
        self.assertEqual( test_project_1.is_member(test_worker_user), False )
        # but worker get the notification
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 1 )
        response = c_w.get( reverse_lazy('unread_notifications_view' ) )
        self.assertContains(response, test_project_1.fullname, status_code=200 )
        response = c_w.get( reverse_lazy('unread_notifications_view_by_type' ) )
        self.assertContains(response, test_project_1.fullname, status_code=200 )
        # check the notification
        notification = GetUserNoticationsQ( test_worker_user, True).first()
        self.assertEqual( notification.sender_user, test_admin_user)
        self.assertEqual( notification.reciever_user, test_worker_user)
        self.assertEqual( notification.msg_url, reverse_lazy('project:project_view_members', kwargs={ 'project_id': test_project_1.id} ) )
        # visit the notification link
        response = c_w.get( reverse_lazy('notification_read', args = (notification.id,) ) )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 0 )
        # member record 0 should not exist
        response = c_w.get( reverse_lazy('project:member_accept', args = (0,) ) )
        self.assertEqual( response.status_code, 404 )
        member_id = test_project_1.GetMemberList().get( member_profile = test_worker_user.profile ).id
        self.assertEqual( member_id, 2 )
        response = c_w.get( reverse_lazy('project:member_accept', args = (member_id,) ) )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( test_project_1.is_member(test_worker_user), True )
        # --- third user asks to join; admin accepts ---------------------
        # new user want to join
        if not User.objects.filter( username = TEST_SELF_WORKER_USER_NAME ).exists():
            test_self_worker_user = User.objects.create_user( username = TEST_SELF_WORKER_USER_NAME, password = TEST_SELF_WORKER_USER_PW )
        self.assertEqual( test_project_1.is_member(test_self_worker_user), False )
        self.assertEqual( GetUserNoticationsQ( test_self_worker_user, True).count(), 0 )
        c_sw = Client()
        response = c_sw.login( username = TEST_SELF_WORKER_USER_NAME, password = TEST_SELF_WORKER_USER_PW )
        self.assertTrue( response )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 0 )
        response = c_sw.post( reverse_lazy('project:member_want_join', args = (test_project_1.id,) ) )
        # we are redirected to project page
        self.assertEqual( response.status_code, 302 )
        # self worker is still not a member
        self.assertEqual( test_project_1.is_member(test_self_worker_user), False )
        # self worker get NO notification
        self.assertEqual( GetUserNoticationsQ( test_self_worker_user, True).count(), 0 )
        # check the admin notification
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 1 )
        notification = GetUserNoticationsQ( test_admin_user, True).first()
        self.assertEqual( notification.sender_user, test_self_worker_user )
        self.assertEqual( notification.reciever_user, test_admin_user)
        self.assertEqual( notification.msg_url, test_project_1.get_absolute_url() )
        # visit the notification link
        response = c_a.get( reverse_lazy('notification_read', args = (notification.id,) ) )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 0 )
        # accept the self joined
        member_id = test_project_1.GetMemberList().get( member_profile = test_self_worker_user.profile ).id
        self.assertEqual( member_id, 3 )
        # try to accept new member from non admin user
        response = c_w.get( reverse_lazy('project:team_accept', args = (member_id,) ) )
        # non admin user is not allowed to accept the members
        self.assertEqual( response.status_code, 403 )
        self.assertEqual( test_project_1.is_member(test_self_worker_user), False )
        # try to accept new member from admin user
        response = c_a.get( reverse_lazy('project:team_accept', args = (member_id,) ) )
        # admin user is allowed to accept the members
        self.assertEqual( response.status_code, 302 )
        # done - self joined user is accpeted
        self.assertEqual( test_project_1.is_member(test_self_worker_user), True )
        # --- tasks, comments, reversion history, notifications ----------
        # create first task
        self.assertEqual( Task.objects.count(), 0 )
        # create first task
        response = c_a.post( reverse_lazy('project:task_add', args = (test_project_1.id,) ), { 'fullname' : TEST_TASK_FULLNAME, } )
        # we are redirected to new task page
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( Task.objects.count(), 1 )
        # get object
        test_task_1 = Task.objects.get(id=1)
        # check url
        self.assertEqual( response.url, test_task_1.get_absolute_url() )
        # check name
        self.assertEqual( test_task_1.fullname, TEST_TASK_FULLNAME )
        # check task is in project
        self.assertEqual( test_task_1.project, test_project_1 )
        # check task history
        response = c_a.get( reverse_lazy('project:task_history', args = (test_task_1.id,) ) )
        # check history records count
        self.assertContains(response, TEST_TASK_FULLNAME, status_code=200 )
        self.assertEqual( Version.objects.get_for_object( test_task_1 ).count(), 1 )
        # check the task comments count - 0
        self.assertEqual( test_task_1.get_comments().count(), 0 )
        # post new comments from admin to unexisted task
        response = c_a.post( reverse_lazy('project:task_view', args = (0,) ), { 'submit' : 'submit', 'comment' : 'sss' } )
        self.assertEqual( response.status_code, 404 ) # should fail
        # post new comments from admin
        response = c_a.post( reverse_lazy('project:task_view', args = (test_task_1.id,) ), { 'submit' : 'submit', 'comment' : TEST_TASK_FIRST_COMMENT } )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( test_task_1.get_comments().count(), 1 )
        comment_1 = test_task_1.get_comments().first()
        self.assertEqual( comment_1.comment, TEST_TASK_FIRST_COMMENT )
        self.assertEqual( Version.objects.get_for_object( comment_1 ).count(), 1 )
        # edit comment from another user
        response = c_w.post( reverse_lazy('project:edit_task_comment', args = (comment_1.id,) ), { 'submit' : 'submit', 'comment' : TEST_TASK_FIRST_COMMENT_2 } )
        self.assertEqual( response.status_code, 404 )
        # edit comment from author
        response = c_a.post( reverse_lazy('project:edit_task_comment', args = (comment_1.id,) ), { 'submit' : 'submit', 'comment' : TEST_TASK_FIRST_COMMENT_2 } )
        self.assertEqual( response.status_code, 302 )
        comment_1.refresh_from_db()
        self.assertEqual( comment_1.comment, TEST_TASK_FIRST_COMMENT_2 )
        self.assertEqual( Version.objects.get_for_object( comment_1 ).count(), 2 )
        # check comment history
        response = c_a.get( reverse_lazy('project:task_comment_history', args = (comment_1.id,) ) )
        self.assertContains(response, TEST_TASK_FIRST_COMMENT, status_code=200 )
        self.assertEqual( Version.objects.get_for_object( comment_1 ).count(), 2 )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 0 )
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 0 )
        self.assertEqual( GetUserNoticationsQ( test_self_worker_user, True).count(), 0 )
        response = c_w.post( reverse_lazy('project:task_view', args = (test_task_1.id,) ), { 'submit' : 'submit', 'comment' : TEST_TASK_SECOND_COMMENT } )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( test_task_1.get_comments().count(), 2 )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 1 )
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 0 )
        self.assertEqual( GetUserNoticationsQ( test_self_worker_user, True).count(), 0 )
        # visit the notification link
        notification = GetUserNoticationsQ( test_admin_user, True).first()
        response = c_a.get( reverse_lazy('notification_read', args = (notification.id,) ) )
        self.assertEqual( response.status_code, 302 )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 0 )
        # --- assign a user to the task ----------------------------------
        # edit task
        self.assertEqual( Task.objects.count(), 1 )
        self.assertEqual( GetTaskAssignedUser(test_task_1).count(), 0 )
        response = c_a.post( reverse_lazy('project:add_user', args = (test_task_1.id,) ), { 'profile' : test_worker_user.profile.id, 'priority' : TASK_PROFILE_PRIORITY_RESPONSIBLE_FULL, } )
        # we are redirected to task page
        self.assertEqual( response.status_code, 302 )
        test_task_1.refresh_from_db()
        self.assertEqual( Task.objects.count(), 1 )
        assignee = GetTaskAssignedUser(test_task_1)
        self.assertEqual( assignee.count(), 1 )
        self.assertEqual( assignee.filter( profile = test_worker_user.profile ).count(), 1 )
        self.assertEqual( GetUserNoticationsQ( test_admin_user, True).count(), 0 )
        self.assertEqual( GetUserNoticationsQ( test_worker_user, True).count(), 1 )
        self.assertEqual( GetUserNoticationsQ( test_self_worker_user, True).count(), 0 )
class SVN_Repo_Client_Test(TransactionTestCase):
    """SVN repository creation flow exercised through the HTTP views.

    A TransactionTestCase is used because repo creation touches the
    filesystem; a throwaway temp directory serves as the SVN root via a
    settings override.
    """

    # Per-test temporary directory used as the SVN repository root.
    test_temp_dir = None

    def setUp(self):
        # Create a temporary directory
        if not self.test_temp_dir:
            self.test_temp_dir = tempfile.mkdtemp()

    def tearDown(self):
        # Remove the directory after the test
        if self.test_temp_dir:
            shutil.rmtree(self.test_temp_dir, True )
            self.test_temp_dir = None

    def test_SVN_repo_collaboration(self):
        # test repo creation
        path = os.path.join(self.test_temp_dir, '' )
        with self.settings( REPO_SVN = get_TEST_REPO_SVN_FILE( path ) ):
            self.assertTrue( settings.REPO_SVN.get('REPO_TYPE') == svn_file )
            # user not logged in - project_create_repo is not called
            c = Client()
            response = c.get( reverse_lazy('project:project_create_repo', args = (0,) ) )
            self.assertEqual( response.status_code, 302 )
            if not User.objects.filter( username = TEST_ADMIN_USER_NAME ).exists():
                test_admin_user = User.objects.create_user( username = TEST_ADMIN_USER_NAME, password = TEST_ADMIN_USER_PW )
            c_a = Client()
            response = c_a.login( username = TEST_ADMIN_USER_NAME, password = TEST_ADMIN_USER_PW )
            self.assertTrue( response )
            response = c_a.post( reverse_lazy('project:project_add'), { 'fullname' : TEST_PROJECT_FULLNAME, 'description' : TEST_PROJECT_DESCRIPTION_1, } )
            # forbidden
            self.assertEqual( response.status_code, 403 )
            self.assertEqual( Project.objects.count(), 0 )
            # need to add the permissions
            add_project_permission = Permission.objects.get(codename='add_project')
            test_admin_user.user_permissions.add( add_project_permission )
            response = c_a.post( reverse_lazy('project:project_add'), { 'fullname' : TEST_PROJECT_FULLNAME, 'private_type' : PROJECT_VISIBLE_VISIBLE, 'description' : TEST_PROJECT_DESCRIPTION_1, } )
            # we are redirected to new project page
            self.assertEqual( response.status_code, 302 )
            # check project is created
            self.assertEqual( Project.objects.count(), 1 )
            test_project_1 = Project.objects.get(id=1)
            self.assertEqual( test_project_1.is_member(test_admin_user), True )
            response = c_a.get( reverse_lazy('project:project_create_repo', args = (0,) ) )
            self.assertEqual( response.status_code, 404 )
            # check - project search view files page is available
            response = c.get( reverse_lazy('project:project_view_files', args = (test_project_1.id,) ) )
            self.assertContains(response, TEST_PROJECT_FULLNAME, status_code=200 )
            response = c_a.get( reverse_lazy('project:project_create_repo', args = (test_project_1.id,) ) )
            self.assertEqual( response.status_code, 302 )
            test_project_1.refresh_from_db()
            self.assertTrue( test_project_1.have_repo() )
            # check - project search view files page is available
            response = c.get( reverse_lazy('project:project_view_files', args = (test_project_1.id,) ) )
            self.assertContains(response, TEST_PROJECT_FULLNAME, status_code=200 )
            response = c_a.get( reverse_lazy('project:project_view_file_commit_view', args = (test_project_1.id, 0, ) ) )
            self.assertEqual( response.status_code, 200 )
            # check - try to create repo for project already has one
            response = c_a.get( reverse_lazy('project:project_create_repo', args = (test_project_1.id,) ), follow = True )
            # https://stackoverflow.com/questions/16143149/django-testing-check-messages-for-a-view-that-redirects
            self.assertEqual( response.status_code, 200 )
            self.assertRedirects(response, test_project_1.get_absolute_url() + 'files/')
            msg = list(response.context.get('messages'))[0]
            self.assertEqual( msg.tags, "error" )
            self.assertEqual( msg.message, "Project already have a repo!" )
| |
import re
import datetime
from bson.objectid import ObjectId
from .meta import DataTypeDefinition
from .errors import DataTypeMismatch
def check_defaults(func):
    """Decorator for dbfy methods: resolve None input before validation.

    A None value is replaced by the field's default (when one is set);
    otherwise a nullable field passes None through, and a non-nullable
    field raises DataTypeMismatch.  Non-None values fall through to the
    wrapped method unchanged.
    """
    def inner(self, value):
        if value is not None:
            return func(self, value)
        if self.default is not None:
            return self.default
        if self.nullable is True:
            return None
        raise DataTypeMismatch(
            "You have left a required field in this form empty.")
    return inner
class DataType(DataTypeDefinition):
    """Base class for all field datatypes.

    NOTE: methods throughout this module name the instance parameter
    ``cls`` (or sometimes ``self``); they are ordinary instance methods,
    not classmethods.
    """
    # Python type the stored value is coerced to (set by subclasses).
    datatype = None
    # Value substituted for None input by the check_defaults decorator.
    default = None
    # Whether None is an acceptable stored value.
    nullable = True
    searchable = False
    forbidden = False
    # Optional collection of allowed values; presumably enforced
    # elsewhere -- TODO confirm.
    choices = None

    def __init__(cls, **kwargs):
        # Any keyword argument overrides the class-level attribute on
        # this instance (e.g. nullable=False, default=...).
        for i in kwargs:
            setattr(cls, i, kwargs[i])

    def humanize(cls, value):
        # Default human-readable rendering: the value itself.
        return value

    @check_defaults
    def dbfy(self, value):
        # Coerce to the declared datatype unless it already matches.
        if self.datatype == type(value):
            return value
        return self.datatype(value)
class Unichar(DataType):
    """Unicode text field (Python 2: values are ``unicode`` strings)."""
    datatype = unicode

    @check_defaults
    def dbfy(cls, value):
        # Reject non-string input before the base class coerces it.
        if not isinstance(value, basestring):
            raise DataTypeMismatch(
                "A field in this form requires a text "
                "string but you entered this instead: %s" % value)
        return super(Unichar, cls).dbfy(value)
class Regex(Unichar):
    """Text field validated against a regular expression."""

    def __init__(cls, regex, **kwargs):
        if not regex:
            raise DataTypeMismatch(
                "regex is required param for Regex DataType")
        # Accept either a pattern string or a pre-compiled pattern.
        if isinstance(regex, str):
            regex = re.compile(regex)
        # NOTE(review): re._pattern_type is a private name (removed in
        # Python 3.7+); this module targets Python 2.
        if not isinstance(regex, re._pattern_type):
            raise DataTypeMismatch("Invalid regex in Regex DataType")
        cls.regex = regex
        Unichar.__init__(cls, **kwargs)

    @check_defaults
    def dbfy(cls, value):
        # Empty string / None pass through unvalidated for nullable fields.
        if cls.nullable and value in ['', None]:
            return value
        if cls.regex.search(value):
            return value
        raise DataTypeMismatch("Invalid value %s" % value)
# 19 digits followed by 5 word characters (internal object-id format).
# Fixed: the pattern is now a raw string; '\d'/'\w' in a plain string are
# invalid escape sequences (a DeprecationWarning on modern Pythons).
id_re = re.compile(r'^\d{19}\w{5}$')
# NOTE(review): the leading '^' binds only to the 'file:///' alternative;
# the 'https?://' branch is unanchored.  Left unchanged to preserve the
# existing matching behavior -- confirm whether anchoring was intended.
url_re = re.compile(
    r'^file:///|https?://'  # file:/// or http:// or https://
    # domain...
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|/\S+)$', re.IGNORECASE)
email_re = re.compile(r'\w+(\.\w+)*@[-+\w]+(\.\w+)+')
# Millisecond epoch timestamps: exactly 13 digits.
epoch_re = re.compile(r'^\d{13}$')
class ID(Regex):
    """Object-id field: 19 digits + 5 word chars, or a bson ObjectId."""

    def __init__(cls, **kwargs):
        Regex.__init__(cls, id_re, **kwargs)

    @check_defaults
    def dbfy(self, value):
        # Already-parsed ObjectIds are stored as-is, bypassing the regex.
        if isinstance(value, ObjectId):
            return value
        return super(ID, self).dbfy(value)
class Email(Regex):
    """Email address field; surrounding whitespace is stripped before
    validation against email_re."""

    def __init__(cls, **kwargs):
        Regex.__init__(cls, email_re, **kwargs)

    @check_defaults
    def dbfy(cls, value):
        return Regex.dbfy(cls, value.strip())
class URL(Regex):
    """URL field validated against url_re; bare hosts get http:// prepended."""
    def __init__(cls, **kwargs):
        Regex.__init__(cls, url_re, **kwargs)
    @check_defaults
    def dbfy(cls, value):
        # NOTE(review): 'https' here lacks its colon, so any value merely
        # starting with the letters "https" skips the prefixing -- confirm
        # this is intentional.
        if value and not value.startswith(('ftp:', 'http:', 'https', 'file:')):
            value = "http://%s" % value
        try:
            val = Regex.dbfy(cls, value)
        except DataTypeMismatch, e:
            # Re-raise with a URL-specific message (original error discarded).
            raise DataTypeMismatch(
                "%s does not match a valid URL scheme" % value)
        else:
            return val
class Boolean(DataType):
    """Boolean field; never null, defaults to False."""
    datatype = bool
    nullable = False
    default = False
class Integer(DataType):
    """Integer field constrained to the signed 64-bit range."""
    datatype = int
    nullable = False
    default = 0
    @check_defaults
    def dbfy(cls, value):
        if cls.datatype != type(value):
            value = cls.datatype(value)
        # Enforce signed 64-bit bounds; a zero value skips the range check
        # because of the leading 'value and'.
        if value and ((value > 2 ** 64 / 2 - 1) or (value < -2 ** 64 / 2)):
            raise DataTypeMismatch('Only 8-byte integer are supported')
        return value
class Decimal(DataType):
    """Floating-point field."""
    datatype = float
    nullable = False
    default = 0
    def humanize(cls, value):
        # Display with exactly two decimal places.
        return '%.2f' % float(value)
    @check_defaults
    def dbfy(cls, value):
        return float(value)
class Currency(Decimal):
    """A non-negative monetary amount stored as a float."""
    datatype = float
    nullable = False
    default = 0
    @check_defaults
    def dbfy(cls, value):
        value = float(value)
        if value < 0:
            # Fix: the original raised "Excpetion", a misspelled name that
            # would itself crash with NameError. Raise the module's
            # validation error so callers can catch it uniformly.
            raise DataTypeMismatch("Currency cannot be a negative value")
        return value
class Html(Unichar):
    """HTML content; stored like plain text with no extra validation."""
    pass
class Dict(DataType):
    """Dictionary field."""
    datatype = dict
    # NOTE(review): mutable class-level default -- every field falling back
    # to it shares the same dict object; confirm check_defaults copies it.
    default = {}
class List(DataType):
    """List field; accepts list, tuple or set and stores a list copy."""
    datatype = list
    # NOTE(review): mutable class-level default shared between fields.
    default = []
    @check_defaults
    def dbfy(cls, value):
        if value is None:
            return value
        if not isinstance(value, (list, tuple, set)):
            raise DataTypeMismatch("Expected a List but found %s" % value)
        return list(value)
class Datetime(DataType):
    """Datetime field; requires an actual datetime.datetime instance."""
    datatype = datetime.datetime
    @check_defaults
    def dbfy(cls, value):
        # No parsing here -- strings and epoch numbers are rejected.
        if not isinstance(value, cls.datatype):
            raise DataTypeMismatch(
                "Expected datetime. Found %s instead" % value)
        return value
class Timestamp(Regex):
    """Millisecond Unix-epoch field: exactly 13 digits, stored as an int."""
    def __init__(cls, **kwargs):
        Regex.__init__(cls, epoch_re, **kwargs)
    @check_defaults
    def dbfy(cls, value):
        # Validate the textual form first, then store numerically.
        text = str(value)
        return int(Regex.dbfy(cls, text))
| |
###############################################################################
# EvoMan FrameWork - V1.0 2016 #
# DEMO : Neuroevolution - Genetic Algorithm neural network. #
# Author: Karine Miras #
# karine.smiras@gmail.com #
###############################################################################
# imports framework
import sys
sys.path.insert(0, 'evoman')
from environment import Environment
from demo_controller import player_controller
# imports other libs
import time
import numpy as np
from math import fabs,sqrt
import glob, os
# choose this for not using visuals and thus making experiments faster
headless = True
if headless:
    # Headless SDL: run pygame without opening a window.
    os.environ["SDL_VIDEODRIVER"] = "dummy"
experiment_name = 'individual_demo'
if not os.path.exists(experiment_name):
    os.makedirs(experiment_name)
n_hidden_neurons = 10
# initializes simulation in individual evolution mode, for single static enemy.
env = Environment(experiment_name=experiment_name,
                  enemies=[2],
                  playermode="ai",
                  player_controller=player_controller(n_hidden_neurons),
                  enemymode="static",
                  level=2,
                  speed="fastest")
# default environment fitness is assumed for experiment
env.state_to_log() # checks environment state
#### Optimization for controller solution (best genotype-weights for phenotype-network): Genetic Algorithm ###
ini = time.time()  # sets time marker
# genetic algorithm params
run_mode = 'train' # train or test
# number of weights for multilayer with 10 hidden neurons
n_vars = (env.get_num_sensors()+1)*n_hidden_neurons + (n_hidden_neurons+1)*5
dom_u = 1    # upper bound for a network weight
dom_l = -1   # lower bound for a network weight
npop = 100   # population size
gens = 30    # number of generations
mutation = 0.2   # per-gene mutation probability
last_best = 0
def simulation(env,x):
    """Play one game with genome x and return only the fitness component."""
    fitness, player_life, enemy_life, game_time = env.play(pcont=x)
    return fitness
# normalizes
def norm(x, pfit_pop):
    """Min-max normalize fitness x against the population fitnesses.

    Returns a value in (0, 1]; non-positive results (including the
    degenerate all-equal population) are clamped to a tiny positive
    constant so the value can be used as a selection probability.
    """
    # Hoist the min/max: the original recomputed them up to four times.
    fit_min = min(pfit_pop)
    fit_max = max(pfit_pop)
    spread = fit_max - fit_min
    if spread > 0:
        x_norm = (x - fit_min) / spread
    else:
        x_norm = 0
    if x_norm <= 0:
        x_norm = 0.0000000001
    return x_norm
# evaluation
def evaluate(x):
    """Return an array holding the simulated fitness of every genome in x."""
    return np.array([simulation(env, genome) for genome in x])
# tournament
def tournament(pop):
    """Binary tournament: sample two random genomes, return the fitter one."""
    i = np.random.randint(0, pop.shape[0], 1)
    j = np.random.randint(0, pop.shape[0], 1)
    # fit_pop is the module-level fitness array aligned with pop.
    winner = i if fit_pop[i] > fit_pop[j] else j
    return pop[winner][0]
# limits
def limits(x):
    """Clamp a single network weight into the domain [dom_l, dom_u]."""
    return max(dom_l, min(dom_u, x))
# crossover
def crossover(pop):
total_offspring = np.zeros((0,n_vars))
for p in range(0,pop.shape[0], 2):
p1 = tournament(pop)
p2 = tournament(pop)
n_offspring = np.random.randint(1,3+1, 1)[0]
offspring = np.zeros( (n_offspring, n_vars) )
for f in range(0,n_offspring):
cross_prop = np.random.uniform(0,1)
offspring[f] = p1*cross_prop+p2*(1-cross_prop)
# mutation
for i in range(0,len(offspring[f])):
if np.random.uniform(0 ,1)<=mutation:
offspring[f][i] = offspring[f][i]+np.random.normal(0, 1)
offspring[f] = np.array(list(map(lambda y: limits(y), offspring[f])))
total_offspring = np.vstack((total_offspring, offspring[f]))
return total_offspring
# kills the worst genomes, and replace with new best/random solutions
def doomsday(pop,fit_pop):
worst = int(npop/4) # a quarter of the population
order = np.argsort(fit_pop)
orderasc = order[0:worst]
for o in orderasc:
for j in range(0,n_vars):
pro = np.random.uniform(0,1)
if np.random.uniform(0,1) <= pro:
pop[o][j] = np.random.uniform(dom_l, dom_u) # random dna, uniform dist.
else:
pop[o][j] = pop[order[-1:]][0][j] # dna from best
fit_pop[o]=evaluate([pop[o]])
return pop,fit_pop
# loads file with the best solution for testing
if run_mode =='test':
    bsol = np.loadtxt(experiment_name+'/best.txt')
    print( '\n RUNNING SAVED BEST SOLUTION \n')
    env.update_parameter('speed','normal')
    evaluate([bsol])
    sys.exit(0)
# initializes population loading old solutions or generating new ones
if not os.path.exists(experiment_name+'/evoman_solstate'):
    print( '\nNEW EVOLUTION\n')
    pop = np.random.uniform(dom_l, dom_u, (npop, n_vars))
    fit_pop = evaluate(pop)
    best = np.argmax(fit_pop)
    mean = np.mean(fit_pop)
    std = np.std(fit_pop)
    ini_g = 0
    solutions = [pop, fit_pop]
    env.update_solutions(solutions)
else:
    print( '\nCONTINUING EVOLUTION\n')
    env.load_state()
    pop = env.solutions[0]
    fit_pop = env.solutions[1]
    best = np.argmax(fit_pop)
    mean = np.mean(fit_pop)
    std = np.std(fit_pop)
    # finds last generation number
    file_aux = open(experiment_name+'/gen.txt','r')
    ini_g = int(file_aux.readline())
    file_aux.close()
# saves results for first pop
file_aux = open(experiment_name+'/results.txt','a')
file_aux.write('\n\ngen best mean std')
print( '\n GENERATION '+str(ini_g)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)))
file_aux.write('\n'+str(ini_g)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)) )
file_aux.close()
# evolution
last_sol = fit_pop[best]
notimproved = 0
for i in range(ini_g+1, gens):
    offspring = crossover(pop) # crossover
    fit_offspring = evaluate(offspring) # evaluation
    pop = np.vstack((pop,offspring))
    fit_pop = np.append(fit_pop,fit_offspring)
    best = np.argmax(fit_pop) #best solution in generation
    fit_pop[best] = float(evaluate(np.array([pop[best] ]))[0]) # repeats best eval, for stability issues
    best_sol = fit_pop[best]
    # selection
    # NOTE(review): this is an alias, not a copy -- fit_pop_cp tracks
    # fit_pop; harmless here since norm() only reads it, but verify if
    # the selection logic is ever changed.
    fit_pop_cp = fit_pop
    fit_pop_norm = np.array(list(map(lambda y: norm(y,fit_pop_cp), fit_pop))) # avoiding negative probabilities, as fitness is ranges from negative numbers
    probs = (fit_pop_norm)/(fit_pop_norm).sum()
    chosen = np.random.choice(pop.shape[0], npop , p=probs, replace=False)
    # Elitism: drop one sampled survivor and force the best genome in.
    chosen = np.append(chosen[1:],best)
    pop = pop[chosen]
    fit_pop = fit_pop[chosen]
    # searching new areas
    if best_sol <= last_sol:
        notimproved += 1
    else:
        last_sol = best_sol
        notimproved = 0
    # 15 stagnant generations trigger a partial restart (doomsday).
    if notimproved >= 15:
        file_aux = open(experiment_name+'/results.txt','a')
        file_aux.write('\ndoomsday')
        file_aux.close()
        pop, fit_pop = doomsday(pop,fit_pop)
        notimproved = 0
    best = np.argmax(fit_pop)
    std = np.std(fit_pop)
    mean = np.mean(fit_pop)
    # saves results
    file_aux = open(experiment_name+'/results.txt','a')
    print( '\n GENERATION '+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)))
    file_aux.write('\n'+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)) )
    file_aux.close()
    # saves generation number
    file_aux = open(experiment_name+'/gen.txt','w')
    file_aux.write(str(i))
    file_aux.close()
    # saves file with the best solution
    np.savetxt(experiment_name+'/best.txt',pop[best])
    # saves simulation state
    solutions = [pop, fit_pop]
    env.update_solutions(solutions)
    env.save_state()
fim = time.time() # prints total execution time for experiment
print( '\nExecution time: '+str(round((fim-ini)/60))+' minutes \n')
file = open(experiment_name+'/neuroended', 'w') # saves control (simulation has ended) file for bash loop file
file.close()
env.state_to_log() # checks environment state
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile # noqa
from django.core.urlresolvers import reverse
from django.forms.widgets import HiddenInput # noqa
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
# URL of the project images index page, resolved once at import time.
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.TestCase):
    """Unit tests for the image creation form (no view round-trip)."""
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_no_location_or_file(self):
        # The form lists kernel (aki) and ramdisk (ari) images for its
        # choice fields; stub both glance lookups.
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        self.mox.ReplayAll()
        post = {
            'name': u'Ubuntu 11.10',
            'source_type': u'file',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': 1}
        files = {}
        # source_type=file with no uploaded file must fail validation.
        form = forms.CreateImageForm(post, files)
        self.assertFalse(form.is_valid())
    @override_settings(HORIZON_IMAGES_ALLOW_UPLOAD=False)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_image_upload_disabled(self):
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA({}), filters=filters).AndReturn(
            [self.images.list(), False, False])
        self.mox.ReplayAll()
        form = forms.CreateImageForm({})
        # With upload disabled the file widget is hidden and 'file' is
        # removed from the source_type choices.
        self.assertEqual(
            isinstance(form.fields['image_file'].widget, HiddenInput), True)
        source_type_dict = dict(form.fields['source_type'].choices)
        self.assertNotIn('file', source_type_dict)
    def test_create_image_metadata_docker(self):
        form_data = {
            'name': u'Docker image',
            'description': u'Docker image test',
            'source_type': u'url',
            'image_url': u'/',
            'disk_format': u'docker',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'is_copying': False
        }
        meta = forms.create_image_metadata(form_data)
        # The pseudo disk format 'docker' maps to raw disk in a docker
        # container; description/architecture move into properties.
        self.assertEqual(meta['disk_format'], 'raw')
        self.assertEqual(meta['container_format'], 'docker')
        self.assertIn('properties', meta)
        self.assertNotIn('description', meta)
        self.assertNotIn('architecture', meta)
        self.assertEqual(meta['properties']['description'],
                         form_data['description'])
        self.assertEqual(meta['properties']['architecture'],
                         form_data['architecture'])
class UpdateImageFormTests(test.TestCase):
    """Tests for the image update form and its view."""
    def test_is_format_field_editable(self):
        form = forms.UpdateImageForm({})
        disk_format = form.fields['disk_format']
        self.assertFalse(disk_format.widget.attrs.get('readonly', False))
    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_update(self):
        image = self.images.first()
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()
        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.get(url)
        self.assertNoFormErrors(res)
        self.assertEqual(res.context['image'].disk_format,
                         image.disk_format)
    @test.create_stubs({api.glance: ('image_update', 'image_get')})
    def test_image_update_post(self):
        image = self.images.first()
        data = {
            'name': u'Ubuntu 11.10',
            'image_id': str(image.id),
            'description': u'Login with admin/admin',
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': False,
            'protected': False,
            'method': 'UpdateImageForm'}
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        # Expect the exact keyword mapping the form builds for glance.
        api.glance.image_update(IsA(http.HttpRequest),
                                image.id,
                                is_public=data['is_public'],
                                protected=data['protected'],
                                disk_format=data['disk_format'],
                                container_format="bare",
                                name=data['name'],
                                min_ram=data['minimum_ram'],
                                min_disk=data['minimum_disk'],
                                properties={'description': data['description'],
                                            'architecture':
                                            data['architecture']},
                                purge_props=False).AndReturn(image)
        self.mox.ReplayAll()
        url = reverse('horizon:project:images:images:update',
                      args=[image.id])
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        # Successful update redirects (302) back to the index.
        self.assertEqual(res.status_code, 302)
class ImageViewTests(test.TestCase):
    """View round-trip tests for image create, detail and update pages."""
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_image_create_get(self):
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
            [self.images.list(), False, False])
        self.mox.ReplayAll()
        url = reverse('horizon:project:images:images:create')
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'project/images/images/create.html')
    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_copy_from(self):
        # 'is_copying' selects glance's copy_from semantics.
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': True}
        api_data = {'copy_from': data['image_url']}
        self._test_image_create(data, api_data)
    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_location(self):
        data = {
            'source_type': u'url',
            'image_url': u'http://cloud-images.ubuntu.com/releases/'
                         u'oneiric/release/ubuntu-11.10-server-cloudimg'
                         u'-amd64-disk1.img',
            'is_copying': False}
        api_data = {'location': data['image_url']}
        self._test_image_create(data, api_data)
    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_upload(self):
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {'source_type': u'file',
                'image_file': temp_file}
        api_data = {'data': IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)
    @test.create_stubs({api.glance: ('image_create',)})
    def test_image_create_post_with_kernel_ramdisk(self):
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(b'123')
        temp_file.flush()
        temp_file.seek(0)
        data = {
            'source_type': u'file',
            'image_file': temp_file,
            'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
            'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
        }
        api_data = {'data': IsA(InMemoryUploadedFile)}
        self._test_image_create(data, api_data)
    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def _test_image_create(self, extra_form_data, extra_api_data):
        # Shared driver: POST the create form with base data plus the
        # per-test extras and expect the matching glance image_create call.
        data = {
            'name': u'Ubuntu 11.10',
            'description': u'Login with admin/admin',
            'disk_format': u'qcow2',
            'architecture': u'x86-64',
            'minimum_disk': 15,
            'minimum_ram': 512,
            'is_public': True,
            'protected': False,
            'method': 'CreateImageForm'}
        data.update(extra_form_data)
        api_data = {'container_format': 'bare',
                    'disk_format': data['disk_format'],
                    'is_public': True,
                    'protected': False,
                    'min_disk': data['minimum_disk'],
                    'min_ram': data['minimum_ram'],
                    'properties': {
                        'description': data['description'],
                        'architecture': data['architecture']},
                    'name': data['name']}
        api_data.update(extra_api_data)
        filters = {'disk_format': 'aki'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
            [self.images.list(), False, False])
        filters = {'disk_format': 'ari'}
        api.glance.image_list_detailed(
            IsA(http.HttpRequest), filters=filters).AndReturn(
            [self.images.list(), False, False])
        api.glance.image_create(
            IsA(http.HttpRequest),
            **api_data).AndReturn(self.images.first())
        self.mox.ReplayAll()
        url = reverse('horizon:project:images:images:create')
        res = self.client.post(url, data)
        self.assertNoFormErrors(res)
        self.assertEqual(res.status_code, 302)
    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_get(self):
        image = self.images.first()
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(self.images.first())
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        self.assertTemplateUsed(res,
                                'horizon/common/_detail.html')
        self.assertEqual(res.context['image'].name, image.name)
        self.assertEqual(res.context['image'].protected, image.protected)
    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_custom_props_get(self):
        image = self.images.list()[8]
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:images:images:detail',
                                      args=[image.id]))
        image_props = res.context['image_props']
        # Test description property not displayed
        image_keys = [prop[0] for prop in image_props]
        self.assertNotIn(('description'), image_keys)
        # Test custom properties are sorted
        self.assertEqual(image_props[0], ('bar', 'bar', 'bar val'))
        self.assertEqual(image_props[1], ('foo', 'foo', 'foo val'))
        # Test all custom properties appear in template
        self.assertContains(res, '<dt title="bar">bar</dt>')
        self.assertContains(res, '<dd>bar val</dd>')
        self.assertContains(res, '<dt title="foo">foo</dt>')
        self.assertContains(res, '<dd>foo val</dd>')
    @test.create_stubs({api.glance: ('image_get',)})
    def test_protected_image_detail_get(self):
        image = self.images.list()[2]
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()
        res = self.client.get(
            reverse('horizon:project:images:images:detail',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'horizon/common/_detail.html')
        self.assertEqual(res.context['image'].protected, image.protected)
    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_detail_get_with_exception(self):
        image = self.images.first()
        # Glance failure on the detail page must redirect to the index.
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndRaise(self.exceptions.glance)
        self.mox.ReplayAll()
        url = reverse('horizon:project:images:images:detail',
                      args=[image.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
    @test.create_stubs({api.glance: ('image_get',)})
    def test_image_update_get(self):
        image = self.images.first()
        image.disk_format = "ami"
        image.is_public = True
        api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
            .AndReturn(image)
        self.mox.ReplayAll()
        res = self.client.get(
            reverse('horizon:project:images:images:update',
                    args=[image.id]))
        self.assertTemplateUsed(res,
                                'project/images/images/_update.html')
        self.assertEqual(res.context['image'].name, image.name)
        # Bug 1076216 - is_public checkbox not being set correctly
        self.assertContains(res, "<input type='checkbox' id='id_public'"
                                 " name='public' checked='checked'>",
                            html=True,
                            msg_prefix="The is_public checkbox is not checked")
class OwnerFilterTests(test.TestCase):
    """Tests for the image table's owner filter action."""
    def setUp(self):
        super(OwnerFilterTests, self).setUp()
        self.table = self.mox.CreateMock(horizon_tables.DataTable)
        self.table.request = self.request
    @override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
                                                    'tenant': 'officialtenant',
                                                    'icon': 'fa-check'}])
    def test_filter(self):
        self.mox.ReplayAll()
        all_images = self.images.list()
        table = self.table
        # Stashed for _expected(); must be set before the helper is called.
        self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS
        filter_ = tables.OwnerFilter()
        images = filter_.filter(table, all_images, 'project')
        self.assertEqual(images, self._expected('project'))
        images = filter_.filter(table, all_images, 'public')
        self.assertEqual(images, self._expected('public'))
        images = filter_.filter(table, all_images, 'shared')
        self.assertEqual(images, self._expected('shared'))
        images = filter_.filter(table, all_images, 'officialtenant')
        self.assertEqual(images, self._expected('officialtenant'))
    def _expected(self, filter_string):
        # Reference implementation of the filter for comparison.
        my_tenant_id = self.request.user.tenant_id
        images = self.images.list()
        # NOTE(review): on Python 3 map() returns a one-shot iterator, so
        # the 'in special' test below would only work for the first image.
        # This looks Python-2 only -- confirm before porting.
        special = map(lambda t: t['tenant'], self.filter_tenants)
        if filter_string == 'public':
            return [im for im in images if im.is_public]
        if filter_string == 'shared':
            return [im for im in images
                    if (not im.is_public and
                        im.owner != my_tenant_id and
                        im.owner not in special)]
        if filter_string == 'project':
            filter_string = my_tenant_id
        return [im for im in images if im.owner == filter_string]
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A module that implements the Architecture class, a neural network
architecture description.
"""
import os
from theanolm.backend import IncompatibleStateError, InputError
class Architecture(object):
    """Neural Network Architecture Description
    A description of the neural network architecture can be read from a text
    file or from a neural network state stored in an HDF5 file.
    """
    def __init__(self, inputs, layers, output_layer=None):
        """Constructs a description of the neural network architecture.
        :type inputs: list of dict
        :param inputs: parameters for each input as a list of dictionaries
        :type layers: list of dict
        :param layers: parameters for each layer as a list of dictionaries
        :type output_layer: str
        :param output_layer: name of the layer that gives the output of the
                             network (the last layer if None)
        """
        self.inputs = inputs
        if not inputs:
            raise ValueError("Cannot construct Architecture without inputs.")
        self.layers = layers
        if not layers:
            raise ValueError("Cannot construct Architecture without layers.")
        if output_layer is None:
            # Default to the last layer in description order.
            self.output_layer = layers[-1]['name']
        else:
            self.output_layer = output_layer
    @classmethod
    def from_state(cls, state):
        """Constructs a description of the network architecture stored in a
        state.
        :type state: hdf5.File
        :param state: HDF5 file that contains the architecture parameters
        """
        if 'architecture' not in state:
            raise IncompatibleStateError(
                "Architecture is missing from neural network state.")
        h5_arch = state['architecture']
        if 'inputs' not in h5_arch:
            raise IncompatibleStateError(
                "Architecture parameter 'inputs' is missing from neural "
                "network state.")
        h5_inputs = h5_arch['inputs']
        inputs = []
        for input_id in sorted(h5_inputs.keys()):
            h5_input = h5_inputs[input_id]
            inputs.append(cls._read_h5_dict(h5_input))
        if 'layers' not in h5_arch:
            raise IncompatibleStateError(
                "Architecture parameter 'layers' is missing from neural "
                "network state.")
        h5_layers = h5_arch['layers']
        layers = []
        # Convert to int before sorting so '10' does not come before '2'.
        layer_ids = [int(x) for x in h5_layers.keys()]
        for layer_id in sorted(layer_ids):
            h5_layer = h5_layers[str(layer_id)]
            layers.append(cls._read_h5_dict(h5_layer))
        if 'output_layer' not in h5_arch.attrs:
            raise IncompatibleStateError(
                "Architecture parameter 'output_layer' is missing from "
                "neural network state.")
        output_layer = h5_arch.attrs['output_layer']
        return cls(inputs, layers, output_layer)
    @classmethod
    def from_description(cls, description_file):
        """Reads a description of the network architecture from a text file.
        :type description_file: file or file-like object
        :param description_file: text file containing the description
        :rtype: Network.Architecture
        :returns: an object describing the network architecture
        """
        inputs = []
        layers = []
        for line in description_file:
            fields = line.split()
            # Skip blank lines and comment lines starting with '#'.
            if not fields or fields[0][0] == '#':
                continue
            if fields[0] == 'input':
                input_description = dict()
                for field in fields[1:]:
                    parts = field.split('=', 1)
                    if len(parts) != 2:
                        raise InputError(
                            "'field=value' expected but '{}' found in an input "
                            "description in '{}'."
                            .format(field, description_file.name))
                    variable, value = parts
                    input_description[variable] = value
                if 'type' not in input_description:
                    raise InputError(
                        "'type' is not given in an input description in '{}'."
                        .format(description_file.name))
                if 'name' not in input_description:
                    raise InputError(
                        "'name' is not given in an input description in '{}'."
                        .format(description_file.name))
                inputs.append(input_description)
            elif fields[0] == 'layer':
                # 'input' and 'device' may repeat, so they accumulate into
                # lists; 'size' is parsed as an integer.
                layer_description = {'inputs': [], 'devices': []}
                for field in fields[1:]:
                    parts = field.split('=', 1)
                    if len(parts) != 2:
                        raise InputError(
                            "'field=value' expected but '{}' found in a layer "
                            "description in '{}'."
                            .format(field, description_file.name))
                    variable, value = parts
                    if variable == 'size':
                        layer_description[variable] = int(value)
                    elif variable == 'input':
                        layer_description['inputs'].append(value)
                    elif variable == 'device':
                        layer_description['devices'].append(value)
                    else:
                        layer_description[variable] = value
                if 'type' not in layer_description:
                    raise InputError(
                        "'type' is not given in a layer description in '{}'."
                        .format(description_file.name))
                if 'name' not in layer_description:
                    raise InputError(
                        "'name' is not given in a layer description in '{}'."
                        .format(description_file.name))
                if not layer_description['inputs']:
                    raise InputError(
                        "'input' is not given in a layer description in '{}'."
                        .format(description_file.name))
                layers.append(layer_description)
            else:
                raise InputError("Invalid element in architecture "
                                 "description: {}".format(fields[0]))
        if not inputs:
            raise InputError("Architecture description contains no inputs.")
        if not layers:
            raise InputError("Architecture description contains no layers.")
        return cls(inputs, layers)
    @classmethod
    def from_package(cls, name):
        """Reads network architecture from one of the files packaged with
        TheanoLM.
        :type name: str
        :param name: name of a standard architecture file (without directory or
                     file extension)
        :rtype: Network.Architecture
        :returns: an object describing the network architecture
        """
        # Architecture files live in <package>/architectures/<name>.arch.
        file_dir = os.path.abspath(os.path.dirname(__file__))
        package_dir = os.path.dirname(file_dir)
        description_path = os.path.join(package_dir,
                                        'architectures',
                                        name + '.arch')
        with open(description_path, 'rt', encoding='utf-8') as description_file:
            return cls.from_description(description_file)
    def get_state(self, state):
        """Saves the architecture parameters in a HDF5 file.
        The variable values will be saved as attributes of HDF5 groups. A
        group will be created for each level of the hierarchy.
        :type state: h5py.File
        :param state: HDF5 file for storing the architecture parameters
        """
        h5_arch = state.require_group('architecture')
        h5_inputs = h5_arch.require_group('inputs')
        for input_id, input_value in enumerate(self.inputs):
            h5_input = h5_inputs.require_group(str(input_id))
            self._write_h5_dict(h5_input, input_value)
        h5_layers = h5_arch.require_group('layers')
        for layer_id, layer in enumerate(self.layers):
            h5_layer = h5_layers.require_group(str(layer_id))
            self._write_h5_dict(h5_layer, layer)
        h5_arch.attrs['output_layer'] = self.output_layer
    def check_state(self, state):
        """Checks that the architecture stored in a state matches this
        network architecture, and raises an ``IncompatibleStateError``
        if not.
        :type state: h5py.File
        :param state: HDF5 file that contains the architecture parameters
        """
        if 'architecture' not in state:
            raise IncompatibleStateError(
                "Architecture is missing from neural network state.")
        h5_arch = state['architecture']
        if 'inputs' not in h5_arch:
            raise IncompatibleStateError(
                "Architecture parameter 'inputs' is missing from neural "
                "network state.")
        h5_inputs = h5_arch['inputs']
        for input_id, input_value in enumerate(self.inputs):
            h5_input = h5_inputs[str(input_id)]
            self._check_h5_dict(h5_input, input_value)
        if 'layers' not in h5_arch:
            raise IncompatibleStateError(
                "Architecture parameter 'layers' is missing from neural "
                "network state.")
        h5_layers = h5_arch['layers']
        for layer_id, layer in enumerate(self.layers):
            h5_layer = h5_layers[str(layer_id)]
            self._check_h5_dict(h5_layer, layer)
        if 'output_layer' not in h5_arch.attrs:
            raise IncompatibleStateError(
                "Architecture parameter 'output_layer' is missing from "
                "neural network state.")
        h5_value = h5_arch.attrs['output_layer']
        if self.output_layer != h5_value:
            # Format indices are deliberately swapped: {0} is this
            # architecture's value, {1} is the value found in the state.
            raise IncompatibleStateError(
                "Neural network state has output_layer={1}, while "
                "this architecture has output_layer={0}.".format(
                    self.output_layer, h5_value))
    @staticmethod
    def _read_h5_dict(h5_dict):
        """Reads a dictionary from a HDF5 file.
        The dictionary can store strings, which are represented as HDF5
        attributes, and lists of strings, which are represented as attributes of
        HDF5 subgroups.
        :type h5_dict: h5py.Group
        :param h5_dict: The HD5 group that stores a dictionary of strings in its
                        attributes
        """
        result = dict()
        for variable in h5_dict.attrs:
            result[variable] = h5_dict.attrs[variable]
        for variable in h5_dict:
            values = []
            h5_values = h5_dict[variable]
            # NOTE(review): attribute keys are strings, so this sorts
            # lexicographically ('10' before '2') -- lists with ten or more
            # elements may be reordered. Confirm whether lists that long can
            # occur here.
            for value_id in sorted(h5_values.attrs.keys()):
                values.append(h5_values.attrs[value_id])
            result[variable] = values
        return result
    @staticmethod
    def _write_h5_dict(h5_group, variables):
        """Writes a dictionary to a HDF5 file.
        The dictionary can store strings, which are represented as HDF5
        attributes, and lists of strings, which are represented as attributes of
        HDF5 subgroups.
        :type h5_group: h5py.Group
        :param h5_group: The HD5 group that will store a dictionary of strings
                         in its attributes
        :type variables: dict
        :param variables: a dictionary that may contain strings and lists of strings
        """
        for variable, values in variables.items():
            if isinstance(values, list):
                # Lists become a subgroup whose attributes are indexed items.
                h5_values = h5_group.require_group(variable)
                for value_id, value in enumerate(values):
                    h5_values.attrs[str(value_id)] = value
            else:
                h5_group.attrs[variable] = values
    @staticmethod
    def _check_h5_dict(h5_group, variables):
        """Checks that a dictionary matches a HDF5 group and raises an
        ``IncompatibleStateError`` if not.
        The dictionary can store strings, which are represented as HDF5
        attributes, and lists of strings, which are represented as attributes of
        HDF5 subgroups.
        :type h5_group: h5py.Group
        :param h5_group: The HD5 group that will store a dictionary of strings
                         in its attributes
        :type variables: dict
        :param variables: a dictionary that may contain strings and lists of
                          strings
        """
        for variable, values in variables.items():
            if isinstance(values, list):
                # For backward compatibility. Remove at some point.
                if (variable == 'devices') and (not variable in h5_group):
                    if not values:
                        continue
                    else:
                        raise IncompatibleStateError(
                            "Neural network state does not specify devices, "
                            "while this architecture uses {}."
                            .format(', '.join(str(x) for x in values)))
                h5_values = h5_group[variable]
                for value_id, value in enumerate(values):
                    h5_value = h5_values.attrs[str(value_id)]
                    if value != h5_value:
                        raise IncompatibleStateError(
                            "Neural network state has {0}={2}, while this "
                            "architecture has {0}={1}."
                            .format(variable, value, h5_value))
            else:
                h5_value = h5_group.attrs[variable]
                if values != h5_value:
                    raise IncompatibleStateError(
                        "Neural network state has {0}={2}, while this "
                        "architecture has {0}={1}."
                        .format(variable, values, h5_value))
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import json
import math
import operator
from os import urandom
import re
import time
import uuid
from castellan.common.credentials import keystone_password
from castellan.common import exception as castellan_exception
from castellan import key_manager as castellan_key_manager
import eventlet
from eventlet import tpool
from keystoneauth1 import loading as ks_loading
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from random import shuffle
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
    """Return ``str(s)``, or an empty string for any falsy value."""
    if s:
        return str(s)
    return ''
def _usage_from_volume(context, volume_ref, **kw):
    """Build the usage notification payload for a volume.

    :param context: request context used for the DB lookups
    :param volume_ref: volume reference (dict-like) to describe
    :param kw: extra key/value pairs merged into the payload
    :returns: dict of usage fields describing the volume
    """
    now = timeutils.utcnow()
    # Fall back to "now" when the volume lacks these timestamps.
    launched_at = volume_ref['launched_at'] or now
    created_at = volume_ref['created_at'] or now
    volume_status = volume_ref['status']
    # Report the transient error_managing_deleting state as plain 'deleting'.
    if volume_status == 'error_managing_deleting':
        volume_status = 'deleting'
    usage_info = dict(
        tenant_id=volume_ref['project_id'],
        host=volume_ref['host'],
        user_id=volume_ref['user_id'],
        availability_zone=volume_ref['availability_zone'],
        volume_id=volume_ref['id'],
        volume_type=volume_ref['volume_type_id'],
        display_name=volume_ref['display_name'],
        launched_at=launched_at.isoformat(),
        created_at=created_at.isoformat(),
        status=volume_status,
        snapshot_id=volume_ref['snapshot_id'],
        size=volume_ref['size'],
        replication_status=volume_ref['replication_status'],
        replication_extended_status=volume_ref['replication_extended_status'],
        replication_driver_data=volume_ref['replication_driver_data'],
        metadata=volume_ref.get('volume_metadata'),)

    usage_info.update(kw)
    try:
        # Attachment and Glance metadata are best-effort additions; a
        # missing volume or missing Glance metadata is not fatal.
        attachments = db.volume_attachment_get_all_by_volume_id(
            context, volume_ref['id'])
        usage_info['volume_attachment'] = attachments

        glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
        if glance_meta:
            usage_info['glance_metadata'] = glance_meta
    except exception.GlanceMetadataNotFound:
        pass
    except exception.VolumeNotFound:
        LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])

    return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=backup.created_at.isoformat(),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
                              extra_usage_info=None, host=None):
    """Emit a 'volume.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_volume(context, volume, **extra_usage_info)

    rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
                                          usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
                              extra_usage_info=None,
                              host=None):
    """Emit a 'backup.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_backup(backup, **extra_usage_info)

    rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
                                          usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
    """Build the usage notification payload for a snapshot.

    :param snapshot: snapshot object to describe
    :param context: request context; note its read_deleted flag is set
                    to "yes" as a side effect
    :param extra_usage_info: extra key/value pairs merged into the payload
    :returns: dict of usage fields describing the snapshot
    """
    # (niedbalski) a snapshot might be related to a deleted
    # volume, if that's the case, the volume information is still
    # required for filling the usage_info, so we enforce to read
    # the volume data even if the volume has been deleted.
    context.read_deleted = "yes"
    volume = db.volume_get(context, snapshot.volume_id)
    usage_info = {
        'tenant_id': snapshot.project_id,
        'user_id': snapshot.user_id,
        'availability_zone': volume['availability_zone'],
        'volume_id': snapshot.volume_id,
        'volume_size': snapshot.volume_size,
        'snapshot_id': snapshot.id,
        'display_name': snapshot.display_name,
        'created_at': snapshot.created_at.isoformat(),
        'status': snapshot.status,
        'deleted': null_safe_str(snapshot.deleted),
        'metadata': null_safe_str(snapshot.metadata),
    }

    usage_info.update(extra_usage_info)
    return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
                                extra_usage_info=None, host=None):
    """Emit a 'snapshot.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)

    rpc.get_notifier('snapshot', host).info(context,
                                            'snapshot.%s' % event_suffix,
                                            usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
                                extra_usage_info=None, host=None):
    """Emit a 'capacity.<suffix>' notification with backend capacity stats.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_capacity(capacity, **extra_usage_info)

    rpc.get_notifier('capacity', host).info(context,
                                            'capacity.%s' % suffix,
                                            usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
                                   extra_usage_info=None, host=None):
    """Emit a 'replication.<suffix>' usage notification for a volume.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_volume(context, volume,
                                    **extra_usage_info)

    rpc.get_notifier('replication', host).info(context,
                                               'replication.%s' % suffix,
                                               usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
                                   extra_error_info=None, host=None):
    """Emit a 'replication.<suffix>' ERROR-level notification for a volume.

    Unlike the usage variants this publishes at error severity.
    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_error_info:
        extra_error_info = {}

    usage_info = _usage_from_volume(context, volume,
                                    **extra_error_info)

    rpc.get_notifier('replication', host).error(context,
                                                'replication.%s' % suffix,
                                                usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
                                        extra_usage_info=None, host=None):
    """Emit a 'consistencygroup.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_consistencygroup(group,
                                              **extra_usage_info)

    rpc.get_notifier("consistencygroup", host).info(
        context,
        'consistencygroup.%s' % event_suffix,
        usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
                             extra_usage_info=None, host=None):
    """Emit a 'group.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_group(group,
                                   **extra_usage_info)

    rpc.get_notifier("group", host).info(
        context,
        'group.%s' % event_suffix,
        usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
                                  extra_usage_info=None, host=None):
    """Emit a 'cgsnapshot.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_cgsnapshot(cgsnapshot,
                                        **extra_usage_info)

    rpc.get_notifier("cgsnapshot", host).info(
        context,
        'cgsnapshot.%s' % event_suffix,
        usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
                                      extra_usage_info=None, host=None):
    """Emit a 'group_snapshot.<event_suffix>' usage notification.

    :param host: notifier host; defaults to CONF.host
    """
    if not host:
        host = CONF.host

    if not extra_usage_info:
        extra_usage_info = {}

    usage_info = _usage_from_group_snapshot(group_snapshot,
                                            **extra_usage_info)

    rpc.get_notifier("group_snapshot", host).info(
        context,
        'group_snapshot.%s' % event_suffix,
        usage_info)
def _check_blocksize(blocksize):
    """Validate a dd blocksize string, falling back to the config default.

    :param blocksize: candidate blocksize string (e.g. '1M')
    :returns: the validated blocksize, or CONF.volume_dd_blocksize when the
              supplied value is invalid
    """
    # Check if volume_dd_blocksize is valid
    try:
        # Rule out zero-sized/negative/float dd blocksize which
        # cannot be caught by strutils
        if blocksize.startswith(('-', '0')) or '.' in blocksize:
            raise ValueError
        strutils.string_to_bytes('%sB' % blocksize)
    except ValueError:
        LOG.warning("Incorrect value error: %(blocksize)s, "
                    "it may indicate that \'volume_dd_blocksize\' "
                    "was configured incorrectly. Fall back to default.",
                    {'blocksize': blocksize})
        # Fall back to default blocksize
        CONF.clear_override('volume_dd_blocksize')
        blocksize = CONF.volume_dd_blocksize

    return blocksize
def check_for_odirect_support(src, dest, flag='oflag=direct'):
    """Probe whether dd supports O_DIRECT for the given src/dest pair.

    Runs a zero-count dd as a probe command.

    :param flag: the dd direct-I/O flag to test ('iflag=direct' or
                 'oflag=direct')
    :returns: True when the flag is usable, False otherwise
    """
    # Check whether O_DIRECT is supported
    try:
        # iflag=direct and if=/dev/zero combination does not work
        # error: dd: failed to open '/dev/zero': Invalid argument
        if (src == '/dev/zero' and flag == 'iflag=direct'):
            return False
        else:
            utils.execute('dd', 'count=0', 'if=%s' % src,
                          'of=%s' % dest,
                          flag, run_as_root=True)
            return True
    except processutils.ProcessExecutionError:
        return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
                           sync=False, execute=utils.execute, ionice=None,
                           sparse=False):
    """Copy size_in_m MiB from srcstr to deststr using dd.

    :param prefix: command prefix (e.g. a throttling wrapper) prepended
                   to the dd invocation
    :param srcstr: source device/file path
    :param deststr: destination device/file path
    :param size_in_m: number of MiB to copy
    :param blocksize: dd block size; validated via _check_blocksize
    :param sync: when True, ensure data is persisted before returning
    :param execute: callable used to run the command
    :param ionice: optional ionice argument for the copy
    :param sparse: when True, ask dd to create sparse output
    """
    cmd = prefix[:]

    if ionice:
        cmd.extend(('ionice', ionice))
    blocksize = _check_blocksize(blocksize)
    size_in_bytes = size_in_m * units.Mi

    # count is given in bytes; iflag=count_bytes below makes dd honor that.
    cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
                'count=%d' % size_in_bytes, 'bs=%s' % blocksize))

    # Use O_DIRECT to avoid thrashing the system buffer cache
    odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')

    cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')

    if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
        cmd.append('oflag=direct')
        odirect = True

    # If the volume is being unprovisioned then
    # request the data is persisted before returning,
    # so that it's not discarded from the cache.
    conv = []
    if sync and not odirect:
        conv.append('fdatasync')
    if sparse:
        conv.append('sparse')
    if conv:
        conv_options = 'conv=' + ",".join(conv)
        cmd.append(conv_options)

    # Perform the copy
    start_time = timeutils.utcnow()
    execute(*cmd, run_as_root=True)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    mbps = (size_in_m / duration)
    LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
              "size %(sz).2f MB, duration %(duration).2f sec",
              {"src": srcstr,
               "dest": deststr,
               "sz": size_in_m,
               "duration": duration})
    LOG.info("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s",
             {'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
    """Open *path* under a temporary ownership change.

    :returns: the open file handle, or None (implicitly) when opening fails
    """
    try:
        with utils.temporary_chown(path):
            handle = open(path, mode)
            return handle
    except Exception:
        # NOTE(review): the broad catch deliberately routes all failures to
        # the callers' "device unavailable" handling; details are only logged.
        LOG.error("Failed to open volume from %(path)s.", {'path': path})
def _transfer_data(src, dest, length, chunk_size):
    """Transfer data between files (Python IO objects).

    :param src: readable source file-like object
    :param dest: writable destination file-like object
    :param length: total number of bytes to transfer
    :param chunk_size: size of each read/write chunk in bytes
    """
    # Force true division so the chunk count rounds *up* even under
    # Python 2 integer semantics (this module is six/py2-compatible):
    # a plain "length / chunk_size" would floor first and make math.ceil
    # a no-op, silently dropping the final partial chunk.
    chunks = int(math.ceil(length / float(chunk_size)))
    remaining_length = length

    LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
              {'chunks': chunks, 'bytes': chunk_size})

    for chunk in range(0, chunks):
        before = time.time()
        data = tpool.execute(src.read, min(chunk_size, remaining_length))

        # If we have reached end of source, discard any extraneous bytes from
        # destination volume if trim is enabled and stop writing.
        if data == b'':
            break

        tpool.execute(dest.write, data)
        remaining_length -= len(data)
        # Guard against a zero delta from low-resolution clocks so the
        # debug rate computation cannot divide by zero.
        delta = max(time.time() - before, 0.001)
        rate = (chunk_size / delta) / units.Ki
        LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
                  {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})

        # yield to any other pending operations
        eventlet.sleep(0)

    tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
    """Copy size_in_m MiB between file paths or open file-like objects.

    String arguments are treated as paths and opened via
    _open_volume_with_path; anything else is used directly as a handle.

    :raises: exception.DeviceUnavailable when either side cannot be opened
    """
    src_handle = src
    if isinstance(src, six.string_types):
        src_handle = _open_volume_with_path(src, 'rb')

    dest_handle = dest
    if isinstance(dest, six.string_types):
        dest_handle = _open_volume_with_path(dest, 'wb')

    if not src_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, source device unavailable."))

    if not dest_handle:
        raise exception.DeviceUnavailable(
            _("Failed to copy volume, destination device unavailable."))

    start_time = timeutils.utcnow()

    # Transfer in 4 MiB chunks.
    _transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)

    duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))

    # Only close the handles this function opened itself.
    if isinstance(src, six.string_types):
        src_handle.close()
    if isinstance(dest, six.string_types):
        dest_handle.close()

    mbps = (size_in_m / duration)
    LOG.info("Volume copy completed (%(size_in_m).2f MB at "
             "%(mbps).2f MB/s).",
             {'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
                execute=utils.execute, ionice=None, throttle=None,
                sparse=False):
    """Copy data from the source volume to the destination volume.

    The parameters 'src' and 'dest' are both typically of type str, which
    represents the path to each volume on the filesystem.  Connectors can
    optionally return a volume handle of type RawIOBase for volumes that are
    not available on the local filesystem for open/close operations.

    If either 'src' or 'dest' are not of type str, then they are assumed to be
    of type RawIOBase or any derivative that supports file operations such as
    read and write.  In this case, the handles are treated as file handles
    instead of file paths and, at present moment, throttling is unavailable.
    """
    if (isinstance(src, six.string_types) and
            isinstance(dest, six.string_types)):
        # Path-based copy via dd, optionally wrapped in a throttle command.
        if not throttle:
            throttle = throttling.Throttle.get_default()
        with throttle.subcommand(src, dest) as throttle_cmd:
            _copy_volume_with_path(throttle_cmd['prefix'], src, dest,
                                   size_in_m, blocksize, sync=sync,
                                   execute=execute, ionice=ionice,
                                   sparse=sparse)
    else:
        # Handle-based copy; throttling is not supported on this path.
        _copy_volume_with_file(src, dest, size_in_m)
def clear_volume(volume_size, volume_path, volume_clear=None,
                 volume_clear_size=None, volume_clear_ionice=None,
                 throttle=None):
    """Unprovision old volumes to prevent data leaking between users.

    :param volume_size: volume size in MiB, used when no clear size is set
    :param volume_path: path of the device to wipe
    :param volume_clear: clearing method; defaults to CONF.volume_clear
    :param volume_clear_size: MiB to clear; 0 means the whole volume
    :param volume_clear_ionice: optional ionice setting for the wipe
    :param throttle: optional throttle passed through to copy_volume
    :raises: exception.InvalidConfigurationValue for unsupported methods
    """
    if volume_clear is None:
        volume_clear = CONF.volume_clear

    if volume_clear_size is None:
        volume_clear_size = CONF.volume_clear_size

    if volume_clear_size == 0:
        volume_clear_size = volume_size

    if volume_clear_ionice is None:
        volume_clear_ionice = CONF.volume_clear_ionice

    LOG.info("Performing secure delete on volume: %s", volume_path)

    # We pass sparse=False explicitly here so that zero blocks are not
    # skipped in order to clear the volume.
    if volume_clear == 'zero':
        return copy_volume('/dev/zero', volume_path, volume_clear_size,
                           CONF.volume_dd_blocksize,
                           sync=True, execute=utils.execute,
                           ionice=volume_clear_ionice,
                           throttle=throttle, sparse=False)
    else:
        raise exception.InvalidConfigurationValue(
            option='volume_clear',
            value=volume_clear)
def supports_thin_provisioning():
    """Return whether the local LVM stack supports thin provisioning."""
    return brick_lvm.LVM.supports_thin_provisioning(
        utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
    """List LVM physical volumes, optionally limited to one volume group."""
    return brick_lvm.LVM.get_all_physical_volumes(
        utils.get_root_helper(),
        vg_name)
def get_all_volume_groups(vg_name=None):
    """List LVM volume groups, optionally filtered by name."""
    return brick_lvm.LVM.get_all_volume_groups(
        utils.get_root_helper(),
        vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ',  # Removed: I, O
                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l


def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random password from the supplied symbol groups.

    At least one symbol from each group will be included. Unpredictable
    results if length is less than the number of symbol groups.

    Believed to be reasonably secure (with a reasonable password length!)
    """
    def _pick(symbols):
        # Select one symbol using a single byte of OS randomness.
        return symbols[ord(urandom(1)) % len(symbols)]

    # NOTE(jerdfelt): Some password policies require at least one character
    # from each group of symbols, so start off with one random character
    # from each symbol group
    password = [_pick(group) for group in symbolgroups]

    # If length < len(symbolgroups), the leading characters will only
    # be from the first length groups. Try our best to not be predictable
    # by shuffling and then truncating.
    shuffle(password)
    password = password[:length]

    # then fill with random characters from all symbol groups
    all_symbols = ''.join(symbolgroups)
    password.extend(_pick(all_symbols)
                    for _ in range(length - len(password)))

    # finally shuffle to ensure first x characters aren't from a
    # predictable group
    shuffle(password)
    return ''.join(password)
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
    """Generate a random username of *length* from the symbol groups."""
    # Use the same implementation as the password generation.
    return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'


def extract_host(host, level='backend', default_pool_name=False):
    """Extract Host, Backend or Pool information from host string.

    :param host: String for host, which could include host@backend#pool info
    :param level: Indicate which level of information should be extracted
                  from host string. Level can be 'host', 'backend' or 'pool',
                  default value is 'backend'
    :param default_pool_name: this flag specify what to do if level == 'pool'
                              and there is no 'pool' info encoded in host
                              string.  default_pool_name=True will return
                              DEFAULT_POOL_NAME, otherwise we return None.
                              Default value of this parameter is False.
    :return: expected information, string or None
    :raises: exception.InvalidVolume

    For example:
        host = 'HostA@BackendB#PoolC'
        ret = extract_host(host, 'host')
        # ret is 'HostA'
        ret = extract_host(host, 'backend')
        # ret is 'HostA@BackendB'
        ret = extract_host(host, 'pool')
        # ret is 'PoolC'

        host = 'HostX@BackendY'
        ret = extract_host(host, 'pool')
        # ret is None
        ret = extract_host(host, 'pool', True)
        # ret is '_pool0'
    """
    if host is None:
        msg = _("volume is not assigned to a host")
        raise exception.InvalidVolume(reason=msg)

    # 'HostA@BackendB#PoolC' -> ['HostA@BackendB', 'PoolC']
    parts = host.split('#')
    if level == 'host':
        # Strip both the pool and the backend information.
        return parts[0].split('@')[0]
    if level == 'backend':
        return parts[0]
    if level == 'pool':
        if len(parts) == 2:
            return parts[1]
        if default_pool_name is True:
            return DEFAULT_POOL_NAME
        return None
def append_host(host, pool):
    """Encode pool into host info."""
    # When either part is missing, the host string is returned unchanged.
    if host and pool:
        return '#'.join((host, pool))
    return host
def matching_backend_name(src_volume_type, volume_type):
    """Return True when both volume types declare the same backend name.

    Missing or empty 'volume_backend_name' on either side yields False.
    """
    src_name = src_volume_type.get('volume_backend_name')
    dst_name = volume_type.get('volume_backend_name')
    if src_name and dst_name:
        return src_name == dst_name
    return False
def hosts_are_equivalent(host_1, host_2):
    """Return True when both host strings map to the same host@backend."""
    # In case host_1 or host_2 are None
    if not (host_1 and host_2):
        return host_1 == host_2
    return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
    """Read the /proc/mounts file.

    It's a dummy function but it eases the writing of unit tests as mocking
    __builtin__open() for a specific file only is not trivial.

    :returns: list of lines from /proc/mounts
    """
    with open('/proc/mounts') as mounts:
        return mounts.readlines()
def extract_id_from_volume_name(vol_name):
    """Return the volume UUID encoded in *vol_name*, or None.

    The name is matched against CONF.volume_name_template with its '%s'
    placeholder turned into a named regex group.
    """
    regex = re.compile(
        CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
    match = regex.match(vol_name)
    return match.group('uuid') if match else None
def check_already_managed_volume(vol_id):
    """Check cinder db for already managed volume.

    :param vol_id: volume id parameter
    :returns: bool -- return True, if db entry with specified
                      volume id exists, otherwise return False
    """
    try:
        # Requires a string that parses as a version-4 UUID; anything else
        # fails the isinstance check or raises ValueError -> False.
        return (vol_id and isinstance(vol_id, six.string_types) and
                uuid.UUID(vol_id, version=4) and
                objects.Volume.exists(context.get_admin_context(), vol_id))
    except ValueError:
        return False
def extract_id_from_snapshot_name(snap_name):
    """Return a snapshot's ID from its name on the backend.

    :returns: the UUID string, or None when the name does not match
              CONF.snapshot_name_template
    """
    regex = re.compile(
        CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
    match = regex.match(snap_name)
    return match.group('uuid') if match else None
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
                          sort_dirs):
    """Paginate a list of entries.

    :param entries: list of dictionaries
    :marker: The last element previously returned
    :limit: The maximum number of items to return
    :offset: The number of items to skip from the marker or from the first
             element.
    :sort_keys: A list of keys in the dictionaries to sort by
    :sort_dirs: A list of sort directions, where each is either 'asc' or 'dec'
    """
    # Pair each sort key's getter with its direction.
    comparers = [(operator.itemgetter(key.strip()), multiplier)
                 for (key, multiplier) in zip(sort_keys, sort_dirs)]

    def comparer(left, right):
        # Classic multi-key cmp: the first differing key decides the order.
        for fn, d in comparers:
            left_val = fn(left)
            right_val = fn(right)
            # Dict values compare by their smallest contained value.
            if isinstance(left_val, dict):
                left_val = sorted(left_val.values())[0]
            if isinstance(right_val, dict):
                right_val = sorted(right_val.values())[0]
            if left_val == right_val:
                continue
            if d == 'asc':
                return -1 if left_val < right_val else 1
            else:
                return -1 if left_val > right_val else 1
        else:
            return 0
    sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))

    start_index = 0
    if offset is None:
        offset = 0
    if marker:
        if not isinstance(marker, dict):
            try:
                # Non-dict markers arrive JSON-encoded.
                marker = json.loads(marker)
            except ValueError:
                msg = _('marker %s can not be analysed, please use json like '
                        'format') % marker
                raise exception.InvalidInput(reason=msg)
        start_index = -1
        for i, entry in enumerate(sorted_entries):
            if entry['reference'] == marker:
                # Resume right after the marker entry.
                start_index = i + 1
                break
        if start_index < 0:
            msg = _('marker not found: %s') % marker
            raise exception.InvalidInput(reason=msg)

    range_end = start_index + limit
    return sorted_entries[start_index + offset:range_end + offset]
def convert_config_string_to_dict(config_string):
    """Convert config file replication string to a dict.

    The only supported form is as follows:
    "{'key-1'='val-1' 'key-2'='val-2'...}"

    :param config_string: Properly formatted string to convert to dict.
    :response: dict of string values
    """
    resultant_dict = {}

    try:
        # Rewrite "'k'='v' 'k2'='v2'" into "'k':'v', 'k2':'v2'" so that
        # ast.literal_eval can parse it as a dict literal.
        normalized = config_string.replace("=", ":").replace(" ", ", ")
        resultant_dict = ast.literal_eval(normalized)
    except Exception:
        LOG.warning("Error encountered translating config_string: "
                    "%(config_string)s to dict",
                    {'config_string': config_string})

    return resultant_dict
def create_encryption_key(context, key_manager, volume_type_id):
    """Create an encryption key for an encrypted volume type.

    :returns: the new key id, or None when the type is not encrypted
    :raises: exception.Invalid when the key manager fails
    """
    encryption_key_id = None
    if volume_types.is_encrypted(context, volume_type_id):
        volume_type_encryption = (
            volume_types.get_volume_type_encryption(context,
                                                    volume_type_id))
        cipher = volume_type_encryption.cipher
        length = volume_type_encryption.key_size
        # Algorithm is the leading cipher component, e.g. 'aes-xts' -> 'aes'.
        algorithm = cipher.split('-')[0] if cipher else None
        try:
            encryption_key_id = key_manager.create_key(
                context,
                algorithm=algorithm,
                length=length)
        except castellan_exception.KeyManagerError:
            # The messaging back to the client here is
            # purposefully terse, so we don't leak any sensitive
            # details.
            LOG.exception("Key manager error")
            raise exception.Invalid(message="Key manager error")

    return encryption_key_id
def delete_encryption_key(context, key_manager, encryption_key_id):
    """Delete an encryption key, retrying with cinder's service credentials.

    A missing key is silently ignored.  On a KeyManagerError the deletion
    is retried once with a service context built from the
    [keystone_authtoken] configuration section.
    """
    try:
        key_manager.delete(context, encryption_key_id)
    except castellan_exception.ManagedObjectNotFoundError:
        # The key no longer exists; nothing to delete.
        pass
    except castellan_exception.KeyManagerError:
        LOG.info("First attempt to delete key id %s failed, retrying with "
                 "cinder's service context.", encryption_key_id)
        conf = CONF
        ks_loading.register_auth_conf_options(conf, 'keystone_authtoken')
        service_context = keystone_password.KeystonePassword(
            password=conf.keystone_authtoken.password,
            auth_url=conf.keystone_authtoken.auth_url,
            username=conf.keystone_authtoken.username,
            user_domain_name=conf.keystone_authtoken.user_domain_name,
            project_name=conf.keystone_authtoken.project_name,
            project_domain_name=conf.keystone_authtoken.project_domain_name)
        try:
            castellan_key_manager.API(conf).delete(service_context,
                                                   encryption_key_id)
        except castellan_exception.ManagedObjectNotFoundError:
            pass
def clone_encryption_key(context, key_manager, encryption_key_id):
    """Store a copy of an encryption key and return the new key id.

    :returns: the id of the stored copy, or None when there is no
              source key to clone
    """
    if encryption_key_id is None:
        return None
    source_key = key_manager.get(context, encryption_key_id)
    return key_manager.store(context, source_key)
def is_replicated_str(str):
    """Return True for an extra-spec value of the form '<is> True'."""
    # NOTE(review): the parameter name shadows the builtin ``str``; kept
    # unchanged for compatibility with existing keyword callers.
    spec = (str or '').split()
    return (len(spec) == 2 and
            spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
    """Return a truthy value when extra_specs marks the type replicated."""
    return (extra_specs and
            is_replicated_str(extra_specs.get('replication_enabled')))
def group_get_by_id(group_id):
    """Fetch a group from the DB using an admin context."""
    ctxt = context.get_admin_context()
    group = db.group_get(ctxt, group_id)
    return group
def is_group_a_cg_snapshot_type(group_or_snap):
    """Return True when the group (or group snapshot) type enables
    consistent group snapshots.
    """
    LOG.debug("Checking if %s is a consistent snapshot group",
              group_or_snap)
    if group_or_snap["group_type_id"] is not None:
        spec = group_types.get_group_type_specs(
            group_or_snap["group_type_id"],
            key="consistent_group_snapshot_enabled"
        )
        # The spec value must match this exact string to count as enabled.
        return spec == "<is> True"
    return False
def is_group_a_type(group, key):
    """Return True when the group's type sets *key* to '<is> True'."""
    if group.group_type_id is not None:
        spec = group_types.get_group_type_specs(
            group.group_type_id, key=key
        )
        return spec == "<is> True"
    return False
def get_max_over_subscription_ratio(str_value, supports_auto=False):
    """Get the max_over_subscription_ratio from a string

    As some drivers need to do some calculations with the value and we are now
    receiving a string value in the conf, this converts the value to float
    when appropriate.

    :param str_value: Configuration object
    :param supports_auto: Tell if the calling driver supports auto MOSR.
    :param drv_msg: Error message from the caller
    :response: value of mosr
    """
    is_auto = str_value == 'auto'
    if is_auto and not supports_auto:
        msg = _("This driver does not support automatic "
                "max_over_subscription_ratio calculation. Please use a "
                "valid float value.")
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)

    if is_auto:
        # Drivers that support auto MOSR receive the literal string back.
        return str_value

    mosr = float(str_value)
    if mosr < 1:
        msg = _("The value of max_over_subscription_ratio must be "
                "greater than 1.")
        LOG.error(msg)
        raise exception.InvalidParameterValue(message=msg)
    return mosr
| |
# -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import traceback
def force_install_reqs():
    """Force-install requirements.txt via pip's internal API.

    Only invoked on Read the Docs builds (see the READTHEDOCS check below
    in this module).

    NOTE(review): ``pip.commands`` is pip's private API and was removed in
    pip 10 — this only works with older pip releases; verify before reuse.
    """
    import logging
    # Mirror pip's output to stderr with a "[pip]" prefix.
    log = logging.getLogger("pip")
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("[pip] %(message)s"))
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    log.info("READTHEDOCS is set, force-installing requirements.txt")

    from pip.commands import install
    req = os.path.join(os.path.dirname(__file__), "requirements.txt")
    cmd = install.InstallCommand()
    # Equivalent to: pip install -v -U -r requirements.txt
    options, args = cmd.parse_args(["-v", "-U", "-r", req])
    cmd.run(options, args)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../lib'))
sys.path.insert(0, os.path.abspath('../..'))  # examples
sys.path.insert(0, os.path.abspath('.'))

import sqlalchemy

# attempt to force pip to definitely get the latest
# versions of libraries, see
# https://github.com/rtfd/readthedocs.org/issues/1293
rtd = os.environ.get('READTHEDOCS', None) == 'True'
if rtd:
    try:
        force_install_reqs()
    except:
        # NOTE(review): bare except deliberately keeps the docs build going
        # even when the forced install fails; the traceback is still printed.
        traceback.print_exc()
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'zzzeeksphinx',
    'changelog',
    'sphinx_paramlinks',
    #'corrections'
]

# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath('templates')]

# Fail the build on any broken cross-reference.
nitpicky = True

# The suffix of source filenames.
source_suffix = '.rst'

# section names used by the changelog extension.
changelog_sections = ["general", "orm", "orm declarative", "orm querying", \
                      "orm configuration", "engine", "sql", \
                      "schema", \
                      "postgresql", "mysql", "sqlite", "mssql", \
                      "oracle", "firebird"]
# tags to sort on inside of sections
changelog_inner_tag_sort = ["feature", "changed", "removed", "bug", "moved"]

# how to render changelog links
changelog_render_ticket = "http://www.sqlalchemy.org/trac/ticket/%s"

changelog_render_pullreq = {
    "bitbucket": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
    "default": "https://bitbucket.org/zzzeek/sqlalchemy/pull-request/%s",
    "github": "https://github.com/zzzeek/sqlalchemy/pull/%s",
}

changelog_render_changeset = "http://www.sqlalchemy.org/trac/changeset/%s"

# Map internal module paths to the public module names used in the docs.
autodocmods_convert_modname = {
    "sqlalchemy.sql.sqltypes": "sqlalchemy.types",
    "sqlalchemy.sql.type_api": "sqlalchemy.types",
    "sqlalchemy.sql.schema": "sqlalchemy.schema",
    "sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
    "sqlalchemy.sql.ddl": "sqlalchemy.schema",
    "sqlalchemy.sql.base": "sqlalchemy.sql.expression",
    "sqlalchemy.engine.base": "sqlalchemy.engine",
    "sqlalchemy.engine.result": "sqlalchemy.engine",
}

# Same mapping, but for specific (module, class) pairs.
autodocmods_convert_modname_w_class = {
    ("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
    ("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'contents'

# General information about the project.
project = u'SQLAlchemy'
copyright = u'2007-2015, the SQLAlchemy authors and contributors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.1"
# The full version, including alpha/beta/rc tags.
release = "1.1.0b1"

release_date = "not released"

# Values consumed by the zzzeeksphinx site-adapter integration
# (presumably on sqlalchemy.org — confirm against that theme's docs).
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"

# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = 3

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'zzzeeksphinx'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%m/%d/%Y %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQLAlchemydoc'
#autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE: plain u'...' literals are used here instead of the Python-2-only
# ur'...' prefix (a SyntaxError on Python 3); the strings contain no
# backslashes, so the raw-string prefix was redundant anyway.
latex_documents = [
    ('contents', 'sqlalchemy_%s.tex' % release.replace('.', '_'),
     u'SQLAlchemy Documentation', u'Mike Bayer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
# sets TOC depth to 3.
latex_preamble = '\setcounter{tocdepth}{3}'

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

#latex_elements = {
#    'papersize': 'letterpaper',
#    'pointsize': '10pt',
#}

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sqlalchemy', u'SQLAlchemy Documentation',
     [u'SQLAlchemy authors'], 1)
]

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'SQLAlchemy'
epub_author = u'SQLAlchemy authors'
epub_publisher = u'SQLAlchemy authors'
epub_copyright = u'2007-2015, SQLAlchemy authors'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Cross-references into external Sphinx documentation sets.
intersphinx_mapping = {
    'alembic': ('http://alembic.readthedocs.org/en/latest/', None),
    'psycopg2': ('http://pythonhosted.org/psycopg2', None),
}
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models.fields import Field
from django.core import exceptions
from django.utils.functional import cached_property
from django.utils.text import capfirst
from django.utils import six
from collectionfield.converter import CollectionConverter
from collectionfield.validators import (
ConvertedMaxLengthValidator, MaxItemsValidator
)
from collectionfield import forms as cf_forms
from .lookups import Has, HasAll, HasAny
def create_get_field_display(field):
    """
    Creates `get_FIELD_display` model method for CollectionField with choices

    :param field: a CollectionField instance
    :return: function
    """

    def get_field_display(obj, delimiter=", ", mapper=None, wrapper=None):
        """
        Renders current field value as string with possibility to
        override the behavior using extra params:

        :param obj: current model instance
        :param delimiter: string used to join the selected options
        :param mapper: function to map value and label into string
            that represents single item of the collection
            (outputs label by default),
        :param wrapper: function that maps joined selected options into final
            string representation (does nothing by default).
        :return: string representation of selected choices
        """
        # Fall back to "use the label" / "pass through" defaults when no
        # overrides are supplied by the caller.
        if mapper is None:
            mapper = lambda value, label: label
        if wrapper is None:
            wrapper = lambda field_display: field_display
        # Django 1.9 exposes `flatchoices` as a property; earlier versions
        # offer the `get_flatchoices()` method instead.
        if hasattr(field, 'flatchoices'):
            value_to_label = dict(field.flatchoices)
        else:
            value_to_label = dict(field.get_flatchoices())
        selected_values = getattr(obj, field.attname)
        rendered_items = [
            mapper(value, value_to_label[value]) for value in selected_values
        ]
        return wrapper(delimiter.join(rendered_items))

    return get_field_display
class CollectionField(Field):
    """Model field to store collections, serialized to a delimited string in
    a CharField-typed database column."""

    def __init__(self, verbose_name=None, name=None, collection_type=list,
                 item_type=six.text_type, sort=False, unique_items=None,
                 max_items=None, delimiter='|', **kwargs):
        """
        :param collection_type: type (class) of this collection
        :param item_type: type (class) of this collection's items
        :param sort: should items be sorted automatically?
        :param unique_items: should duplicate items be dropped?
        :param max_items: maximum number of items (enforced on validation)
        :param delimiter: separates items in string representation stored in db
        :param max_length: maximum length string representation
            of this collection
        """
        self.collection_type = collection_type
        self.item_type = item_type
        self.sort = sort
        self.unique_items = unique_items
        self.max_items = max_items
        self.delimiter = delimiter
        # Defaults mirrored in deconstruct() below: 1024 max_length and an
        # empty collection (the class itself, called lazily) as default.
        kwargs['max_length'] = kwargs.get('max_length', 1024)
        kwargs['default'] = kwargs.get('default', self.collection_type)
        # An empty collection must count as "empty" for Django's
        # blank/required machinery; copy the list to avoid mutating the
        # class-level Field.empty_values.
        empty_collection = collection_type()
        if empty_collection not in self.empty_values:
            self.empty_values = self.empty_values[:] + [empty_collection]
        super(CollectionField, self).__init__(
            verbose_name=verbose_name, name=name, **kwargs
        )
        if self.max_items:
            self.validators.append(MaxItemsValidator(self.max_items))
        # Validate the length of the *serialized* string, since that is what
        # is actually stored in the database column.
        self.validators.append(
            ConvertedMaxLengthValidator(
                limit_value=self.max_length,
                collection_type=self.collection_type,
                item_type=self.item_type,
                sort=self.sort,
                unique_items=self._has_unique_items(),
                delimiter=self.delimiter
            )
        )

    def deconstruct(self):
        # Emit only non-default options so generated migrations stay minimal.
        name, path, args, kwargs = super(CollectionField, self).deconstruct()
        if self.collection_type is not list:
            kwargs['collection_type'] = self.collection_type
        if self.item_type is not six.text_type:
            kwargs['item_type'] = self.item_type
        if self.sort:
            kwargs['sort'] = True
        if self.unique_items is not None:
            kwargs['unique_items'] = self.unique_items
        if self.max_items is not None:
            kwargs['max_items'] = self.max_items
        if self.delimiter != '|':
            kwargs['delimiter'] = self.delimiter
        if kwargs.get('max_length') == 1024:
            del kwargs['max_length']
        if kwargs.get('default') is self.collection_type:
            del kwargs['default']
        return name, path, args, kwargs

    def get_internal_type(self):
        # Stored in the database as a plain varchar column.
        return 'CharField'

    def from_db_value(self, value, expression, connection, context):
        # Database string -> collection instance.
        return self.collection_converter.load(value)

    def to_python(self, value):
        # Already-deserialized values pass through untouched.
        if isinstance(value, self.collection_type):
            return value
        else:
            return self.collection_converter.load(value)

    def get_prep_value(self, value):
        # Collection instance -> database string.
        return self.collection_converter.dump(value)

    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        value = self.value_from_object(obj)
        return self.get_prep_value(value)

    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError.
        Copied to fix builtin validation of choices.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self.choices and value not in self.empty_values:
            # Unlike the base Field.validate (which checks the value as a
            # whole), every item of the collection must be a valid choice.
            selected_choices = set(value)
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if optgroup_key in selected_choices:
                            selected_choices.remove(optgroup_key)
                elif option_key in selected_choices:
                    selected_choices.remove(option_key)
            # Anything left over was not a configured choice.
            if selected_choices:
                raise exceptions.ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': list(selected_choices)[0]},
                )
        if value is None and not self.null:
            raise exceptions.ValidationError(
                self.error_messages['null'], code='null'
            )
        if not self.blank and value in self.empty_values:
            raise exceptions.ValidationError(
                self.error_messages['blank'], code='blank'
            )

    def formfield(self, **kwargs):
        """Form field instance for this model field"""
        if self.choices:
            form_class = (
                kwargs.pop('choices_form_class', None) or
                cf_forms.CollectionChoiceField
            )
        else:
            form_class = (
                kwargs.pop('form_class', None) or cf_forms.CollectionField
            )
        if getattr(form_class, 'collection_field_compatible', False):
            defaults = {
                'required': not self.blank,
                'label': capfirst(self.verbose_name),
                'help_text': self.help_text,
                # CollectionField-specific params:
                'collection_type': self.collection_type,
                'item_type': self.item_type,
                'sort': self.sort,
                'unique_items': self._has_unique_items(),
                'max_items': self.max_items,
                'max_length': self.max_length,
            }
            if self.has_default():
                if callable(self.default):
                    defaults['initial'] = self.default
                    defaults['show_hidden_initial'] = True
                else:
                    defaults['initial'] = self.get_default()
            if self.choices:
                defaults['choices'] = self.get_choices(include_blank=False)
            defaults.update(kwargs)
            return form_class(**defaults)
        else:
            # Form class does not understand collection kwargs; fall back to
            # stock Field.formfield behavior.
            return super(CollectionField, self).formfield(**kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        """Overrides `get_FIELD_display` on model class"""
        super(CollectionField, self).contribute_to_class(
            cls, name, virtual_only=virtual_only
        )
        if self.choices:
            setattr(
                cls, 'get_{0}_display'.format(name),
                create_get_field_display(self)
            )

    # Custom methods:

    def _has_unique_items(self):
        # Choice-based collections are always treated as unique.
        return bool(self.unique_items) if not self.choices else True

    @cached_property
    def collection_converter(self):
        # Serializer/deserializer for this field's collection; cached since
        # field configuration is immutable after __init__.
        return CollectionConverter(
            collection_type=self.collection_type,
            item_type=self.item_type,
            sort=self.sort,
            unique_items=self._has_unique_items(),
            delimiter=self.delimiter
        )
# Register custom field lookups so that `field__has`, `field__hasall` and
# `field__hasany` can be used in QuerySet filters:
CollectionField.register_lookup(Has)
CollectionField.register_lookup(HasAll)
CollectionField.register_lookup(HasAny)
| |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""
The pex.bin.pex utility builds PEX environments and .pex files specified by
sources, requirements and their dependencies.
"""
from __future__ import absolute_import, print_function
import os
import sys
import tempfile
from argparse import Action, ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from textwrap import TextWrapper
from pex import pex_warnings
from pex.common import atomic_directory, die, open_zip, safe_mkdtemp
from pex.inherit_path import InheritPath
from pex.interpreter import PythonInterpreter
from pex.interpreter_constraints import (
UnsatisfiableInterpreterConstraintsError,
validate_constraints,
)
from pex.jobs import DEFAULT_MAX_JOBS
from pex.network_configuration import NetworkConfiguration
from pex.orderedset import OrderedSet
from pex.pex import PEX
from pex.pex_bootstrapper import ensure_venv, iter_compatible_interpreters
from pex.pex_builder import CopyMode, PEXBuilder
from pex.pip import ResolverVersion
from pex.platforms import Platform
from pex.resolver import Unsatisfiable, parsed_platform, resolve_from_pex, resolve_multi
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING
from pex.variables import ENV, Variables
from pex.venv_bin_path import BinPath
from pex.version import __version__
if TYPE_CHECKING:
from typing import List, Iterable
from argparse import Namespace
# Process exit codes used by the pex CLI to signal specific failure modes.
CANNOT_SETUP_INTERPRETER = 102
INVALID_OPTIONS = 103
def log(msg, V=0):
    """Emit *msg* on stderr, but only when verbosity ``V`` is nonzero."""
    if V == 0:
        return
    print(msg, file=sys.stderr)
# Default package index and default manylinux standard offered by the
# resolver options below.
_PYPI = "https://pypi.org/simple"
_DEFAULT_MANYLINUX_STANDARD = "manylinux2014"
class HandleBoolAction(Action):
    """Argparse action for paired ``--flag`` / ``--no-flag`` boolean options.

    The option consumes no argument; the stored value is True unless the
    option string actually used starts with ``--no``.
    """

    def __init__(self, *args, **kwargs):
        kwargs["nargs"] = 0  # pure flag: never consumes a value
        super(HandleBoolAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        enabled = not option_str.startswith("--no")
        setattr(namespace, self.dest, enabled)
class ManylinuxAction(Action):
    """Argparse action for ``--manylinux`` / ``--no-manylinux`` options.

    The ``--no-*`` variants clear the setting; otherwise the supplied value
    must name a manylinux standard (e.g. ``manylinux1``, ``manylinux2014``).
    """

    def __init__(self, *args, **kwargs):
        # BUGFIX: the value must be optional so the advertised bare
        # `--no-manylinux` / `--no-use-manylinux` forms parse at all; without
        # nargs="?" argparse rejects them for lacking an argument. This
        # mirrors the technique used by HandleVenvAction below.
        kwargs["nargs"] = "?"
        super(ManylinuxAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        if option_str.startswith("--no"):
            setattr(namespace, self.dest, None)
        elif value and value.startswith("manylinux"):
            # Guarding on `value` also turns a bare `--manylinux` (value None)
            # into the ArgumentTypeError below instead of an AttributeError.
            setattr(namespace, self.dest, value)
        else:
            raise ArgumentTypeError(
                "Please specify a manylinux standard; ie: --manylinux=manylinux1. "
                "Given {}".format(value)
            )
class HandleTransitiveAction(Action):
    """Argparse action for ``--transitive`` / ``--no-transitive`` /
    ``--intransitive``: stores True only for the exact ``--transitive``
    option string."""

    def __init__(self, *args, **kwargs):
        kwargs["nargs"] = 0  # pure flag: never consumes a value
        super(HandleTransitiveAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        is_transitive = option_str == "--transitive"
        setattr(namespace, self.dest, is_transitive)
class HandleVenvAction(Action):
    """Argparse action for ``--venv[={prepend,append}]``.

    The optional value selects where venv scripts land on the PATH; omitting
    the value maps to BinPath.FALSE (venv mode without PATH manipulation).
    """

    def __init__(self, *args, **kwargs):
        kwargs["nargs"] = "?"  # the {prepend,append} value is optional
        kwargs["choices"] = (BinPath.PREPEND.value, BinPath.APPEND.value)
        super(HandleVenvAction, self).__init__(*args, **kwargs)

    def __call__(self, parser, namespace, value, option_str=None):
        if value is None:
            bin_path = BinPath.FALSE
        else:
            bin_path = BinPath.for_value(value)
        setattr(namespace, self.dest, bin_path)
class PrintVariableHelpAction(Action):
    """Print help for every PEX_* runtime variable, then exit the process."""

    def __call__(self, parser, namespace, values, option_str=None):
        # The same indentation is applied to every help body, so one wrapper
        # instance can be shared across the loop.
        wrapper = TextWrapper(initial_indent=" " * 4, subsequent_indent=" " * 4)
        for variable_name, variable_type, variable_help in Variables.iter_help():
            print("\n%s: %s\n" % (variable_name, variable_type))
            for line in wrapper.wrap(variable_help):
                print(line)
        sys.exit(0)
def process_platform(option_str):
    """Parse a ``--platform`` CLI value into a platform object.

    Used as an argparse ``type=`` callable; invalid platform strings are
    re-raised as ``ArgumentTypeError`` so argparse reports a usage error.
    """
    try:
        return parsed_platform(option_str)
    except Platform.InvalidPlatformError as e:
        raise ArgumentTypeError("{} is an invalid platform:\n{}".format(option_str, e))
def configure_clp_pex_resolution(parser):
    # type: (ArgumentParser) -> None
    """Add the "Resolver options" argument group to *parser*.

    Covers index/find-links configuration, network settings, and the knobs
    that control how distributions are resolved and built.
    """
    group = parser.add_argument_group(
        "Resolver options",
        "Tailor how to find, resolve and translate the packages that get put into the PEX "
        "environment.",
    )
    group.add_argument(
        "--resolver-version",
        dest="resolver_version",
        default=ResolverVersion.PIP_LEGACY.value,
        choices=[choice.value for choice in ResolverVersion.values],
        help="The dependency resolver version to use. Read more at "
        "https://pip.pypa.io/en/stable/user_guide/#resolver-changes-2020",
    )
    group.add_argument(
        "--pypi",
        "--no-pypi",
        "--no-index",
        dest="pypi",
        action=HandleBoolAction,
        default=True,
        help="Whether to use PyPI to resolve dependencies.",
    )
    group.add_argument(
        "--pex-path",
        dest="pex_path",
        type=str,
        default=None,
        help="A colon separated list of other pex files to merge into the runtime environment.",
    )
    group.add_argument(
        "-f",
        "--find-links",
        "--repo",
        metavar="PATH/URL",
        action="append",
        dest="find_links",
        type=str,
        default=[],
        help="Additional repository path (directory or URL) to look for requirements.",
    )
    group.add_argument(
        "-i",
        "--index",
        "--index-url",
        metavar="URL",
        action="append",
        dest="indexes",
        type=str,
        help="Additional cheeseshop indices to use to satisfy requirements.",
    )
    # BUGFIX: this option was registered on `parser` directly, unlike every
    # sibling option, which placed it outside the "Resolver options" help
    # group; register it on `group` for consistent --help output.
    group.add_argument(
        "--pex-repository",
        dest="pex_repository",
        metavar="FILE",
        default=None,
        type=str,
        help=(
            "Resolve requirements from the given PEX file instead of from --index servers or "
            "--find-links repos."
        ),
    )
    # Instantiated once so the --retries/--timeout defaults shown in --help
    # match the library defaults.
    default_net_config = NetworkConfiguration()
    group.add_argument(
        "--cache-ttl",
        metavar="DEPRECATED",
        default=None,
        type=int,
        help="Deprecated: No longer used.",
    )
    group.add_argument(
        "--retries",
        default=default_net_config.retries,
        type=int,
        help="Maximum number of retries each connection should attempt.",
    )
    group.add_argument(
        "--timeout",
        metavar="SECS",
        default=default_net_config.timeout,
        type=int,
        help="Set the socket timeout in seconds.",
    )
    group.add_argument(
        "-H",
        "--header",
        dest="headers",
        metavar="DEPRECATED",
        default=None,
        type=str,
        action="append",
        help="Deprecated: No longer used.",
    )
    group.add_argument(
        "--proxy",
        type=str,
        default=None,
        help="Specify a proxy in the form [user:passwd@]proxy.server:port.",
    )
    group.add_argument(
        "--cert", metavar="PATH", type=str, default=None, help="Path to alternate CA bundle."
    )
    group.add_argument(
        "--client-cert",
        metavar="PATH",
        type=str,
        default=None,
        help="Path to an SSL client certificate which should be a single file containing the private "
        "key and the certificate in PEM format.",
    )
    group.add_argument(
        "--pre",
        "--no-pre",
        dest="allow_prereleases",
        default=False,
        action=HandleBoolAction,
        help="Whether to include pre-release and development versions of requirements.",
    )
    group.add_argument(
        "--disable-cache",
        dest="disable_cache",
        default=False,
        action="store_true",
        help="Disable caching in the pex tool entirely.",
    )
    group.add_argument(
        "--cache-dir",
        dest="cache_dir",
        default=None,
        help="DEPRECATED: Use --pex-root instead. "
        "The local cache directory to use for speeding up requirement lookups.",
    )
    group.add_argument(
        "--wheel",
        "--no-wheel",
        "--no-use-wheel",
        dest="use_wheel",
        default=True,
        action=HandleBoolAction,
        help="Whether to allow wheel distributions.",
    )
    group.add_argument(
        "--build",
        "--no-build",
        dest="build",
        default=True,
        action=HandleBoolAction,
        help="Whether to allow building of distributions from source.",
    )
    group.add_argument(
        "--manylinux",
        "--no-manylinux",
        "--no-use-manylinux",
        dest="manylinux",
        type=str,
        default=_DEFAULT_MANYLINUX_STANDARD,
        action=ManylinuxAction,
        help="Whether to allow resolution of manylinux wheels for linux target platforms.",
    )
    group.add_argument(
        "--transitive",
        "--no-transitive",
        "--intransitive",
        dest="transitive",
        default=True,
        action=HandleTransitiveAction,
        help="Whether to transitively resolve requirements.",
    )
    group.add_argument(
        "-j",
        "--jobs",
        metavar="JOBS",
        dest="max_parallel_jobs",
        type=int,
        default=DEFAULT_MAX_JOBS,
        # BUGFIX: a space was missing between "other" and "processes" in the
        # rendered help text.
        help="The maximum number of parallel jobs to use when resolving, building and installing "
        "distributions. You might want to increase the maximum number of parallel jobs to "
        "potentially improve the latency of the pex creation process at the expense of other "
        "processes on your system.",
    )
def configure_clp_pex_options(parser):
    # type: (ArgumentParser) -> None
    """Add the "PEX output options" argument group to *parser*.

    These options tailor the emitted .pex file: execution mode (zip-safe /
    unzip / venv), caching behavior, reproducibility and runtime environment
    handling.
    """
    group = parser.add_argument_group(
        "PEX output options",
        "Tailor the behavior of the emitted .pex file if -o is specified.",
    )
    group.add_argument(
        "--include-tools",
        dest="include_tools",
        default=False,
        action=HandleBoolAction,
        help="Whether to include runtime tools in the pex file. If included, these can be run by "
        "exporting PEX_TOOLS=1 and following the usage and --help information.",
    )
    group.add_argument(
        "--zip-safe",
        "--not-zip-safe",
        dest="zip_safe",
        default=True,
        action=HandleBoolAction,
        help="Whether or not the sources in the pex file are zip safe. If they are not zip safe, "
        "they will be written to disk prior to execution. Also see --unzip which will cause the "
        "complete pex file, including dependencies, to be unzipped.",
    )
    # --unzip and --venv select mutually exclusive runtime execution modes.
    runtime_mode = group.add_mutually_exclusive_group()
    runtime_mode.add_argument(
        "--unzip",
        "--no-unzip",
        dest="unzip",
        default=False,
        action=HandleBoolAction,
        help="Whether or not the pex file should be unzipped before executing it. If the pex file will "
        "be run multiple times under a stable runtime PEX_ROOT the unzipping will only be "
        "performed once and subsequent runs will enjoy lower startup latency.",
    )
    runtime_mode.add_argument(
        "--venv",
        dest="venv",
        metavar="{prepend,append}",
        default=False,
        action=HandleVenvAction,
        # BUGFIX: the help text contained a duplicated word ("If the the pex
        # file will be run ...").
        help="Convert the pex file to a venv before executing it. If 'prepend' or 'append' is "
        "specified, then all scripts and console scripts provided by distributions in the pex file "
        "will be added to the PATH in the corresponding position. If the pex file will be run "
        "multiple times under a stable runtime PEX_ROOT, the venv creation will only be done once "
        "and subsequent runs will enjoy lower startup latency.",
    )
    group.add_argument(
        "--venv-copies",
        "--no-venv-copies",
        dest="venv_copies",
        default=False,
        action=HandleBoolAction,
        help=(
            "If --venv is specified, create the venv using copies of base interpreter files "
            "instead of symlinks."
        ),
    )
    group.add_argument(
        "--always-write-cache",
        dest="always_write_cache",
        default=False,
        action="store_true",
        help="Always write the internally cached distributions to disk prior to invoking "
        "the pex source code. This can use less memory in RAM constrained "
        "environments.",
    )
    group.add_argument(
        "--ignore-errors",
        dest="ignore_errors",
        default=False,
        action="store_true",
        help="Ignore requirement resolution solver errors when building pexes and later invoking "
        "them.",
    )
    group.add_argument(
        "--inherit-path",
        dest="inherit_path",
        default=InheritPath.FALSE.value,
        choices=[choice.value for choice in InheritPath.values],
        help="Inherit the contents of sys.path (including site-packages, user site-packages and "
        "PYTHONPATH) running the pex. Possible values: {false} (does not inherit sys.path), "
        "{fallback} (inherits sys.path after packaged dependencies), {prefer} (inherits sys.path "
        "before packaged dependencies), No value (alias for prefer, for backwards "
        "compatibility).".format(
            false=InheritPath.FALSE, fallback=InheritPath.FALLBACK, prefer=InheritPath.PREFER
        ),
    )
    group.add_argument(
        "--compile",
        "--no-compile",
        dest="compile",
        default=False,
        action=HandleBoolAction,
        help="Compiling means that the built pex will include .pyc files, which will result in "
        "slightly faster startup performance. However, compiling means that the generated pex "
        "likely will not be reproducible, meaning that if you were to run `./pex -o` with the "
        "same inputs then the new pex would not be byte-for-byte identical to the original.",
    )
    group.add_argument(
        "--use-system-time",
        "--no-use-system-time",
        dest="use_system_time",
        default=False,
        action=HandleBoolAction,
        help="Use the current system time to generate timestamps for the new pex. Otherwise, Pex "
        "will use midnight on January 1, 1980. By using system time, the generated pex "
        "will not be reproducible, meaning that if you were to run `./pex -o` with the "
        "same inputs then the new pex would not be byte-for-byte identical to the original.",
    )
    group.add_argument(
        "--runtime-pex-root",
        dest="runtime_pex_root",
        default=None,
        help="Specify the pex root to be used in the generated .pex file (if unspecified, "
        "uses ~/.pex).",
    )
    group.add_argument(
        "--strip-pex-env",
        "--no-strip-pex-env",
        dest="strip_pex_env",
        default=True,
        action=HandleBoolAction,
        help="Strip all `PEX_*` environment variables used to control the pex runtime before handing "
        "off control to the pex entrypoint. You might want to set this to `False` if the new "
        "pex executes other pexes (or the Pex CLI itself) and you want the executed pex to be "
        "controllable via `PEX_*` environment variables.",
    )
def configure_clp_pex_environment(parser):
    # type: (ArgumentParser) -> None
    """Add the "PEX environment options" argument group to *parser*:
    interpreter selection, interpreter constraints, shebang and target
    platforms."""
    group = parser.add_argument_group(
        "PEX environment options",
        "Tailor the interpreter and platform targets for the PEX environment.",
    )
    group.add_argument(
        "--python",
        dest="python",
        default=[],
        type=str,
        action="append",
        help=(
            "The Python interpreter to use to build the PEX (default: current interpreter). This "
            "cannot be used with `--interpreter-constraint`, which will instead cause PEX to "
            "search for valid interpreters. Either specify an absolute path to an interpreter, or "
            "specify a binary accessible on $PATH like `python3.7`. This option can be passed "
            "multiple times to create a multi-interpreter compatible PEX."
        ),
    )
    group.add_argument(
        "--python-path",
        dest="python_path",
        default=None,
        type=str,
        help=(
            "Colon-separated paths to search for interpreters when `--interpreter-constraint` "
            "and/or `--resolve-local-platforms` are specified (default: $PATH). Each element "
            "can be the absolute path of an interpreter binary or a directory containing "
            "interpreter binaries."
        ),
    )
    # Example commands interpolated into the help texts below, built against
    # the interpreter running this process.
    # NOTE(review): "singe_interpreter_info_cmd" looks like a typo for
    # "single_..."; left unchanged since this is a documentation-only pass.
    current_interpreter = PythonInterpreter.get()
    program = sys.argv[0]
    singe_interpreter_info_cmd = (
        "PEX_TOOLS=1 {current_interpreter} {program} interpreter --verbose --indent 4".format(
            current_interpreter=current_interpreter.binary, program=program
        )
    )
    all_interpreters_info_cmd = (
        "PEX_TOOLS=1 {program} interpreter --all --verbose --indent 4".format(program=program)
    )
    group.add_argument(
        "--interpreter-constraint",
        dest="interpreter_constraint",
        default=[],
        type=str,
        action="append",
        help=(
            "Constrain the selected Python interpreter. Specify with Requirement-style syntax, "
            'e.g. "CPython>=2.7,<3" (A CPython interpreter with version >=2.7 AND version <3), '
            '">=2.7,<3" (Any Python interpreter with version >=2.7 AND version <3) or "PyPy" (A '
            "PyPy interpreter of any version). This argument may be repeated multiple times to OR "
            "the constraints. Try `{singe_interpreter_info_cmd}` to find the exact interpreter "
            "constraints of {current_interpreter} and `{all_interpreters_info_cmd}` to find out "
            "the interpreter constraints of all Python interpreters on the $PATH.".format(
                current_interpreter=current_interpreter.binary,
                singe_interpreter_info_cmd=singe_interpreter_info_cmd,
                all_interpreters_info_cmd=all_interpreters_info_cmd,
            )
        ),
    )
    group.add_argument(
        "--rcfile",
        dest="rc_file",
        default=None,
        help=(
            "An additional path to a pexrc file to read during configuration parsing, in addition "
            "to reading `/etc/pexrc` and `~/.pexrc`. If `PEX_IGNORE_RCFILES=true`, then all rc "
            "files will be ignored."
        ),
    )
    group.add_argument(
        "--python-shebang",
        dest="python_shebang",
        default=None,
        help="The exact shebang (#!...) line to add at the top of the PEX file minus the "
        "#!. This overrides the default behavior, which picks an environment Python "
        "interpreter compatible with the one used to build the PEX file.",
    )
    group.add_argument(
        "--platform",
        dest="platforms",
        default=[],
        type=process_platform,
        action="append",
        help=(
            "The platform for which to build the PEX. This option can be passed multiple times "
            "to create a multi-platform pex. To use the platform corresponding to the current "
            "interpreter you can pass `current`. To target any other platform you pass a string "
            "composed of fields: <platform>-<python impl abbr>-<python version>-<abi>. "
            "These fields stem from wheel name conventions as outlined in "
            "https://www.python.org/dev/peps/pep-0427#file-name-convention and influenced by "
            "https://www.python.org/dev/peps/pep-0425. For the current interpreter at "
            "{current_interpreter} the full platform string is {current_platform}. To find out "
            "more, try `{all_interpreters_info_cmd}` to print out the platform for all "
            "interpreters on the $PATH or `{singe_interpreter_info_cmd}` to inspect the single "
            "interpreter {current_interpreter}.".format(
                current_interpreter=current_interpreter.binary,
                current_platform=current_interpreter.platform,
                singe_interpreter_info_cmd=singe_interpreter_info_cmd,
                all_interpreters_info_cmd=all_interpreters_info_cmd,
            )
        ),
    )
    group.add_argument(
        "--resolve-local-platforms",
        dest="resolve_local_platforms",
        default=False,
        action=HandleBoolAction,
        help="When --platforms are specified, attempt to resolve a local interpreter that matches "
        "each platform specified. If found, use the interpreter to resolve distributions; if "
        "not (or if this option is not specified), resolve for each platform only allowing "
        "matching binary distributions and failing if only sdists or non-matching binary "
        "distributions can be found.",
    )
def configure_clp_pex_entry_points(parser):
    # type: (ArgumentParser) -> None
    """Register the "PEX entry point options" group on ``parser``.

    Adds the options controlling what the built PEX invokes at runtime:
    ``-m/-e/--entry-point``, ``-c/--script/--console-script`` and
    ``--validate-entry-point``.
    """
    group = parser.add_argument_group(
        "PEX entry point options",
        "Specify what target/module the PEX should invoke if any.",
    )

    group.add_argument(
        "-m",
        "-e",
        "--entry-point",
        dest="entry_point",
        metavar="MODULE[:SYMBOL]",
        default=None,
        # Typo fixes: "pex assume ... a n0-arg callable" -> "pex assumes ... a 0-arg callable".
        help="Set the entry point to module or module:symbol. If just specifying module, pex "
        "behaves like python -m, e.g. python -m SimpleHTTPServer. If specifying "
        "module:symbol, pex assumes symbol is a 0-arg callable and imports that symbol and invokes "
        "it as if via `sys.exit(symbol())`.",
    )
    group.add_argument(
        "-c",
        "--script",
        "--console-script",
        dest="script",
        default=None,
        metavar="SCRIPT_NAME",
        # Typo fix: "as defined by a any of" -> "as defined by any of".
        help="Set the entry point as to the script or console_script as defined by any of the "
        'distributions in the pex. For example: "pex -c fab fabric" or "pex -c mturk boto".',
    )
    group.add_argument(
        "--validate-entry-point",
        dest="validate_ep",
        default=False,
        action="store_true",
        help="Validate the entry point by importing it in separate process. Warning: this could have "
        "side effects. For example, entry point `a.b.c:m` will translate to "
        "`from a.b.c import m` during validation.",
    )
def configure_clp():
    # type: () -> ArgumentParser
    """Build and return the fully-configured top-level `pex` argument parser.

    Sets up the usage banner, registers the delegated option groups
    (resolution, pex options, environment, entry points) and all
    stand-alone options, supporting @file argument expansion.
    """
    usage = (
        "%(prog)s [-o OUTPUT.PEX] [options] [-- arg1 arg2 ...]\n\n"
        "%(prog)s builds a PEX (Python Executable) file based on the given specifications: "
        "sources, requirements, their dependencies and other options."
        "\n"
        "Command-line options can be provided in one or more files by prefixing the filenames "
        "with an @ symbol. These files must contain one argument per line."
    )

    parser = ArgumentParser(
        usage=usage,
        formatter_class=ArgumentDefaultsHelpFormatter,
        fromfile_prefix_chars="@",
    )
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument("requirements", nargs="*", help="Requirements to add to the pex")
    # Delegated option groups; each helper registers one argparse group.
    configure_clp_pex_resolution(parser)
    configure_clp_pex_options(parser)
    configure_clp_pex_environment(parser)
    configure_clp_pex_entry_points(parser)

    parser.add_argument(
        "-o",
        "--output-file",
        dest="pex_name",
        default=None,
        help="The name of the generated .pex file: Omitting this will run PEX "
        "immediately and not save it to a file.",
    )
    parser.add_argument(
        "-p",
        "--preamble-file",
        dest="preamble_file",
        metavar="FILE",
        default=None,
        type=str,
        help="The name of a file to be included as the preamble for the generated .pex file",
    )
    parser.add_argument(
        "-D",
        "--sources-directory",
        dest="sources_directory",
        metavar="DIR",
        default=[],
        type=str,
        action="append",
        help=(
            "Add a directory containing sources and/or resources to be packaged into the generated "
            ".pex file. This option can be used multiple times."
        ),
    )
    parser.add_argument(
        "-R",
        "--resources-directory",
        dest="resources_directory",
        metavar="DIR",
        default=[],
        type=str,
        action="append",
        help=(
            "Add resources directory to be packaged into the generated .pex file."
            " This option can be used multiple times. DEPRECATED: Use -D/--sources-directory "
            "instead."
        ),
    )
    parser.add_argument(
        "-r",
        "--requirement",
        dest="requirement_files",
        metavar="FILE or URL",
        default=[],
        type=str,
        action="append",
        help="Add requirements from the given requirements file. This option can be used multiple "
        "times.",
    )
    parser.add_argument(
        "--constraints",
        dest="constraint_files",
        metavar="FILE or URL",
        default=[],
        type=str,
        action="append",
        help="Add constraints from the given constraints file. This option can be used multiple "
        "times.",
    )
    parser.add_argument(
        "--requirements-pex",
        dest="requirements_pexes",
        metavar="FILE",
        default=[],
        type=str,
        action="append",
        help="Add requirements from the given .pex file. This option can be used multiple times.",
    )
    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
        help="Turn on logging verbosity, may be specified multiple times.",
    )
    parser.add_argument(
        "--emit-warnings",
        "--no-emit-warnings",
        dest="emit_warnings",
        action=HandleBoolAction,
        default=True,
        help="Emit runtime UserWarnings on stderr. If false, only emit them when PEX_VERBOSE is set.",
    )
    parser.add_argument(
        "--pex-root",
        dest="pex_root",
        default=None,
        help="Specify the pex root used in this invocation of pex "
        "(if unspecified, uses {}).".format(ENV.PEX_ROOT),
    )
    parser.add_argument(
        "--tmpdir",
        dest="tmpdir",
        default=tempfile.gettempdir(),
        help="Specify the temporary directory Pex and its subprocesses should use.",
    )
    parser.add_argument(
        "--seed",
        "--no-seed",
        dest="seed",
        action=HandleBoolAction,
        default=False,
        help="Seed local Pex caches for the generated PEX and print out the command line to run "
        "directly from the seed with.",
    )
    parser.add_argument(
        "--help-variables",
        action=PrintVariableHelpAction,
        nargs=0,
        help="Print out help about the various environment variables used to change the behavior of "
        "a running PEX file.",
    )
    return parser
def _safe_link(src, dst):
try:
os.unlink(dst)
except OSError:
pass
os.symlink(src, dst)
def compute_indexes(options):
    # type: (Namespace) -> List[str]
    """Return the de-duplicated index URLs to resolve against, PyPI first when enabled."""
    extra_indexes = options.indexes or []
    combined = [_PYPI] + extra_indexes if options.pypi else extra_indexes
    return list(OrderedSet(combined))
def build_pex(reqs, options, cache=None):
    """Resolve interpreters, platforms and distributions per the parsed CLI
    ``options`` and return a populated (but not yet frozen) PEXBuilder.

    :param reqs: requirement strings given as positional CLI arguments.
    :param options: the parsed argparse Namespace.
    :param cache: optional resolver cache directory passed through to resolve_multi.
    """
    interpreters = None  # Default to the current interpreter.
    pex_python_path = options.python_path  # If None, this will result in using $PATH.
    # TODO(#1075): stop looking at PEX_PYTHON_PATH and solely consult the `--python-path` flag.
    if pex_python_path is None and (options.rc_file or not ENV.PEX_IGNORE_RCFILES):
        rc_variables = Variables(rc=options.rc_file)
        pex_python_path = rc_variables.PEX_PYTHON_PATH

    # NB: options.python and interpreter constraints cannot be used together.
    if options.python:
        with TRACER.timed("Resolving interpreters", V=2):

            def to_python_interpreter(full_path_or_basename):
                # Accept either a concrete path to a binary or a name to look up on $PATH.
                if os.path.isfile(full_path_or_basename):
                    return PythonInterpreter.from_binary(full_path_or_basename)
                else:
                    interp = PythonInterpreter.from_env(full_path_or_basename)
                    if interp is None:
                        die("Failed to find interpreter: %s" % full_path_or_basename)
                    return interp

            interpreters = [to_python_interpreter(interp) for interp in options.python]
    elif options.interpreter_constraint:
        with TRACER.timed("Resolving interpreters", V=2):
            constraints = options.interpreter_constraint
            validate_constraints(constraints)
            try:
                interpreters = list(
                    iter_compatible_interpreters(
                        path=pex_python_path, interpreter_constraints=constraints
                    )
                )
            except UnsatisfiableInterpreterConstraintsError as e:
                die(
                    e.create_message("Could not find a compatible interpreter."),
                    CANNOT_SETUP_INTERPRETER,
                )

    platforms = OrderedSet(options.platforms)
    interpreters = interpreters or []
    if options.platforms and options.resolve_local_platforms:
        # Try to swap requested foreign platforms for local interpreters that
        # support them; any platform satisfied this way is removed from `platforms`.
        with TRACER.timed(
            "Searching for local interpreters matching {}".format(", ".join(map(str, platforms)))
        ):
            candidate_interpreters = OrderedSet(iter_compatible_interpreters(path=pex_python_path))
            candidate_interpreters.add(PythonInterpreter.get())
            for candidate_interpreter in candidate_interpreters:
                resolved_platforms = candidate_interpreter.supported_platforms.intersection(
                    platforms
                )
                if resolved_platforms:
                    for resolved_platform in resolved_platforms:
                        TRACER.log(
                            "Resolved {} for platform {}".format(
                                candidate_interpreter, resolved_platform
                            )
                        )
                        platforms.remove(resolved_platform)
                    interpreters.append(candidate_interpreter)
        if platforms:
            TRACER.log(
                "Could not resolve a local interpreter for {}, will resolve only binary distributions "
                "for {}.".format(
                    ", ".join(map(str, platforms)),
                    "this platform" if len(platforms) == 1 else "these platforms",
                )
            )

    interpreter = (
        PythonInterpreter.latest_release_of_min_compatible_version(interpreters)
        if interpreters
        else None
    )

    try:
        with open(options.preamble_file) as preamble_fd:
            preamble = preamble_fd.read()
    except TypeError:
        # options.preamble_file is None
        preamble = None

    pex_builder = PEXBuilder(
        path=safe_mkdtemp(),
        interpreter=interpreter,
        preamble=preamble,
        copy_mode=CopyMode.SYMLINK,
        include_tools=options.include_tools or options.venv,
    )

    if options.resources_directory:
        pex_warnings.warn(
            "The `-R/--resources-directory` option is deprecated. Resources should be added via "
            "`-D/--sources-directory` instead."
        )

    # Package every file under each sources/resources directory, preserving
    # its path relative to that directory.
    for directory in OrderedSet(options.sources_directory + options.resources_directory):
        src_dir = os.path.normpath(directory)
        for root, _, files in os.walk(src_dir):
            for f in files:
                src_file_path = os.path.join(root, f)
                dst_path = os.path.relpath(src_file_path, src_dir)
                pex_builder.add_source(src_file_path, dst_path)

    # Transcribe runtime behavior flags into PEX-INFO.
    pex_info = pex_builder.info
    pex_info.zip_safe = options.zip_safe
    pex_info.unzip = options.unzip
    pex_info.venv = bool(options.venv)
    pex_info.venv_bin_path = options.venv
    pex_info.venv_copies = options.venv_copies
    pex_info.pex_path = options.pex_path
    pex_info.always_write_cache = options.always_write_cache
    pex_info.ignore_errors = options.ignore_errors
    pex_info.emit_warnings = options.emit_warnings
    pex_info.inherit_path = InheritPath.for_value(options.inherit_path)
    pex_info.pex_root = options.runtime_pex_root
    pex_info.strip_pex_env = options.strip_pex_env

    if options.interpreter_constraint:
        for ic in options.interpreter_constraint:
            pex_builder.add_interpreter_constraint(ic)

    indexes = compute_indexes(options)

    for requirements_pex in options.requirements_pexes:
        pex_builder.add_from_requirements_pex(requirements_pex)

    with TRACER.timed("Resolving distributions ({})".format(reqs + options.requirement_files)):
        if options.cache_ttl:
            pex_warnings.warn("The --cache-ttl option is deprecated and no longer has any effect.")
        if options.headers:
            pex_warnings.warn("The --header option is deprecated and no longer has any effect.")

        network_configuration = NetworkConfiguration(
            retries=options.retries,
            timeout=options.timeout,
            proxy=options.proxy,
            cert=options.cert,
            client_cert=options.client_cert,
        )

        try:
            # Either resolve against an existing PEX repository or perform a
            # full resolve from indexes/find-links.
            if options.pex_repository:
                with TRACER.timed(
                    "Resolving requirements from PEX {}.".format(options.pex_repository)
                ):
                    resolveds = resolve_from_pex(
                        pex=options.pex_repository,
                        requirements=reqs,
                        requirement_files=options.requirement_files,
                        constraint_files=options.constraint_files,
                        network_configuration=network_configuration,
                        transitive=options.transitive,
                        interpreters=interpreters,
                        platforms=list(platforms),
                        manylinux=options.manylinux,
                        ignore_errors=options.ignore_errors,
                    )
            else:
                with TRACER.timed("Resolving requirements."):
                    resolveds = resolve_multi(
                        requirements=reqs,
                        requirement_files=options.requirement_files,
                        constraint_files=options.constraint_files,
                        allow_prereleases=options.allow_prereleases,
                        transitive=options.transitive,
                        interpreters=interpreters,
                        platforms=list(platforms),
                        indexes=indexes,
                        find_links=options.find_links,
                        resolver_version=ResolverVersion.for_value(options.resolver_version),
                        network_configuration=network_configuration,
                        cache=cache,
                        build=options.build,
                        use_wheel=options.use_wheel,
                        compile=options.compile,
                        manylinux=options.manylinux,
                        max_parallel_jobs=options.max_parallel_jobs,
                        ignore_errors=options.ignore_errors,
                    )

            for resolved_dist in resolveds:
                pex_builder.add_distribution(resolved_dist.distribution)
                if resolved_dist.direct_requirement:
                    pex_builder.add_requirement(resolved_dist.direct_requirement)
        except Unsatisfiable as e:
            die(str(e))

    if options.entry_point and options.script:
        die("Must specify at most one entry point or script.", INVALID_OPTIONS)

    if options.entry_point:
        pex_builder.set_entry_point(options.entry_point)
    elif options.script:
        pex_builder.set_script(options.script)

    if options.python_shebang:
        pex_builder.set_shebang(options.python_shebang)

    return pex_builder
def transform_legacy_arg(arg):
    # type: (str) -> str
    """Map the legacy boolean `--inherit-path` flag to its modern valued form."""
    # --inherit-path used to be a plain switch; it now requires a value, so the
    # bare form is rewritten to the equivalent explicit setting before argparse
    # sees it, preserving backwards compatibility.
    if arg != "--inherit-path":
        return arg
    return "--inherit-path={}".format(InheritPath.PREFER.value)
def _compatible_with_current_platform(interpreter, platforms):
if not platforms:
return True
current_platforms = set(interpreter.supported_platforms)
current_platforms.add(None)
return current_platforms.intersection(platforms)
def main(args=None):
    """CLI entry point: parse arguments, build the PEX, then save or run it.

    Arguments after a bare ``--`` separator are passed through to the built
    PEX when it is run directly (no -o given).
    """
    args = args[:] if args else sys.argv[1:]
    args = [transform_legacy_arg(arg) for arg in args]
    parser = configure_clp()

    try:
        # Split "pex options -- pex-program args" at the first bare separator.
        separator = args.index("--")
        args, cmdline = args[:separator], args[separator + 1 :]
    except ValueError:
        args, cmdline = args, []

    options = parser.parse_args(args=args)

    # Ensure the TMPDIR is an absolute path (So subprocesses that change CWD can find it) and
    # that it exists.
    tmpdir = os.path.realpath(options.tmpdir)
    if not os.path.exists(tmpdir):
        die("The specified --tmpdir does not exist: {}".format(tmpdir))
    if not os.path.isdir(tmpdir):
        die("The specified --tmpdir is not a directory: {}".format(tmpdir))
    tempfile.tempdir = os.environ["TMPDIR"] = tmpdir

    if options.cache_dir:
        pex_warnings.warn("The --cache-dir option is deprecated, use --pex-root instead.")

        if options.pex_root and options.cache_dir != options.pex_root:
            die(
                "Both --cache-dir and --pex-root were passed with conflicting values. "
                "Just set --pex-root."
            )

    if options.disable_cache:

        def warn_ignore_pex_root(set_via):
            pex_warnings.warn(
                "The pex root has been set via {via} but --disable-cache is also set. "
                "Ignoring {via} and disabling caches.".format(via=set_via)
            )

        if options.cache_dir:
            warn_ignore_pex_root("--cache-dir")
        elif options.pex_root:
            warn_ignore_pex_root("--pex-root")
        elif os.environ.get("PEX_ROOT"):
            warn_ignore_pex_root("PEX_ROOT")

        # Caching disabled: use a throwaway pex root for this invocation only.
        pex_root = safe_mkdtemp()
    else:
        pex_root = options.cache_dir or options.pex_root or ENV.PEX_ROOT

    if options.python and options.interpreter_constraint:
        die('The "--python" and "--interpreter-constraint" options cannot be used together.')

    if options.pex_repository and (options.indexes or options.find_links):
        die(
            'The "--pex-repository" option cannot be used together with the "--index" or '
            '"--find-links" options.'
        )

    with ENV.patch(
        PEX_VERBOSE=str(options.verbosity), PEX_ROOT=pex_root, TMPDIR=tmpdir
    ) as patched_env:
        with TRACER.timed("Building pex"):
            pex_builder = build_pex(options.requirements, options, cache=ENV.PEX_ROOT)

        pex_builder.freeze(bytecode_compile=options.compile)
        interpreter = pex_builder.interpreter
        pex = PEX(
            pex_builder.path(), interpreter=interpreter, verify_entry_point=options.validate_ep
        )

        if options.pex_name is not None:
            # Save-to-file mode; optionally seed local caches and print the
            # direct-execution command line.
            log("Saving PEX file to %s" % options.pex_name, V=options.verbosity)
            pex_builder.build(
                options.pex_name,
                bytecode_compile=options.compile,
                deterministic_timestamp=not options.use_system_time,
            )
            if options.seed:
                execute_cached_args = seed_cache(options, pex)
                print(" ".join(execute_cached_args))
        else:
            # Run-immediately mode.
            if not _compatible_with_current_platform(interpreter, options.platforms):
                log("WARNING: attempting to run PEX with incompatible platforms!", V=1)
                log(
                    "Running on platform {} but built for {}".format(
                        interpreter.platform, ", ".join(map(str, options.platforms))
                    ),
                    V=1,
                )

            log(
                "Running PEX file at %s with args %s" % (pex_builder.path(), cmdline),
                V=options.verbosity,
            )
            sys.exit(pex.run(args=list(cmdline), env=patched_env))
def seed_cache(
    options,  # type: Namespace
    pex,  # type: PEX
):
    # type: (...) -> Iterable[str]
    """Pre-populate the local Pex caches for ``pex`` and return the argv that
    runs directly from the seeded location (unzip dir, venv pex or saved file).
    """
    pex_path = pex.path()
    with TRACER.timed("Seeding local caches for {}".format(pex_path)):
        if options.unzip:
            unzip_dir = pex.pex_info().unzip_dir
            if unzip_dir is None:
                raise AssertionError(
                    "Expected PEX-INFO for {} to have the components of an unzip directory".format(
                        pex_path
                    )
                )
            # Extract exactly once; atomic_directory makes concurrent seeders cooperate.
            with atomic_directory(unzip_dir, exclusive=True) as chroot:
                if not chroot.is_finalized:
                    with TRACER.timed("Extracting {}".format(pex_path)):
                        with open_zip(options.pex_name) as pex_zip:
                            pex_zip.extractall(chroot.work_dir)
            return [pex.interpreter.binary, unzip_dir]
        elif options.venv:
            with TRACER.timed("Creating venv from {}".format(pex_path)):
                venv_pex = ensure_venv(pex)
                return [venv_pex]
        else:
            # Default: just warm the code/distribution caches in place.
            with TRACER.timed("Extracting code and distributions for {}".format(pex_path)):
                pex.activate()
            return [os.path.abspath(options.pex_name)]
if __name__ == "__main__":
    # Allow running this module directly as the `pex` CLI.
    main()
| |
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# (C) 2007 Jeremy Laine
# licensed under MIT, see COPYING.MIT
import os
import sys
import time
# Python version the generated manifest targets; targetPrefix derives the
# on-target library directory from the major.minor part of this string.
VERSION = "2.6.4"

__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
__version__ = "20100908"

class MakefileMaker:
    """Generates an OpenEmbedded packaging manifest (PROVIDES/PACKAGES/
    DESCRIPTION/RDEPENDS/FILES variables) for the Python standard library,
    writing it to the supplied file object."""

    def __init__( self, outfile ):
        """initialize"""
        # name -> (description, dependencies, full target filenames)
        self.packages = {}
        self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
        self.output = outfile
        self.out( """
# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
""" % ( sys.argv[0], __version__ ) )

    #
    # helper functions
    #

    def out( self, data ):
        """print a line to the output file"""
        self.output.write( "%s\n" % data )

    def setPrefix( self, targetPrefix ):
        """set a file prefix for addPackage files"""
        self.targetPrefix = targetPrefix

    def doProlog( self ):
        """emit the (currently empty) manifest prolog"""
        self.out( """ """ )
        self.out( "" )

    def addPackage( self, name, description, dependencies, filenames ):
        """add a package to the Makefile"""
        # Accept either a whitespace-separated string or an explicit list.
        # isinstance() replaces the fragile `type( filenames ) == type( "" )` check.
        if isinstance( filenames, str ):
            filenames = filenames.split()
        fullFilenames = []
        for filename in filenames:
            # Entries starting with "$" are already make-variable paths;
            # everything else is relative to the target prefix.
            if filename[0] != "$":
                fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
            else:
                fullFilenames.append( filename )
        self.packages[name] = description, dependencies, fullFilenames

    def doBody( self ):
        """generate body of Makefile"""
        #
        # generate provides line
        #
        provideLine = 'PROVIDES+="'
        for name in self.packages:
            provideLine += "%s " % name
        provideLine += '"'

        self.out( provideLine )
        self.out( "" )

        #
        # generate package line
        #
        packageLine = 'PACKAGES="python-core-dbg '
        for name in self.packages:
            if name != 'python-core-dbg':
                packageLine += "%s " % name
        packageLine += 'python-modules"'

        self.out( packageLine )
        self.out( "" )

        #
        # generate package variables
        #
        # NOTE: .items() (not the Python-2-only .iteritems()) keeps this
        # generator runnable under both Python 2 and 3.
        for name, data in self.packages.items():
            desc, deps, files = data

            #
            # write out the description, revision and dependencies
            #
            self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
            self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )

            line = 'FILES_%s="' % name

            #
            # generate which files to copy for the target (-dfR because whole directories are also allowed)
            #
            for target in files:
                line += "%s " % target
            line += '"'

            self.out( line )
            self.out( "" )

        self.out( 'DESCRIPTION_python-modules="All Python modules"' )
        line = 'RDEPENDS_python-modules="'

        # python-modules pulls in every package except the debug and dev ones.
        for name in self.packages:
            if name not in ['python-core-dbg', 'python-dev']:
                line += "%s " % name
        self.out( "%s \"" % line )
        self.out( 'ALLOW_EMPTY_python-modules = "1"' )

    def doEpilog( self ):
        """emit the (currently empty) manifest epilog"""
        self.out( """""" )
        self.out( "" )

    def make( self ):
        """write the complete manifest: prolog, body, epilog"""
        self.doProlog()
        self.doBody()
        self.doEpilog()
if __name__ == "__main__":

    if len( sys.argv ) > 1:
        # Remove a stale output file directly instead of shelling out via
        # os.popen("rm -f ..."); same best-effort semantics, no shell.
        if os.path.exists( sys.argv[1] ):
            os.remove( sys.argv[1] )
        # open() instead of the file() builtin, which Python 3 removed.
        outfile = open( sys.argv[1], "w" )
    else:
        outfile = sys.stdout

    m = MakefileMaker( outfile )

    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
    # Parameters: revision, name, description, dependencies, filenames
    #

    m.addPackage( "python-core", "Python Interpreter and core modules (needed!)", "",
                  "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
                  "genericpath.* getopt.* linecache.* new.* " +
                  "os.* posixpath.* struct.* " +
                  "warnings.* site.* stat.* " +
                  "UserDict.* UserList.* UserString.* " +
                  "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
                  "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python*" )

    m.addPackage( "python-core-dbg", "Python core module debug information", "python-core",
                  "config/.debug lib-dynload/.debug ${bindir}/.debug ${libdir}/.debug" )

    m.addPackage( "python-dev", "Python Development Package", "python-core",
                  "${includedir} ${libdir}/libpython2.6.so config" ) # package

    m.addPackage( "python-idle", "Python Integrated Development Environment", "python-core python-tkinter",
                  "${bindir}/idle idlelib" ) # package

    m.addPackage( "python-pydoc", "Python Interactive Help Support", "python-core python-lang python-stringold python-re",
                  "${bindir}/pydoc pydoc.*" )

    m.addPackage( "python-smtpd", "Python Simple Mail Transport Daemon", "python-core python-netserver python-email python-mime",
                  "${bindir}/smtpd.*" )

    m.addPackage( "python-audio", "Python Audio Handling", "python-core",
                  "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )

    m.addPackage( "python-bsddb", "Python Berkeley Database Bindings", "python-core",
                  "bsddb lib-dynload/_bsddb.so" ) # package

    m.addPackage( "python-codecs", "Python Codecs, Encodings & i18n Support", "python-core python-lang",
                  "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )

    m.addPackage( "python-compile", "Python Bytecode Compilation Support", "python-core",
                  "py_compile.* compileall.*" )

    m.addPackage( "python-compiler", "Python Compiler Support", "python-core",
                  "compiler" ) # package

    m.addPackage( "python-compression", "Python High Level Compression Support", "python-core python-zlib",
                  "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )

    m.addPackage( "python-crypt", "Python Basic Cryptographic and Hashing Support", "python-core",
                  "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )

    m.addPackage( "python-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "python-core python-io python-re python-stringold",
                  "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )

    m.addPackage( "python-curses", "Python Curses Support", "python-core",
                  "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module

    m.addPackage( "python-ctypes", "Python C Types Support", "python-core",
                  "ctypes lib-dynload/_ctypes.so" ) # directory + low level module

    m.addPackage( "python-datetime", "Python Calendar and Time support", "python-core python-codecs",
                  "_strptime.* calendar.* lib-dynload/datetime.so" )

    m.addPackage( "python-db", "Python File-Based Database Support", "python-core",
                  "anydbm.* dumbdbm.* whichdb.* " )

    m.addPackage( "python-debugger", "Python Debugger", "python-core python-io python-lang python-re python-stringold python-shell python-pprint",
                  "bdb.* pdb.*" )

    m.addPackage( "python-difflib", "Python helpers for computing deltas between objects.", "python-lang python-re",
                  "difflib.*" )

    m.addPackage( "python-distutils", "Python Distribution Utilities", "python-core",
                  "config distutils" ) # package

    m.addPackage( "python-doctest", "Python framework for running examples in docstrings.", "python-core python-lang python-io python-re python-unittest python-debugger python-difflib",
                  "doctest.*" )

    # FIXME consider adding to some higher level package
    m.addPackage( "python-elementtree", "Python elementree", "python-core",
                  "lib-dynload/_elementtree.so" )

    m.addPackage( "python-email", "Python Email Support", "python-core python-io python-re python-mime python-audio python-image python-netclient",
                  "imaplib.* email" ) # package

    m.addPackage( "python-fcntl", "Python's fcntl Interface", "python-core",
                  "lib-dynload/fcntl.so" )

    m.addPackage( "python-hotshot", "Python Hotshot Profiler", "python-core",
                  "hotshot lib-dynload/_hotshot.so" )

    m.addPackage( "python-html", "Python HTML Processing", "python-core",
                  "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )

    m.addPackage( "python-gdbm", "Python GNU Database Support", "python-core",
                  "lib-dynload/gdbm.so" )

    m.addPackage( "python-image", "Python Graphical Image Handling", "python-core",
                  "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )

    m.addPackage( "python-io", "Python Low-Level I/O", "python-core python-math",
                  "lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
                  "pipes.* socket.* ssl.* tempfile.* StringIO.* " )

    m.addPackage( "python-json", "Python JSON Support", "python-core python-math python-re",
                  "json" ) # package

    m.addPackage( "python-lang", "Python Low-Level Language Support", "python-core",
                  "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
                  "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
                  "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
                  "tokenize.* traceback.* linecache.* weakref.*" )

    m.addPackage( "python-logging", "Python Logging Support", "python-core python-io python-lang python-pickle python-stringold",
                  "logging" ) # package

    m.addPackage( "python-mailbox", "Python Mailbox Format Support", "python-core python-mime",
                  "mailbox.*" )

    m.addPackage( "python-math", "Python Math Support", "python-core",
                  "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )

    m.addPackage( "python-mime", "Python MIME Handling APIs", "python-core python-io",
                  "mimetools.* uu.* quopri.* rfc822.*" )

    m.addPackage( "python-mmap", "Python Memory-Mapped-File Support", "python-core python-io",
                  "lib-dynload/mmap.so " )

    m.addPackage( "python-multiprocessing", "Python Multiprocessing Support", "python-core python-io python-lang",
                  "lib-dynload/_multiprocessing.so multiprocessing" ) # package

    m.addPackage( "python-netclient", "Python Internet Protocol Clients", "python-core python-crypt python-datetime python-io python-lang python-logging python-mime",
                  "*Cookie*.* " +
                  "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )

    m.addPackage( "python-netserver", "Python Internet Protocol Servers", "python-core python-netclient",
                  "cgi.* *HTTPServer.* SocketServer.*" )

    m.addPackage( "python-numbers", "Python Number APIs", "python-core python-lang python-re",
                  "decimal.* numbers.*" )

    m.addPackage( "python-pickle", "Python Persistence Support", "python-core python-codecs python-io python-re",
                  "pickle.* shelve.* lib-dynload/cPickle.so" )

    m.addPackage( "python-pkgutil", "Python Package Extension Utility Support", "python-core",
                  "pkgutil.*")

    m.addPackage( "python-pprint", "Python Pretty-Print Support", "python-core",
                  "pprint.*" )

    m.addPackage( "python-profile", "Python Basic Profiling Support", "python-core python-textutils",
                  "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )

    m.addPackage( "python-re", "Python Regular Expression APIs", "python-core",
                  "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin

    m.addPackage( "python-readline", "Python Readline Support", "python-core",
                  "lib-dynload/readline.so rlcompleter.*" )

    m.addPackage( "python-resource", "Python Resource Control Interface", "python-core",
                  "lib-dynload/resource.so" )

    m.addPackage( "python-shell", "Python Shell-Like Functionality", "python-core python-re",
                  "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )

    m.addPackage( "python-robotparser", "Python robots.txt parser", "python-core python-netclient",
                  "robotparser.*")

    m.addPackage( "python-subprocess", "Python Subprocess Support", "python-core python-io python-re python-fcntl python-pickle",
                  "subprocess.*" )

    m.addPackage( "python-sqlite3", "Python Sqlite3 Database Support", "python-core python-datetime python-lang python-crypt python-io python-threading python-zlib",
                  "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )

    m.addPackage( "python-sqlite3-tests", "Python Sqlite3 Database Support Tests", "python-core python-sqlite3",
                  "sqlite3/test" )

    m.addPackage( "python-stringold", "Python String APIs [deprecated]", "python-core python-re",
                  "lib-dynload/strop.so string.*" )

    m.addPackage( "python-syslog", "Python Syslog Interface", "python-core",
                  "lib-dynload/syslog.so" )

    m.addPackage( "python-terminal", "Python Terminal Controlling Support", "python-core python-io",
                  "pty.* tty.*" )

    m.addPackage( "python-tests", "Python Tests", "python-core",
                  "test" ) # package

    m.addPackage( "python-threading", "Python Threading & Synchronization Support", "python-core python-lang",
                  "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )

    m.addPackage( "python-tkinter", "Python Tcl/Tk Bindings", "python-core",
                  "lib-dynload/_tkinter.so lib-tk" ) # package

    m.addPackage( "python-unittest", "Python Unit Testing Framework", "python-core python-stringold python-lang",
                  "unittest.*" )

    m.addPackage( "python-unixadmin", "Python Unix Administration Support", "python-core",
                  "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )

    m.addPackage( "python-xml", "Python basic XML support.", "python-core python-re",
                  "lib-dynload/pyexpat.so xml xmllib.*" ) # package

    m.addPackage( "python-xmlrpc", "Python XMLRPC Support", "python-core python-xml python-netserver python-lang",
                  "xmlrpclib.* SimpleXMLRPCServer.*" )

    m.addPackage( "python-zlib", "Python zlib Support.", "python-core",
                  "lib-dynload/zlib.so" )

    # NOTE: a second, identical python-mailbox registration used to follow here;
    # it merely overwrote the entry added above and has been removed.

    m.make()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, 2015, 2016, 2018, 2019 Matthew Zipay.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Functional test cases and runner for classes and functions that are
both ``@logged`` and ``@traced``.
"""
__author__ = "Matthew Zipay (mattzATninthtestDOTinfo)"
import logging
import unittest
from autologging import TRACE, __version__
from test import (
dummy_module_logger,
get_dummy_lineno,
list_handler,
named_logger,
named_tracer,
)
from test.dummy import logged_and_traced_function, LoggedAndTracedClass
from test.functest_logged import _LoggedFunctionalTest
from test.functest_traced import _TracedFunctionalTest
# suppress messages to the console: FATAL + 1 is above every standard level,
# so nothing propagated to the root logger is ever emitted.
logging.getLogger().setLevel(logging.FATAL + 1)
class _LoggedAndTracedFunctionalTest(
        _LoggedFunctionalTest, _TracedFunctionalTest):
    """Shared fixture for functional tests of targets that are both
    ``@logged`` and ``@traced``."""

    def setUp(self):
        # Capture everything down to TRACE level and start with no records.
        for log_obj in (dummy_module_logger, named_logger, named_tracer):
            log_obj.setLevel(TRACE)
        list_handler.reset()
class LoggedAndTracedClassFunctionalTest(_LoggedAndTracedFunctionalTest):
    """Test the log and trace records for a class that has been
    decorated with **both** :func:`autologging.logged` and
    :func:`autologging.traced`.

    Traced methods are expected to emit records in the order
    CALL trace, application log, RETURN trace.
    """
    def test_log_record_in_untraced_method(self):
        # __init__ is not traced, so only the single log record is captured.
        LoggedAndTracedClass()
        self.assertEqual(1, len(list_handler.records))
        init_function = LoggedAndTracedClass.__dict__["__init__"]
        self._assert_log_record(
            list_handler.records[0], init_function,
            "test.dummy.LoggedAndTracedClass", "LATC.__i__")
    def test_instance_method_log_and_trace_records(self):
        # records[0] is the untraced __init__ log record; 1-3 belong to method.
        value = LoggedAndTracedClass().method(None)
        self.assertEqual("LATC.m None and None", value)
        self.assertEqual(4, len(list_handler.records))
        method_function = LoggedAndTracedClass.__dict__["method"].__wrapped__
        self._assert_call_record(
            list_handler.records[1], method_function,
            "test.dummy.LoggedAndTracedClass", ((None,), dict()), "LATC.m")
        self._assert_log_record(
            list_handler.records[2], method_function,
            "test.dummy.LoggedAndTracedClass", "LATC.m")
        self._assert_return_record(
            list_handler.records[3], method_function,
            "test.dummy.LoggedAndTracedClass", ("LATC.m None and None",),
            "LATC.m")
    def test_call_method_log_and_trace_records(self):
        obj = LoggedAndTracedClass.NestedClass()
        # Discard the records produced by constructing the nested object.
        list_handler.reset()
        rv = obj("test")
        self.assertEqual("LATC.NC.__call__ test", rv)
        self.assertEqual(3, len(list_handler.records))
        wrapped_function = \
            LoggedAndTracedClass.NestedClass.__dict__["__call__"].__wrapped__
        # __qualname__ only exists on Python 3; fall back for Python 2.
        qualname = getattr(
            LoggedAndTracedClass.NestedClass, "__qualname__", "NestedClass")
        expected_tracer_name = "test.dummy.%s" % qualname
        expected_logger_name = "logged.testing.%s" % qualname
        self._assert_call_record(
            list_handler.records[0], wrapped_function, expected_tracer_name,
            (("test",), dict()), "LATC.NC.__c__")
        self._assert_log_record(
            list_handler.records[1], wrapped_function, expected_logger_name,
            "LATC.NC.__c__")
        self._assert_return_record(
            list_handler.records[2], wrapped_function, expected_tracer_name,
            ("LATC.NC.__call__ test",), "LATC.NC.__c__")
    def test_instance_method_log_record_when_trace_disabled(self):
        # Raising the level above TRACE suppresses CALL/RETURN records only.
        dummy_module_logger.setLevel(logging.DEBUG)
        value = LoggedAndTracedClass().method(None)
        self.assertEqual("LATC.m None and None", value)
        self.assertEqual(2, len(list_handler.records))
        method_function = LoggedAndTracedClass.__dict__["method"].__wrapped__
        self._assert_log_record(
            list_handler.records[1], method_function,
            "test.dummy.LoggedAndTracedClass", "LATC.m")
    def test_instance_method_log_and_trace_records_when_both_disabled(self):
        # WARN disables both the DEBUG log record and the TRACE records.
        dummy_module_logger.setLevel(logging.WARN)
        value = LoggedAndTracedClass().method(None)
        self.assertEqual("LATC.m None and None", value)
        self.assertEqual(0, len(list_handler.records))
    def test_nested_method_log_and_trace_records(self):
        LoggedAndTracedClass.NestedClass()
        self.assertEqual(3, len(list_handler.records))
        wrapped_function = \
            LoggedAndTracedClass.NestedClass.__dict__["__init__"].__wrapped__
        qualname = getattr(
            LoggedAndTracedClass.NestedClass, "__qualname__", "NestedClass")
        expected_tracer_name = "test.dummy.%s" % qualname
        expected_logger_name = "logged.testing.%s" % qualname
        self._assert_call_record(
            list_handler.records[0], wrapped_function, expected_tracer_name,
            (tuple(), dict()), "LATC.NC.__i__")
        self._assert_log_record(
            list_handler.records[1], wrapped_function, expected_logger_name,
            "LATC.NC.__i__")
        self._assert_return_record(
            list_handler.records[2], wrapped_function, expected_tracer_name,
            (None,), "LATC.NC.__i__")
    def test_nested_nonpublic_method_log_and_trace_records(self):
        LoggedAndTracedClass._NonPublicNestedClass()
        self.assertEqual(3, len(list_handler.records))
        wrapped_function = LoggedAndTracedClass._NonPublicNestedClass.__dict__[
            "__init__"].__wrapped__
        qualname = getattr(
            LoggedAndTracedClass._NonPublicNestedClass, "__qualname__",
            "_NonPublicNestedClass")
        # NOTE: tracer/logger naming is swapped relative to NestedClass.
        expected_tracer_name = "traced.testing.%s" % qualname
        expected_logger_name = "test.dummy.%s" % qualname
        self._assert_call_record(
            list_handler.records[0], wrapped_function, expected_tracer_name,
            (tuple(), dict()), "LATC._NPNC.__i__")
        self._assert_log_record(
            list_handler.records[1], wrapped_function, expected_logger_name,
            "LATC._NPNC.__i__")
        self._assert_return_record(
            list_handler.records[2], wrapped_function, expected_tracer_name,
            (None,), "LATC._NPNC.__i__")
    def test_nested_internal_log_record_in_untraced_method(self):
        # "__InternalNestedClass" is name-mangled; __init__ is not traced.
        LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass()
        self.assertEqual(1, len(list_handler.records))
        init_function = (
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.
            __dict__["__init__"])
        expected_logger_name = "logged.testing.%s" % getattr(
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass,
            "__qualname__",
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.__name__)
        self._assert_log_record(
            list_handler.records[0], init_function, expected_logger_name,
            "LATC.__INC.__i__")
    def test_nested_internal_method_log_and_trace_records(self):
        obj = LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass()
        value = obj.method(None)
        self.assertEqual("LATC.__INC.m None and None", value)
        self.assertEqual(4, len(list_handler.records))
        wrapped_function = (
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.
            __dict__["method"].__wrapped__)
        qualname = getattr(
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass,
            "__qualname__",
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.__name__)
        expected_tracer_name = "traced.testing.%s" % qualname
        expected_logger_name = "logged.testing.%s" % qualname
        self._assert_call_record(
            list_handler.records[1], wrapped_function, expected_tracer_name,
            ((None,), dict()), "LATC.__INC.m")
        self._assert_log_record(
            list_handler.records[2], wrapped_function, expected_logger_name,
            "LATC.__INC.m")
        self._assert_return_record(
            list_handler.records[3], wrapped_function, expected_tracer_name,
            ("LATC.__INC.m None and None",), "LATC.__INC.m")
    def test_nested_internal_method_log_record_when_trace_disabled(self):
        named_tracer.setLevel(logging.DEBUG)
        obj = LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass()
        value = obj.method(None)
        self.assertEqual("LATC.__INC.m None and None", value)
        self.assertEqual(2, len(list_handler.records))
        wrapped_function = (
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.
            __dict__["method"].__wrapped__)
        expected_logger_name = "logged.testing.%s" % getattr(
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass,
            "__qualname__",
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.__name__)
        self._assert_log_record(
            list_handler.records[1], wrapped_function, expected_logger_name,
            "LATC.__INC.m")
    def test_nested_internal_method_trace_records_when_log_disabled(self):
        named_logger.setLevel(logging.WARN)
        obj = LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass()
        value = obj.method(None)
        self.assertEqual("LATC.__INC.m None and None", value)
        self.assertEqual(2, len(list_handler.records))
        wrapped_function = (
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.
            __dict__["method"].__wrapped__)
        expected_tracer_name = "traced.testing.%s" % getattr(
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass,
            "__qualname__",
            LoggedAndTracedClass._LoggedAndTracedClass__InternalNestedClass.__name__)
        self._assert_call_record(
            list_handler.records[0], wrapped_function, expected_tracer_name,
            ((None,), dict()), "LATC.__INC.m")
        self._assert_return_record(
            list_handler.records[1], wrapped_function, expected_tracer_name,
            ("LATC.__INC.m None and None",), "LATC.__INC.m")
class LoggedAndTracedFunctionFunctionalTest(_LoggedAndTracedFunctionalTest):
    """Test the log and trace records for a function that has been
    decorated with **both** :func:`autologging.logged` and
    :func:`autologging.traced`.

    The traced function returns a nested function that is itself logged
    and traced (under swapped logger/tracer names).
    """
    def test_function_log_and_trace_records(self):
        # Expected record order: CALL trace, application log, RETURN trace.
        nested_function = logged_and_traced_function(None)
        self.assertEqual(3, len(list_handler.records))
        wrapped_function = logged_and_traced_function.__wrapped__
        self._assert_call_record(
            list_handler.records[0], wrapped_function, "traced.testing",
            ((None,), dict()), "l_a_t_f")
        self._assert_log_record(
            list_handler.records[1], wrapped_function, "test.dummy", "l_a_t_f")
        self._assert_return_record(
            list_handler.records[2], wrapped_function, "traced.testing",
            (nested_function,), "l_a_t_f")
    def test_function_log_record_when_trace_disabled(self):
        named_tracer.setLevel(logging.DEBUG)
        nested_function = logged_and_traced_function(None)
        self.assertEqual(1, len(list_handler.records))
        wrapped_function = logged_and_traced_function.__wrapped__
        self._assert_log_record(
            list_handler.records[0], wrapped_function, "test.dummy", "l_a_t_f")
    def test_function_trace_records_when_log_disabled(self):
        dummy_module_logger.setLevel(logging.WARN)
        nested_function = logged_and_traced_function(None)
        self.assertEqual(2, len(list_handler.records))
        wrapped_function = logged_and_traced_function.__wrapped__
        self._assert_call_record(
            list_handler.records[0], wrapped_function, "traced.testing",
            ((None,), dict()), "l_a_t_f")
        self._assert_return_record(
            list_handler.records[1], wrapped_function, "traced.testing",
            (nested_function,), "l_a_t_f")
    def test_nested_function_log_and_trace_records(self):
        nested_function = logged_and_traced_function(None)
        value = nested_function(None)
        self.assertEqual("l_a_t_f.n_t_a_l_f None and None", value)
        # Records 0-2 belong to the outer call; 3-5 to the nested call.
        self.assertEqual(6, len(list_handler.records))
        wrapped_function = nested_function.__wrapped__
        self._assert_call_record(
            list_handler.records[3], wrapped_function, "test.dummy",
            ((None,), dict()), "l_a_t_f.n_t_a_l_f")
        self._assert_log_record(
            list_handler.records[4], wrapped_function, "logged.testing",
            "l_a_t_f.n_t_a_l_f")
        self._assert_return_record(
            list_handler.records[5], wrapped_function, "test.dummy",
            ("l_a_t_f.n_t_a_l_f None and None",), "l_a_t_f.n_t_a_l_f")
    def test_nested_function_log_record_when_trace_disabled(self):
        dummy_module_logger.setLevel(logging.DEBUG)
        nested_function = logged_and_traced_function(None)
        value = nested_function(None)
        self.assertEqual("l_a_t_f.n_t_a_l_f None and None", value)
        self.assertEqual(4, len(list_handler.records))
        wrapped_function = nested_function.__wrapped__
        self._assert_log_record(
            list_handler.records[3], wrapped_function, "logged.testing",
            "l_a_t_f.n_t_a_l_f")
    def test_nested_function_trace_records_when_log_disabled(self):
        named_logger.setLevel(logging.WARN)
        nested_function = logged_and_traced_function(None)
        value = nested_function(None)
        self.assertEqual("l_a_t_f.n_t_a_l_f None and None", value)
        self.assertEqual(5, len(list_handler.records))
        wrapped_function = nested_function.__wrapped__
        self._assert_call_record(
            list_handler.records[3], wrapped_function, "test.dummy",
            ((None,), dict()), "l_a_t_f.n_t_a_l_f")
        self._assert_return_record(
            list_handler.records[4], wrapped_function, "test.dummy",
            ("l_a_t_f.n_t_a_l_f None and None",), "l_a_t_f.n_t_a_l_f")
def suite():
    """Build the suite of logged-and-traced functional test cases.

    Uses ``unittest.TestLoader.loadTestsFromTestCase`` instead of the
    deprecated ``unittest.makeSuite`` (removed in Python 3.13); the
    resulting suite is identical. ``loadTestsFromTestCase`` is available
    on every Python version this file supports.
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(LoggedAndTracedClassFunctionalTest))
    suite.addTest(loader.loadTestsFromTestCase(LoggedAndTracedFunctionFunctionalTest))
    return suite
# Allow running this module's functional suite directly from the command line.
if __name__ == "__main__":
    unittest.TextTestRunner().run(suite())
| |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Manage indexing for snapshotter service"""
import logging
from sqlalchemy.sql.expression import tuple_
from ggrc import db
from ggrc import models
from ggrc.models import all_models
from ggrc.fulltext.mysql import MysqlRecordProperty as Record
from ggrc.fulltext.recordbuilder import RecordBuilder
from ggrc.models.reflection import AttributeInfo
from ggrc.utils import generate_query_chunks
from ggrc.snapshotter.rules import Types
from ggrc.snapshotter.datastructures import Pair
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _get_tag(pair):
return u"{parent_type}-{parent_id}-{child_type}".format(
parent_type=pair.parent.type,
parent_id=pair.parent.id,
child_type=pair.child.type
)
def _get_parent_property(pair):
return u"{parent_type}-{parent_id}".format(
parent_type=pair.parent.type,
parent_id=pair.parent.id
)
def _get_child_property(pair):
return u"{child_type}-{child_id}".format(
child_type=pair.child.type,
child_id=pair.child.id
)
def _get_columns():
  """Get common columns for snapshots and revisions tables.

  Returns:
    tuple(snapshot_columns, revision_columns) - two column-only queries,
    one over Snapshot and one over Revision, ready for further filtering.
  """
  # Select only the columns needed for reindexing; full ORM objects are
  # not required and would be slower to materialize.
  snapshot_columns = db.session.query(
      models.Snapshot.id,
      models.Snapshot.context_id,
      models.Snapshot.parent_type,
      models.Snapshot.parent_id,
      models.Snapshot.child_type,
      models.Snapshot.child_id,
      models.Snapshot.revision_id
  )
  revision_columns = db.session.query(
      models.Revision.id,
      models.Revision.resource_type,
      models.Revision.content
  )
  return snapshot_columns, revision_columns
def _get_model_properties():
  """Get indexable properties for all snapshottable objects

  Args:
    None
  Returns:
    tuple(class_properties, cad_list) - class_properties maps each
    snapshottable model name to the list of its searchable attribute
    names; cad_list is a list of (id, title) rows for the custom
    attribute definitions belonging to those models.
  """
  # pylint: disable=protected-access
  class_properties = dict()
  klass_names = Types.all
  # CAD definition_type stores the singular table name, not the class name.
  cadef_klass_names = {
      getattr(all_models, klass)._inflector.table_singular
      for klass in klass_names
  }
  cad_query = db.session.query(
      models.CustomAttributeDefinition.id,
      models.CustomAttributeDefinition.title,
  ).filter(
      models.CustomAttributeDefinition.definition_type.in_(cadef_klass_names)
  )
  for klass_name in klass_names:
    model_attributes = AttributeInfo.gather_attrs(
        getattr(all_models, klass_name), '_fulltext_attrs')
    class_properties[klass_name] = model_attributes
  return class_properties, cad_query.all()
def get_searchable_attributes(attributes, cad_list, content):
  """Get all searchable attributes for a given object that should be indexed

  The previous docstring documented nonexistent parameters
  (``cad_keys``/``ca_definitions``); it is corrected here to match the
  actual signature.

  Args:
    attributes: Attribute names that should be extracted from the content
    cad_list: List of custom attribute definition rows, each exposing an
      ``id`` and a ``title``
    content: dictionary (JSON) representation of an object revision
  Return:
    Dict of "key": "value" from the object's revision, with custom
    attribute values keyed by their definition's title.
  """
  searchable_values = {attr: content.get(attr) for attr in attributes}
  cad_map = {cad.id: cad for cad in cad_list}
  # Overlay custom attribute values; values whose definition is unknown
  # (e.g. a deleted CAD) are silently skipped.
  for cav in content.get("custom_attributes", []):
    cad = cad_map.get(cav["custom_attribute_id"])
    if cad:
      searchable_values[cad.title] = cav["attribute_value"]
  return searchable_values
def reindex():
  """Reindex all snapshots.

  Snapshots are processed in chunks to bound memory use; each chunk is
  committed before the next one starts.
  """
  columns = db.session.query(
      models.Snapshot.parent_type,
      models.Snapshot.parent_id,
      models.Snapshot.child_type,
      models.Snapshot.child_id,
  )
  for query_chunk in generate_query_chunks(columns):
    pairs = {Pair.from_4tuple(p) for p in query_chunk}
    reindex_pairs(pairs)
    # Commit per chunk so transactions stay small.
    db.session.commit()
def delete_records(snapshot_ids):
  """Delete all records for some snapshots.

  Args:
    snapshot_ids: An iterable with snapshot IDs whose full text records should
      be deleted.
  """
  to_delete = {("Snapshot", _id) for _id in snapshot_ids}
  # synchronize_session=False skips updating in-memory ORM state, which is
  # fine here because nothing holds on to the deleted Record objects.
  db.session.query(Record).filter(
      tuple_(Record.type, Record.key).in_(to_delete)
  ).delete(synchronize_session=False)
  db.session.commit()
def insert_records(payload):
  """Insert records to full text table.

  Args:
    payload: List of dictionaries that represent records entries.
  """
  # Use a core-level bulk insert (not the ORM) for speed.
  engine = db.engine
  engine.execute(Record.__table__.insert(), payload)
  db.session.commit()
def get_person_data(rec, person):
  """Get list of Person properties for fulltext indexing

  Returns one copy of *rec* per Person subproperty, with the
  "subproperty" and "content" entries filled in.
  """
  subprops = RecordBuilder.build_person_subprops(person)
  return [
      dict(rec, subproperty=key, content=val)
      for key, val in subprops.items()
  ]
def get_person_sort_subprop(rec, people):
  """Get a special subproperty for sorting

  Returns one copy of *rec* per sort subproperty produced for *people*.
  """
  subprops = RecordBuilder.build_list_sort_subprop(people)
  return [
      dict(rec, subproperty=key, content=val)
      for key, val in subprops.items()
  ]
def reindex_pairs(pairs): # noqa # pylint:disable=too-many-branches
  """Reindex selected snapshots.

  Builds fulltext records from each snapshot's revision content, deletes
  the snapshots' old records and bulk-inserts the new ones.

  NOTE: this function uses Python 2 builtins (``long``, ``unicode``,
  ``basestring``) and is not Python 3 compatible.

  Args:
    pairs: A list of parent-child pairs that uniquely represent snapshot
    object whose properties should be reindexed.
  """
  # pylint: disable=too-many-locals
  snapshots = dict()
  revisions = dict()
  snap_to_sid_cache = dict()
  search_payload = list()
  object_properties, cad_list = _get_model_properties()
  snapshot_columns, revision_columns = _get_columns()
  snapshot_query = snapshot_columns
  if pairs: # pylint:disable=too-many-nested-blocks
    # Restrict the snapshot query to the requested pairs; with no pairs
    # given, every snapshot is reindexed.
    pairs_filter = tuple_(
        models.Snapshot.parent_type,
        models.Snapshot.parent_id,
        models.Snapshot.child_type,
        models.Snapshot.child_id,
    ).in_({pair.to_4tuple() for pair in pairs})
    snapshot_query = snapshot_columns.filter(pairs_filter)
  for _id, ctx_id, ptype, pid, ctype, cid, revid in snapshot_query:
    pair = Pair.from_4tuple((ptype, pid, ctype, cid))
    snapshots[pair] = [_id, ctx_id, revid]
    snap_to_sid_cache[pair] = _id
  # Fetch only the revisions actually referenced by the selected snapshots.
  revision_ids = {revid for _, _, revid in snapshots.values()}
  revision_query = revision_columns.filter(
      models.Revision.id.in_(revision_ids)
  )
  for _id, _type, content in revision_query:
    revisions[_id] = get_searchable_attributes(
        object_properties[_type], cad_list, content)
  snapshot_ids = set()
  for pair in snapshots:
    snapshot_id, ctx_id, revision_id = snapshots[pair]
    snapshot_ids.add(snapshot_id)
    properties = revisions[revision_id]
    properties.update({
        "parent": _get_parent_property(pair),
        "child": _get_child_property(pair),
        "child_type": pair.child.type,
        "child_id": pair.child.id
    })
    # Expand assignees into one property per role, each holding the person.
    assignees = properties.pop("assignees", None)
    if assignees:
      for person, roles in assignees:
        if person:
          for role in roles:
            properties[role] = [person]
    single_person_properties = {"modified_by", "principal_assessor",
                                "secondary_assessor", "contact",
                                "secondary_contact"}
    multiple_person_properties = {"owners"}
    for prop, val in properties.items():
      if prop and val is not None:
        # record stub shared by all branches below
        rec = {
            "key": snapshot_id,
            "type": "Snapshot",
            "context_id": ctx_id,
            "tags": _get_tag(pair),
            "property": prop,
            "subproperty": "",
            "content": val,
        }
        if prop in single_person_properties:
          if val:
            search_payload += get_person_data(rec, val)
            search_payload += get_person_sort_subprop(rec, [val])
        elif prop in multiple_person_properties:
          for person in val:
            search_payload += get_person_data(rec, person)
          search_payload += get_person_sort_subprop(rec, val)
        elif isinstance(val, dict) and "title" in val:
          # Linked-object stubs are indexed by their title only.
          rec["content"] = val["title"]
          search_payload += [rec]
        elif isinstance(val, (bool, int, long)):
          rec["content"] = unicode(val)
          search_payload += [rec]
        else:
          if isinstance(rec["content"], basestring):
            search_payload += [rec]
          else:
            # Unknown value shape - skip it but leave a trace in the log.
            logger.warning(u"Unsupported value for %s #%s in %s %s: %r",
                           rec["type"], rec["key"], rec["property"],
                           rec["subproperty"], rec["content"])
  # Replace the old records atomically from the caller's perspective:
  # delete everything for these snapshots, then bulk-insert the new rows.
  delete_records(snapshot_ids)
  insert_records(search_payload)
| |
"""Support for the Abode Security System."""
from asyncio import gather
from copy import deepcopy
from functools import partial
from abodepy import Abode
from abodepy.exceptions import AbodeException
import abodepy.helpers.timeline as TIMELINE
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DATE,
ATTR_ENTITY_ID,
ATTR_TIME,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import Entity
from .const import ATTRIBUTION, DEFAULT_CACHEDB, DOMAIN, LOGGER
CONF_POLLING = "polling"
# Service names registered by this integration.
SERVICE_SETTINGS = "change_setting"
SERVICE_CAPTURE_IMAGE = "capture_image"
SERVICE_TRIGGER_AUTOMATION = "trigger_automation"
# Attribute names used in service payloads and fired events.
ATTR_DEVICE_ID = "device_id"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_TYPE = "device_type"
ATTR_EVENT_CODE = "event_code"
ATTR_EVENT_NAME = "event_name"
ATTR_EVENT_TYPE = "event_type"
ATTR_EVENT_UTC = "event_utc"
ATTR_SETTING = "setting"
ATTR_USER_NAME = "user_name"
ATTR_APP_TYPE = "app_type"
ATTR_EVENT_BY = "event_by"
ATTR_VALUE = "value"
# YAML configuration schema (imported into a config entry in async_setup).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_POLLING, default=False): cv.boolean,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Validation schemas for the three services registered below.
CHANGE_SETTING_SCHEMA = vol.Schema(
    {vol.Required(ATTR_SETTING): cv.string, vol.Required(ATTR_VALUE): cv.string}
)
CAPTURE_IMAGE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
AUTOMATION_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
# Platforms forwarded from the config entry during setup.
ABODE_PLATFORMS = [
    "alarm_control_panel",
    "binary_sensor",
    "lock",
    "switch",
    "cover",
    "camera",
    "light",
    "sensor",
]
class AbodeSystem:
    """Abode System class.

    Shared integration state stored in ``hass.data[DOMAIN]``.
    """

    def __init__(self, abode, polling):
        """Initialize the system.

        abode: the logged-in abodepy ``Abode`` client.
        polling: True when entities should poll instead of using push events.
        """
        self.abode = abode
        self.polling = polling
        # Entity IDs registered by this integration; used to resolve
        # service-call targets.
        self.entity_ids = set()
        # Remover for the EVENT_HOMEASSISTANT_STOP listener (set in setup).
        self.logout_listener = None
async def async_setup(hass, config):
    """Set up Abode integration.

    Forwards YAML configuration, when present, into the config-entry
    import flow; config-entry-only installs return immediately.
    """
    if DOMAIN not in config:
        return True
    conf = config[DOMAIN]
    # deepcopy so the flow cannot mutate the user's original config dict.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=deepcopy(conf)
        )
    )
    return True
async def async_setup_entry(hass, config_entry):
    """Set up Abode integration from a config entry.

    Raises:
        ConfigEntryNotReady: if the Abode cloud cannot be reached, so
            Home Assistant retries the setup later.
    """
    username = config_entry.data.get(CONF_USERNAME)
    password = config_entry.data.get(CONF_PASSWORD)
    polling = config_entry.data.get(CONF_POLLING)
    try:
        cache = hass.config.path(DEFAULT_CACHEDB)
        # abodepy logs in with blocking I/O, so construct the client in an
        # executor thread instead of on the event loop.
        abode = await hass.async_add_executor_job(
            Abode, username, password, True, True, True, cache
        )
        hass.data[DOMAIN] = AbodeSystem(abode, polling)
    except (AbodeException, ConnectTimeout, HTTPError) as ex:
        LOGGER.error("Unable to connect to Abode: %s", str(ex))
        raise ConfigEntryNotReady from ex
    for platform in ABODE_PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )
    await setup_hass_events(hass)
    # Service/event registration uses blocking abodepy calls; run off-loop.
    await hass.async_add_executor_job(setup_hass_services, hass)
    await hass.async_add_executor_job(setup_abode_events, hass)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    # Remove the services registered in setup_hass_services.
    hass.services.async_remove(DOMAIN, SERVICE_SETTINGS)
    hass.services.async_remove(DOMAIN, SERVICE_CAPTURE_IMAGE)
    hass.services.async_remove(DOMAIN, SERVICE_TRIGGER_AUTOMATION)
    tasks = []
    for platform in ABODE_PLATFORMS:
        tasks.append(
            hass.config_entries.async_forward_entry_unload(config_entry, platform)
        )
    await gather(*tasks)
    # Stop the push-event stream and log out (blocking; run in executor).
    await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.stop)
    await hass.async_add_executor_job(hass.data[DOMAIN].abode.logout)
    # Detach the HA-stop listener before dropping the integration state.
    hass.data[DOMAIN].logout_listener()
    hass.data.pop(DOMAIN)
    return True
def setup_hass_services(hass):
    """Register the Abode-specific Home Assistant services.

    Registers change_setting, capture_image and trigger_automation.
    Runs in an executor thread, so the blocking ``hass.services.register``
    API is used. The entity-ID filtering previously duplicated in the
    capture_image and trigger_automation handlers is factored into a
    shared helper.
    """

    def _target_entity_ids(call):
        """Return the registered Abode entity IDs addressed by *call*."""
        requested_ids = call.data.get(ATTR_ENTITY_ID)
        return [
            entity_id
            for entity_id in hass.data[DOMAIN].entity_ids
            if entity_id in requested_ids
        ]

    def change_setting(call):
        """Change an Abode system setting."""
        setting = call.data.get(ATTR_SETTING)
        value = call.data.get(ATTR_VALUE)
        try:
            hass.data[DOMAIN].abode.set_setting(setting, value)
        except AbodeException as ex:
            LOGGER.warning(ex)

    def capture_image(call):
        """Capture a new image on each targeted camera entity."""
        for entity_id in _target_entity_ids(call):
            signal = f"abode_camera_capture_{entity_id}"
            dispatcher_send(hass, signal)

    def trigger_automation(call):
        """Trigger an Abode automation for each targeted entity."""
        for entity_id in _target_entity_ids(call):
            signal = f"abode_trigger_automation_{entity_id}"
            dispatcher_send(hass, signal)

    hass.services.register(
        DOMAIN, SERVICE_SETTINGS, change_setting, schema=CHANGE_SETTING_SCHEMA
    )
    hass.services.register(
        DOMAIN, SERVICE_CAPTURE_IMAGE, capture_image, schema=CAPTURE_IMAGE_SCHEMA
    )
    hass.services.register(
        DOMAIN, SERVICE_TRIGGER_AUTOMATION, trigger_automation, schema=AUTOMATION_SCHEMA
    )
async def setup_hass_events(hass):
    """Home Assistant start and stop callbacks."""

    def logout(event):
        """Logout of Abode."""
        if not hass.data[DOMAIN].polling:
            hass.data[DOMAIN].abode.events.stop()
        hass.data[DOMAIN].abode.logout()
        LOGGER.info("Logged out of Abode")

    if not hass.data[DOMAIN].polling:
        # Push mode: start the event stream (blocking; run in executor).
        await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.start)
    # Keep the remover so async_unload_entry can detach the listener.
    hass.data[DOMAIN].logout_listener = hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_STOP, logout
    )
def setup_abode_events(hass):
    """Subscribe to Abode timeline events and re-fire them on the HA bus."""

    def event_callback(event, event_json):
        """Forward one Abode timeline event onto the Home Assistant bus."""
        forwarded_keys = (
            ATTR_DEVICE_ID,
            ATTR_DEVICE_NAME,
            ATTR_DEVICE_TYPE,
            ATTR_EVENT_CODE,
            ATTR_EVENT_NAME,
            ATTR_EVENT_TYPE,
            ATTR_EVENT_UTC,
            ATTR_USER_NAME,
            ATTR_APP_TYPE,
            ATTR_EVENT_BY,
            ATTR_DATE,
            ATTR_TIME,
        )
        data = {key: event_json.get(key, "") for key in forwarded_keys}
        hass.bus.fire(event, data)

    event_groups = [
        TIMELINE.ALARM_GROUP,
        TIMELINE.ALARM_END_GROUP,
        TIMELINE.PANEL_FAULT_GROUP,
        TIMELINE.PANEL_RESTORE_GROUP,
        TIMELINE.AUTOMATION_GROUP,
        TIMELINE.DISARM_GROUP,
        TIMELINE.ARM_GROUP,
        TIMELINE.ARM_FAULT_GROUP,
        TIMELINE.TEST_GROUP,
        TIMELINE.CAPTURE_GROUP,
        TIMELINE.DEVICE_GROUP,
    ]
    for event_group in event_groups:
        hass.data[DOMAIN].abode.events.add_event_callback(
            event_group, partial(event_callback, event_group)
        )
class AbodeEntity(Entity):
    """Representation of an Abode entity."""

    def __init__(self, data):
        """Initialize Abode entity.

        data: the shared AbodeSystem stored in ``hass.data[DOMAIN]``.
        """
        self._data = data
        self._available = True

    @property
    def available(self):
        """Return the available state."""
        return self._available

    @property
    def should_poll(self):
        """Return the polling state."""
        # Polling only when the user opted out of the push-event stream.
        return self._data.polling

    async def async_added_to_hass(self):
        """Subscribe to Abode connection status updates."""
        await self.hass.async_add_executor_job(
            self._data.abode.events.add_connection_status_callback,
            self.unique_id,
            self._update_connection_status,
        )
        # Track the entity ID so services can resolve call targets later.
        self.hass.data[DOMAIN].entity_ids.add(self.entity_id)

    async def async_will_remove_from_hass(self):
        """Unsubscribe from Abode connection status updates."""
        await self.hass.async_add_executor_job(
            self._data.abode.events.remove_connection_status_callback, self.unique_id
        )

    def _update_connection_status(self):
        """Update the entity available property."""
        # Invoked from abodepy's event machinery; use the thread-safe
        # schedule_update_ha_state rather than the async variant.
        self._available = self._data.abode.events.connected
        self.schedule_update_ha_state()
class AbodeDevice(AbodeEntity):
    """Representation of an Abode device."""

    def __init__(self, data, device):
        """Initialize Abode device.

        device: the abodepy device object wrapped by this entity.
        """
        super().__init__(data)
        self._device = device

    async def async_added_to_hass(self):
        """Subscribe to device events."""
        await super().async_added_to_hass()
        await self.hass.async_add_executor_job(
            self._data.abode.events.add_device_callback,
            self._device.device_id,
            self._update_callback,
        )

    async def async_will_remove_from_hass(self):
        """Unsubscribe from device events."""
        await super().async_will_remove_from_hass()
        await self.hass.async_add_executor_job(
            self._data.abode.events.remove_all_device_callbacks, self._device.device_id
        )

    def update(self):
        """Update device state."""
        self._device.refresh()

    @property
    def name(self):
        """Return the name of the device."""
        return self._device.name

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "device_id": self._device.device_id,
            "battery_low": self._device.battery_low,
            "no_response": self._device.no_response,
            "device_type": self._device.type,
        }

    @property
    def unique_id(self):
        """Return a unique ID to use for this device."""
        return self._device.device_uuid

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        # NOTE(review): "device_type" is not a standard device-registry
        # field - confirm it is intentionally included here.
        return {
            "identifiers": {(DOMAIN, self._device.device_id)},
            "manufacturer": "Abode",
            "name": self._device.name,
            "device_type": self._device.type,
        }

    def _update_callback(self, device):
        """Update the device state."""
        # Called from abodepy's event thread; schedule a thread-safe update.
        self.schedule_update_ha_state()
class AbodeAutomation(AbodeEntity):
    """Representation of an Abode automation."""

    def __init__(self, data, automation):
        """Initialize for Abode automation.

        automation: the abodepy automation object wrapped by this entity.
        """
        super().__init__(data)
        self._automation = automation

    def update(self):
        """Update automation state."""
        self._automation.refresh()

    @property
    def name(self):
        """Return the name of the automation."""
        return self._automation.name

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION, "type": "CUE automation"}

    @property
    def unique_id(self):
        """Return a unique ID to use for this automation."""
        return self._automation.automation_id
| |
"""
Lowest level connection
"""
import json
import logging
import random
import sys
import time
import uuid
from base64 import b64decode
from threading import local
from typing import Any, Dict, List, Mapping, Optional, Sequence
import botocore.client
import botocore.exceptions
from botocore.awsrequest import AWSPreparedRequest, create_request_object
from botocore.client import ClientError
from botocore.hooks import first_non_none_response
from botocore.exceptions import BotoCoreError
from botocore.session import get_session
from pynamodb.constants import (
RETURN_CONSUMED_CAPACITY_VALUES, RETURN_ITEM_COLL_METRICS_VALUES,
RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY, RETURN_VALUES_VALUES,
EXCLUSIVE_START_KEY, SCAN_INDEX_FORWARD, ATTR_DEFINITIONS,
BATCH_WRITE_ITEM, CONSISTENT_READ, DESCRIBE_TABLE, KEY_CONDITION_EXPRESSION,
BATCH_GET_ITEM, DELETE_REQUEST, SELECT_VALUES, RETURN_VALUES, REQUEST_ITEMS,
PROJECTION_EXPRESSION, SERVICE_NAME, DELETE_ITEM, PUT_REQUEST, UPDATE_ITEM, TABLE_NAME,
INDEX_NAME, KEY_SCHEMA, ATTR_NAME, ATTR_TYPE, TABLE_KEY, KEY_TYPE, GET_ITEM, UPDATE,
PUT_ITEM, SELECT, LIMIT, QUERY, SCAN, ITEM, LOCAL_SECONDARY_INDEXES,
KEYS, KEY, SEGMENT, TOTAL_SEGMENTS, CREATE_TABLE, PROVISIONED_THROUGHPUT, READ_CAPACITY_UNITS,
WRITE_CAPACITY_UNITS, GLOBAL_SECONDARY_INDEXES, PROJECTION, EXCLUSIVE_START_TABLE_NAME, TOTAL,
DELETE_TABLE, UPDATE_TABLE, LIST_TABLES, GLOBAL_SECONDARY_INDEX_UPDATES, ATTRIBUTES,
CONSUMED_CAPACITY, CAPACITY_UNITS, ATTRIBUTE_TYPES,
ITEMS, DEFAULT_ENCODING, BINARY, BINARY_SET, LAST_EVALUATED_KEY, RESPONSES, UNPROCESSED_KEYS,
UNPROCESSED_ITEMS, STREAM_SPECIFICATION, STREAM_VIEW_TYPE, STREAM_ENABLED,
EXPRESSION_ATTRIBUTE_NAMES, EXPRESSION_ATTRIBUTE_VALUES,
CONDITION_EXPRESSION, FILTER_EXPRESSION,
TRANSACT_WRITE_ITEMS, TRANSACT_GET_ITEMS, CLIENT_REQUEST_TOKEN, TRANSACT_ITEMS, TRANSACT_CONDITION_CHECK,
TRANSACT_GET, TRANSACT_PUT, TRANSACT_DELETE, TRANSACT_UPDATE, UPDATE_EXPRESSION,
RETURN_VALUES_ON_CONDITION_FAILURE_VALUES, RETURN_VALUES_ON_CONDITION_FAILURE,
AVAILABLE_BILLING_MODES, DEFAULT_BILLING_MODE, BILLING_MODE, PAY_PER_REQUEST_BILLING_MODE,
PROVISIONED_BILLING_MODE,
TIME_TO_LIVE_SPECIFICATION, ENABLED, UPDATE_TIME_TO_LIVE, TAGS, VALUE
)
from pynamodb.exceptions import (
TableError, QueryError, PutError, DeleteError, UpdateError, GetError, ScanError, TableDoesNotExist,
VerboseClientError,
TransactGetError, TransactWriteError)
from pynamodb.expressions.condition import Condition
from pynamodb.expressions.operand import Path
from pynamodb.expressions.projection import create_projection_expression
from pynamodb.expressions.update import Action, Update
from pynamodb.settings import get_settings_value, OperationSettings
from pynamodb.signals import pre_dynamodb_send, post_dynamodb_send
from pynamodb.types import HASH, RANGE
# botocore exception types that get wrapped into pynamodb's own errors.
BOTOCORE_EXCEPTIONS = (BotoCoreError, ClientError)
# DynamoDB error codes that indicate throttling (candidates for retry).
RATE_LIMITING_ERROR_CODES = ['ProvisionedThroughputExceededException', 'ThrottlingException']
# Library logger; the NullHandler avoids "no handler" warnings for consumers.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class MetaTable(object):
"""
A pythonic wrapper around table metadata
"""
    def __init__(self, data: Optional[Dict]) -> None:
        """Store the raw DescribeTable response dict (``None`` becomes ``{}``)."""
        self.data = data or {}
        # Key names resolved lazily and cached by the corresponding properties.
        self._range_keyname = None
        self._hash_keyname = None
def __repr__(self) -> str:
if self.data:
return "MetaTable<{}>".format(self.data.get(TABLE_NAME))
return ""
    @property
    def range_keyname(self) -> Optional[str]:
        """
        Returns the name of this table's range key

        Returns ``None`` for hash-only tables. The result is cached after
        the first successful lookup.
        """
        if self._range_keyname is None:
            for attr in self.data.get(KEY_SCHEMA, []):
                if attr.get(KEY_TYPE) == RANGE:
                    self._range_keyname = attr.get(ATTR_NAME)
        return self._range_keyname
    @property
    def hash_keyname(self) -> str:
        """
        Returns the name of this table's hash key

        The result is cached after the first lookup.

        Raises:
            ValueError: if the key schema contains no HASH key.
        """
        if self._hash_keyname is None:
            for attr in self.data.get(KEY_SCHEMA, []):
                if attr.get(KEY_TYPE) == HASH:
                    self._hash_keyname = attr.get(ATTR_NAME)
                    break
            if self._hash_keyname is None:
                raise ValueError("No hash_key found in key schema")
        return self._hash_keyname
def get_key_names(self, index_name=None):
"""
Returns the names of the primary key attributes and index key attributes (if index_name is specified)
"""
key_names = [self.hash_keyname]
if self.range_keyname:
key_names.append(self.range_keyname)
if index_name is not None:
index_hash_keyname = self.get_index_hash_keyname(index_name)
if index_hash_keyname not in key_names:
key_names.append(index_hash_keyname)
index_range_keyname = self.get_index_range_keyname(index_name)
if index_range_keyname is not None and index_range_keyname not in key_names:
key_names.append(index_range_keyname)
return key_names
def has_index_name(self, index_name):
"""
Returns True if the base table has a global or local secondary index with index_name
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = (global_indexes or []) + (local_indexes or [])
return any(index.get(INDEX_NAME) == index_name for index in indexes)
def get_index_hash_keyname(self, index_name: str) -> str:
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == HASH:
return schema_key.get(ATTR_NAME)
raise ValueError("No hash key attribute for index: {}".format(index_name))
def get_index_range_keyname(self, index_name):
"""
Returns the name of the hash key for a given index
"""
global_indexes = self.data.get(GLOBAL_SECONDARY_INDEXES)
local_indexes = self.data.get(LOCAL_SECONDARY_INDEXES)
indexes = []
if local_indexes:
indexes += local_indexes
if global_indexes:
indexes += global_indexes
for index in indexes:
if index.get(INDEX_NAME) == index_name:
for schema_key in index.get(KEY_SCHEMA):
if schema_key.get(KEY_TYPE) == RANGE:
return schema_key.get(ATTR_NAME)
return None
def get_item_attribute_map(self, attributes: Dict, item_key=ITEM, pythonic_key: bool = True):
"""
Builds up a dynamodb compatible AttributeValue map
"""
if pythonic_key:
item_key = item_key
attr_map: Dict[str, Dict] = {
item_key: {}
}
for key, value in attributes.items():
# In this case, the user provided a mapping
# {'key': {'S': 'value'}}
if isinstance(value, dict):
attr_map[item_key][key] = value
else:
attr_map[item_key][key] = {
self.get_attribute_type(key): value
}
return attr_map
def get_attribute_type(self, attribute_name: str, value: Optional[Any] = None) -> str:
"""
Returns the proper attribute type for a given attribute name
"""
for attr in self.data.get(ATTR_DEFINITIONS, []):
if attr.get(ATTR_NAME) == attribute_name:
return attr.get(ATTR_TYPE)
if value is not None and isinstance(value, dict):
for key in ATTRIBUTE_TYPES:
if key in value:
return key
attr_names = [attr.get(ATTR_NAME) for attr in self.data.get(ATTR_DEFINITIONS, [])]
raise ValueError("No attribute {} in {}".format(attribute_name, attr_names))
def get_identifier_map(self, hash_key: str, range_key: Optional[str] = None, key: str = KEY):
"""
Builds the identifier map that is common to several operations
"""
kwargs: Dict[str, Any] = {
key: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): hash_key
}
}
}
if range_key is not None and self.range_keyname is not None:
kwargs[key][self.range_keyname] = {
self.get_attribute_type(self.range_keyname): range_key
}
return kwargs
def get_exclusive_start_key_map(self, exclusive_start_key):
"""
Builds the exclusive start key attribute map
"""
if isinstance(exclusive_start_key, dict) and self.hash_keyname in exclusive_start_key:
# This is useful when paginating results, as the LastEvaluatedKey returned is already
# structured properly
return {
EXCLUSIVE_START_KEY: exclusive_start_key
}
else:
return {
EXCLUSIVE_START_KEY: {
self.hash_keyname: {
self.get_attribute_type(self.hash_keyname): exclusive_start_key
}
}
}
class Connection(object):
"""
A higher level abstraction over botocore
"""
def __init__(self,
region: Optional[str] = None,
host: Optional[str] = None,
read_timeout_seconds: Optional[float] = None,
connect_timeout_seconds: Optional[float] = None,
max_retry_attempts: Optional[int] = None,
base_backoff_ms: Optional[int] = None,
max_pool_connections: Optional[int] = None,
extra_headers: Optional[Mapping[str, str]] = None):
self._tables: Dict[str, MetaTable] = {}
self.host = host
self._local = local()
self._client = None
if region:
self.region = region
else:
self.region = get_settings_value('region')
if connect_timeout_seconds is not None:
self._connect_timeout_seconds = connect_timeout_seconds
else:
self._connect_timeout_seconds = get_settings_value('connect_timeout_seconds')
if read_timeout_seconds is not None:
self._read_timeout_seconds = read_timeout_seconds
else:
self._read_timeout_seconds = get_settings_value('read_timeout_seconds')
if max_retry_attempts is not None:
self._max_retry_attempts_exception = max_retry_attempts
else:
self._max_retry_attempts_exception = get_settings_value('max_retry_attempts')
if base_backoff_ms is not None:
self._base_backoff_ms = base_backoff_ms
else:
self._base_backoff_ms = get_settings_value('base_backoff_ms')
if max_pool_connections is not None:
self._max_pool_connections = max_pool_connections
else:
self._max_pool_connections = get_settings_value('max_pool_connections')
if extra_headers is not None:
self._extra_headers = extra_headers
else:
self._extra_headers = get_settings_value('extra_headers')
def __repr__(self) -> str:
return "Connection<{}>".format(self.client.meta.endpoint_url)
def _sign_request(self, request):
auth = self.client._request_signer.get_auth_instance(
self.client._request_signer.signing_name,
self.client._request_signer.region_name,
self.client._request_signer.signature_version)
auth.add_auth(request)
def _create_prepared_request(
self,
params: Dict,
settings: OperationSettings,
) -> AWSPreparedRequest:
request = create_request_object(params)
self._sign_request(request)
prepared_request = self.client._endpoint.prepare_request(request)
if self._extra_headers is not None:
prepared_request.headers.update(self._extra_headers)
if settings.extra_headers is not None:
prepared_request.headers.update(settings.extra_headers)
return prepared_request
def dispatch(self, operation_name: str, operation_kwargs: Dict, settings: OperationSettings = OperationSettings.default) -> Dict:
"""
Dispatches `operation_name` with arguments `operation_kwargs`
Raises TableDoesNotExist if the specified table does not exist
"""
if operation_name not in [DESCRIBE_TABLE, LIST_TABLES, UPDATE_TABLE, UPDATE_TIME_TO_LIVE, DELETE_TABLE, CREATE_TABLE]:
if RETURN_CONSUMED_CAPACITY not in operation_kwargs:
operation_kwargs.update(self.get_consumed_capacity_map(TOTAL))
log.debug("Calling %s with arguments %s", operation_name, operation_kwargs)
table_name = operation_kwargs.get(TABLE_NAME)
req_uuid = uuid.uuid4()
self.send_pre_boto_callback(operation_name, req_uuid, table_name)
data = self._make_api_call(operation_name, operation_kwargs, settings)
self.send_post_boto_callback(operation_name, req_uuid, table_name)
if data and CONSUMED_CAPACITY in data:
capacity = data.get(CONSUMED_CAPACITY)
if isinstance(capacity, dict) and CAPACITY_UNITS in capacity:
capacity = capacity.get(CAPACITY_UNITS)
log.debug("%s %s consumed %s units", data.get(TABLE_NAME, ''), operation_name, capacity)
return data
def send_post_boto_callback(self, operation_name, req_uuid, table_name):
try:
post_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("post_boto callback threw an exception.")
def send_pre_boto_callback(self, operation_name, req_uuid, table_name):
try:
pre_dynamodb_send.send(self, operation_name=operation_name, table_name=table_name, req_uuid=req_uuid)
except Exception as e:
log.exception("pre_boto callback threw an exception.")
def _make_api_call(self, operation_name: str, operation_kwargs: Dict, settings: OperationSettings = OperationSettings.default) -> Dict:
"""
This private method is here for two reasons:
1. It's faster to avoid using botocore's response parsing
2. It provides a place to monkey patch HTTP requests for unit testing
"""
operation_model = self.client._service_model.operation_model(operation_name)
request_dict = self.client._convert_to_request_dict(
operation_kwargs,
operation_model,
)
for i in range(0, self._max_retry_attempts_exception + 1):
attempt_number = i + 1
is_last_attempt_for_exceptions = i == self._max_retry_attempts_exception
http_response = None
prepared_request = None
try:
if prepared_request is not None:
# If there is a stream associated with the request, we need
# to reset it before attempting to send the request again.
# This will ensure that we resend the entire contents of the
# body.
prepared_request.reset_stream()
# Create a new request for each retry (including a new signature).
prepared_request = self._create_prepared_request(request_dict, settings)
# Implement the before-send event from botocore
event_name = 'before-send.dynamodb.{}'.format(operation_model.name)
event_responses = self.client._endpoint._event_emitter.emit(event_name, request=prepared_request)
event_response = first_non_none_response(event_responses)
if event_response is None:
http_response = self.client._endpoint.http_session.send(prepared_request)
else:
http_response = event_response
is_last_attempt_for_exceptions = True # don't retry if we have an event response
# json.loads accepts bytes in >= 3.6.0
if sys.version_info < (3, 6, 0):
data = json.loads(http_response.text)
else:
data = json.loads(http_response.content)
except (ValueError, botocore.exceptions.HTTPClientError, botocore.exceptions.ConnectionError) as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
if http_response:
e.args += (http_response.text,)
raise
else:
# No backoff for fast-fail exceptions that likely failed at the frontend
log.debug(
'Retry needed for (%s) after attempt %s, retryable %s caught: %s',
operation_name,
attempt_number,
e.__class__.__name__,
e
)
continue
status_code = http_response.status_code
headers = http_response.headers
if status_code >= 300:
# Extract error code from __type
code = data.get('__type', '')
if '#' in code:
code = code.rsplit('#', 1)[1]
botocore_expected_format = {'Error': {'Message': data.get('message', '') or data.get('Message', ''), 'Code': code}}
verbose_properties = {
'request_id': headers.get('x-amzn-RequestId')
}
if REQUEST_ITEMS in operation_kwargs:
# Batch operations can hit multiple tables, report them comma separated
verbose_properties['table_name'] = ','.join(operation_kwargs[REQUEST_ITEMS])
elif TRANSACT_ITEMS in operation_kwargs:
# Transactional operations can also hit multiple tables, or have multiple updates within
# the same table
table_names = []
for item in operation_kwargs[TRANSACT_ITEMS]:
for op in item.values():
table_names.append(op[TABLE_NAME])
verbose_properties['table_name'] = ','.join(table_names)
else:
verbose_properties['table_name'] = operation_kwargs.get(TABLE_NAME)
try:
raise VerboseClientError(botocore_expected_format, operation_name, verbose_properties)
except VerboseClientError as e:
if is_last_attempt_for_exceptions:
log.debug('Reached the maximum number of retry attempts: %s', attempt_number)
raise
elif status_code < 500 and code not in RATE_LIMITING_ERROR_CODES:
# We don't retry on a ConditionalCheckFailedException or other 4xx (except for
# throughput related errors) because we assume they will fail in perpetuity.
# Retrying when there is already contention could cause other problems
# in part due to unnecessary consumption of throughput.
raise
else:
# We use fully-jittered exponentially-backed-off retries:
# https://www.awsarchitectureblog.com/2015/03/backoff.html
sleep_time_ms = random.randint(0, self._base_backoff_ms * (2 ** i))
log.debug(
'Retry with backoff needed for (%s) after attempt %s,'
'sleeping for %s milliseconds, retryable %s caught: %s',
operation_name,
attempt_number,
sleep_time_ms,
e.__class__.__name__,
e
)
time.sleep(sleep_time_ms / 1000.0)
continue
return self._handle_binary_attributes(data)
assert False # unreachable code
@staticmethod
def _handle_binary_attributes(data):
""" Simulate botocore's binary attribute handling """
if ITEM in data:
for attr in data[ITEM].values():
_convert_binary(attr)
if ITEMS in data:
for item in data[ITEMS]:
for attr in item.values():
_convert_binary(attr)
if RESPONSES in data:
if isinstance(data[RESPONSES], list):
for item in data[RESPONSES]:
for attr in item.values():
_convert_binary(attr)
else:
for item_list in data[RESPONSES].values():
for item in item_list:
for attr in item.values():
_convert_binary(attr)
if LAST_EVALUATED_KEY in data:
for attr in data[LAST_EVALUATED_KEY].values():
_convert_binary(attr)
if UNPROCESSED_KEYS in data:
for table_data in data[UNPROCESSED_KEYS].values():
for item in table_data[KEYS]:
for attr in item.values():
_convert_binary(attr)
if UNPROCESSED_ITEMS in data:
for table_unprocessed_requests in data[UNPROCESSED_ITEMS].values():
for request in table_unprocessed_requests:
for item_mapping in request.values():
for item in item_mapping.values():
for attr in item.values():
_convert_binary(attr)
if ATTRIBUTES in data:
for attr in data[ATTRIBUTES].values():
_convert_binary(attr)
return data
@property
def session(self) -> botocore.session.Session:
"""
Returns a valid botocore session
"""
# botocore client creation is not thread safe as of v1.2.5+ (see issue #153)
if getattr(self._local, 'session', None) is None:
self._local.session = get_session()
return self._local.session
@property
def client(self):
"""
Returns a botocore dynamodb client
"""
# botocore has a known issue where it will cache empty credentials
# https://github.com/boto/botocore/blob/4d55c9b4142/botocore/credentials.py#L1016-L1021
# if the client does not have credentials, we create a new client
# otherwise the client is permanently poisoned in the case of metadata service flakiness when using IAM roles
if not self._client or (self._client._request_signer and not self._client._request_signer._credentials):
config = botocore.client.Config(
parameter_validation=False, # Disable unnecessary validation for performance
connect_timeout=self._connect_timeout_seconds,
read_timeout=self._read_timeout_seconds,
max_pool_connections=self._max_pool_connections)
self._client = self.session.create_client(SERVICE_NAME, self.region, endpoint_url=self.host, config=config)
return self._client
def get_meta_table(self, table_name: str, refresh: bool = False):
"""
Returns a MetaTable
"""
if table_name not in self._tables or refresh:
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DESCRIBE_TABLE, operation_kwargs)
self._tables[table_name] = MetaTable(data.get(TABLE_KEY))
except BotoCoreError as e:
raise TableError("Unable to describe table: {}".format(e), e)
except ClientError as e:
if 'ResourceNotFound' in e.response['Error']['Code']:
raise TableDoesNotExist(e.response['Error']['Message'])
else:
raise
return self._tables[table_name]
def create_table(
self,
table_name: str,
attribute_definitions: Optional[Any] = None,
key_schema: Optional[Any] = None,
read_capacity_units: Optional[int] = None,
write_capacity_units: Optional[int] = None,
global_secondary_indexes: Optional[Any] = None,
local_secondary_indexes: Optional[Any] = None,
stream_specification: Optional[Dict] = None,
billing_mode: str = DEFAULT_BILLING_MODE,
tags: Optional[Dict[str, str]] = None,
) -> Dict:
"""
Performs the CreateTable operation
"""
operation_kwargs: Dict[str, Any] = {
TABLE_NAME: table_name,
BILLING_MODE: billing_mode,
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units,
}
}
attrs_list = []
if attribute_definitions is None:
raise ValueError("attribute_definitions argument is required")
for attr in attribute_definitions:
attrs_list.append({
ATTR_NAME: attr.get('attribute_name'),
ATTR_TYPE: attr.get('attribute_type')
})
operation_kwargs[ATTR_DEFINITIONS] = attrs_list
if billing_mode not in AVAILABLE_BILLING_MODES:
raise ValueError("incorrect value for billing_mode, available modes: {}".format(AVAILABLE_BILLING_MODES))
if billing_mode == PAY_PER_REQUEST_BILLING_MODE:
del operation_kwargs[PROVISIONED_THROUGHPUT]
elif billing_mode == PROVISIONED_BILLING_MODE:
del operation_kwargs[BILLING_MODE]
if global_secondary_indexes:
global_secondary_indexes_list = []
for index in global_secondary_indexes:
index_kwargs = {
INDEX_NAME: index.get('index_name'),
KEY_SCHEMA: sorted(index.get('key_schema'), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get('projection'),
PROVISIONED_THROUGHPUT: index.get('provisioned_throughput')
}
if billing_mode == PAY_PER_REQUEST_BILLING_MODE:
del index_kwargs[PROVISIONED_THROUGHPUT]
global_secondary_indexes_list.append(index_kwargs)
operation_kwargs[GLOBAL_SECONDARY_INDEXES] = global_secondary_indexes_list
if key_schema is None:
raise ValueError("key_schema is required")
key_schema_list = []
for item in key_schema:
key_schema_list.append({
ATTR_NAME: item.get('attribute_name'),
KEY_TYPE: str(item.get('key_type')).upper()
})
operation_kwargs[KEY_SCHEMA] = sorted(key_schema_list, key=lambda x: x.get(KEY_TYPE))
local_secondary_indexes_list = []
if local_secondary_indexes:
for index in local_secondary_indexes:
local_secondary_indexes_list.append({
INDEX_NAME: index.get('index_name'),
KEY_SCHEMA: sorted(index.get('key_schema'), key=lambda x: x.get(KEY_TYPE)),
PROJECTION: index.get('projection'),
})
operation_kwargs[LOCAL_SECONDARY_INDEXES] = local_secondary_indexes_list
if stream_specification:
operation_kwargs[STREAM_SPECIFICATION] = {
STREAM_ENABLED: stream_specification['stream_enabled'],
STREAM_VIEW_TYPE: stream_specification['stream_view_type']
}
if tags:
operation_kwargs[TAGS] = [
{
KEY: k,
VALUE: v
} for k, v in tags.items()
]
try:
data = self.dispatch(CREATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to create table: {}".format(e), e)
return data
def update_time_to_live(self, table_name: str, ttl_attribute_name: str) -> Dict:
"""
Performs the UpdateTimeToLive operation
"""
operation_kwargs = {
TABLE_NAME: table_name,
TIME_TO_LIVE_SPECIFICATION: {
ATTR_NAME: ttl_attribute_name,
ENABLED: True,
}
}
try:
return self.dispatch(UPDATE_TIME_TO_LIVE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update TTL on table: {}".format(e), e)
def delete_table(self, table_name: str) -> Dict:
"""
Performs the DeleteTable operation
"""
operation_kwargs = {
TABLE_NAME: table_name
}
try:
data = self.dispatch(DELETE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to delete table: {}".format(e), e)
return data
def update_table(
self,
table_name: str,
read_capacity_units: Optional[int] = None,
write_capacity_units: Optional[int] = None,
global_secondary_index_updates: Optional[Any] = None,
) -> Dict:
"""
Performs the UpdateTable operation
"""
operation_kwargs: Dict[str, Any] = {
TABLE_NAME: table_name
}
if read_capacity_units and not write_capacity_units or write_capacity_units and not read_capacity_units:
raise ValueError("read_capacity_units and write_capacity_units are required together")
if read_capacity_units and write_capacity_units:
operation_kwargs[PROVISIONED_THROUGHPUT] = {
READ_CAPACITY_UNITS: read_capacity_units,
WRITE_CAPACITY_UNITS: write_capacity_units
}
if global_secondary_index_updates:
global_secondary_indexes_list = []
for index in global_secondary_index_updates:
global_secondary_indexes_list.append({
UPDATE: {
INDEX_NAME: index.get('index_name'),
PROVISIONED_THROUGHPUT: {
READ_CAPACITY_UNITS: index.get('read_capacity_units'),
WRITE_CAPACITY_UNITS: index.get('write_capacity_units')
}
}
})
operation_kwargs[GLOBAL_SECONDARY_INDEX_UPDATES] = global_secondary_indexes_list
try:
return self.dispatch(UPDATE_TABLE, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Failed to update table: {}".format(e), e)
def list_tables(
self,
exclusive_start_table_name: Optional[str] = None,
limit: Optional[int] = None,
) -> Dict:
"""
Performs the ListTables operation
"""
operation_kwargs: Dict[str, Any] = {}
if exclusive_start_table_name:
operation_kwargs.update({
EXCLUSIVE_START_TABLE_NAME: exclusive_start_table_name
})
if limit is not None:
operation_kwargs.update({
LIMIT: limit
})
try:
return self.dispatch(LIST_TABLES, operation_kwargs)
except BOTOCORE_EXCEPTIONS as e:
raise TableError("Unable to list tables: {}".format(e), e)
def describe_table(self, table_name: str) -> Dict:
"""
Performs the DescribeTable operation
"""
try:
tbl = self.get_meta_table(table_name, refresh=True)
if tbl:
return tbl.data
except ValueError:
pass
raise TableDoesNotExist(table_name)
def get_item_attribute_map(
self,
table_name: str,
attributes: Any,
item_key: str = ITEM,
pythonic_key: bool = True,
) -> Dict:
"""
Builds up a dynamodb compatible AttributeValue map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {}".format(table_name))
return tbl.get_item_attribute_map(
attributes,
item_key=item_key,
pythonic_key=pythonic_key)
def parse_attribute(
self,
attribute: Any,
return_type: bool = False
) -> Any:
"""
Returns the attribute value, where the attribute can be
a raw attribute value, or a dictionary containing the type:
{'S': 'String value'}
"""
if isinstance(attribute, dict):
for key in ATTRIBUTE_TYPES:
if key in attribute:
if return_type:
return key, attribute.get(key)
return attribute.get(key)
raise ValueError("Invalid attribute supplied: {}".format(attribute))
else:
if return_type:
return None, attribute
return attribute
def get_attribute_type(
self,
table_name: str,
attribute_name: str,
value: Optional[Any] = None
) -> str:
"""
Returns the proper attribute type for a given attribute name
:param value: The attribute value an be supplied just in case the type is already included
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {}".format(table_name))
return tbl.get_attribute_type(attribute_name, value=value)
def get_identifier_map(
self,
table_name: str,
hash_key: str,
range_key: Optional[str] = None,
key: str = KEY
) -> Dict:
"""
Builds the identifier map that is common to several operations
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {}".format(table_name))
return tbl.get_identifier_map(hash_key, range_key=range_key, key=key)
def get_consumed_capacity_map(self, return_consumed_capacity: str) -> Dict:
"""
Builds the consumed capacity map that is common to several operations
"""
if return_consumed_capacity.upper() not in RETURN_CONSUMED_CAPACITY_VALUES:
raise ValueError("{} must be one of {}".format(RETURN_ITEM_COLL_METRICS, RETURN_CONSUMED_CAPACITY_VALUES))
return {
RETURN_CONSUMED_CAPACITY: str(return_consumed_capacity).upper()
}
def get_return_values_map(self, return_values: str) -> Dict:
"""
Builds the return values map that is common to several operations
"""
if return_values.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{} must be one of {}".format(RETURN_VALUES, RETURN_VALUES_VALUES))
return {
RETURN_VALUES: str(return_values).upper()
}
def get_return_values_on_condition_failure_map(
self,
return_values_on_condition_failure: str
) -> Dict:
"""
Builds the return values map that is common to several operations
"""
if return_values_on_condition_failure.upper() not in RETURN_VALUES_VALUES:
raise ValueError("{} must be one of {}".format(
RETURN_VALUES_ON_CONDITION_FAILURE,
RETURN_VALUES_ON_CONDITION_FAILURE_VALUES
))
return {
RETURN_VALUES_ON_CONDITION_FAILURE: str(return_values_on_condition_failure).upper()
}
def get_item_collection_map(self, return_item_collection_metrics: str) -> Dict:
"""
Builds the item collection map
"""
if return_item_collection_metrics.upper() not in RETURN_ITEM_COLL_METRICS_VALUES:
raise ValueError("{} must be one of {}".format(RETURN_ITEM_COLL_METRICS, RETURN_ITEM_COLL_METRICS_VALUES))
return {
RETURN_ITEM_COLL_METRICS: str(return_item_collection_metrics).upper()
}
def get_exclusive_start_key_map(self, table_name: str, exclusive_start_key: str) -> Dict:
"""
Builds the exclusive start key attribute map
"""
tbl = self.get_meta_table(table_name)
if tbl is None:
raise TableError("No such table {}".format(table_name))
return tbl.get_exclusive_start_key_map(exclusive_start_key)
def get_operation_kwargs(
self,
table_name: str,
hash_key: str,
range_key: Optional[str] = None,
key: str = KEY,
attributes: Optional[Any] = None,
attributes_to_get: Optional[Any] = None,
actions: Optional[Sequence[Action]] = None,
condition: Optional[Condition] = None,
consistent_read: Optional[bool] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
return_values_on_condition_failure: Optional[str] = None
) -> Dict:
self._check_condition('condition', condition)
operation_kwargs: Dict[str, Any] = {}
name_placeholders: Dict[str, str] = {}
expression_attribute_values: Dict[str, Any] = {}
operation_kwargs[TABLE_NAME] = table_name
operation_kwargs.update(self.get_identifier_map(table_name, hash_key, range_key, key=key))
if attributes and operation_kwargs.get(ITEM) is not None:
attrs = self.get_item_attribute_map(table_name, attributes)
operation_kwargs[ITEM].update(attrs[ITEM])
if attributes_to_get is not None:
projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
if condition is not None:
condition_expression = condition.serialize(name_placeholders, expression_attribute_values)
operation_kwargs[CONDITION_EXPRESSION] = condition_expression
if consistent_read is not None:
operation_kwargs[CONSISTENT_READ] = consistent_read
if return_values is not None:
operation_kwargs.update(self.get_return_values_map(return_values))
if return_values_on_condition_failure is not None:
operation_kwargs.update(self.get_return_values_on_condition_failure_map(return_values_on_condition_failure))
if return_consumed_capacity is not None:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics is not None:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
if actions is not None:
update_expression = Update(*actions)
operation_kwargs[UPDATE_EXPRESSION] = update_expression.serialize(
name_placeholders,
expression_attribute_values
)
if name_placeholders:
operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
if expression_attribute_values:
operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
return operation_kwargs
def delete_item(
self,
table_name: str,
hash_key: str,
range_key: Optional[str] = None,
condition: Optional[Condition] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the DeleteItem operation and returns the result
"""
operation_kwargs = self.get_operation_kwargs(
table_name,
hash_key,
range_key=range_key,
condition=condition,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics
)
try:
return self.dispatch(DELETE_ITEM, operation_kwargs, settings)
except BOTOCORE_EXCEPTIONS as e:
raise DeleteError("Failed to delete item: {}".format(e), e)
def update_item(
self,
table_name: str,
hash_key: str,
range_key: Optional[str] = None,
actions: Optional[Sequence[Action]] = None,
condition: Optional[Condition] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
return_values: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the UpdateItem operation
"""
if not actions:
raise ValueError("'actions' cannot be empty")
operation_kwargs = self.get_operation_kwargs(
table_name=table_name,
hash_key=hash_key,
range_key=range_key,
actions=actions,
condition=condition,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics,
)
try:
return self.dispatch(UPDATE_ITEM, operation_kwargs, settings)
except BOTOCORE_EXCEPTIONS as e:
raise UpdateError("Failed to update item: {}".format(e), e)
def put_item(
self,
table_name: str,
hash_key: str,
range_key: Optional[str] = None,
attributes: Optional[Any] = None,
condition: Optional[Condition] = None,
return_values: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the PutItem operation and returns the result
"""
operation_kwargs = self.get_operation_kwargs(
table_name=table_name,
hash_key=hash_key,
range_key=range_key,
key=ITEM,
attributes=attributes,
condition=condition,
return_values=return_values,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics
)
try:
return self.dispatch(PUT_ITEM, operation_kwargs, settings)
except BOTOCORE_EXCEPTIONS as e:
raise PutError("Failed to put item: {}".format(e), e)
def _get_transact_operation_kwargs(
self,
client_request_token: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None
) -> Dict:
operation_kwargs = {}
if client_request_token is not None:
operation_kwargs[CLIENT_REQUEST_TOKEN] = client_request_token
if return_consumed_capacity is not None:
operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
if return_item_collection_metrics is not None:
operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
return operation_kwargs
def transact_write_items(
self,
condition_check_items: Sequence[Dict],
delete_items: Sequence[Dict],
put_items: Sequence[Dict],
update_items: Sequence[Dict],
client_request_token: Optional[str] = None,
return_consumed_capacity: Optional[str] = None,
return_item_collection_metrics: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the TransactWrite operation and returns the result
"""
transact_items: List[Dict] = []
transact_items.extend(
{TRANSACT_CONDITION_CHECK: item} for item in condition_check_items
)
transact_items.extend(
{TRANSACT_DELETE: item} for item in delete_items
)
transact_items.extend(
{TRANSACT_PUT: item} for item in put_items
)
transact_items.extend(
{TRANSACT_UPDATE: item} for item in update_items
)
operation_kwargs = self._get_transact_operation_kwargs(
client_request_token=client_request_token,
return_consumed_capacity=return_consumed_capacity,
return_item_collection_metrics=return_item_collection_metrics
)
operation_kwargs[TRANSACT_ITEMS] = transact_items
try:
return self.dispatch(TRANSACT_WRITE_ITEMS, operation_kwargs, settings)
except BOTOCORE_EXCEPTIONS as e:
raise TransactWriteError("Failed to write transaction items", e)
def transact_get_items(
self,
get_items: Sequence[Dict],
return_consumed_capacity: Optional[str] = None,
settings: OperationSettings = OperationSettings.default,
) -> Dict:
"""
Performs the TransactGet operation and returns the result
"""
operation_kwargs = self._get_transact_operation_kwargs(return_consumed_capacity=return_consumed_capacity)
operation_kwargs[TRANSACT_ITEMS] = [
{TRANSACT_GET: item} for item in get_items
]
try:
return self.dispatch(TRANSACT_GET_ITEMS, operation_kwargs, settings)
except BOTOCORE_EXCEPTIONS as e:
raise TransactGetError("Failed to get transaction items", e)
def batch_write_item(
    self,
    table_name: str,
    put_items: Optional[Any] = None,
    delete_items: Optional[Any] = None,
    return_consumed_capacity: Optional[str] = None,
    return_item_collection_metrics: Optional[str] = None,
    settings: OperationSettings = OperationSettings.default,
) -> Dict:
    """
    Performs the BatchWriteItem operation against a single table.

    At least one of ``put_items`` / ``delete_items`` must be supplied;
    delete requests precede put requests in the composed request.

    :raises PutError: if the underlying botocore call fails
    """
    if put_items is None and delete_items is None:
        raise ValueError("Either put_items or delete_items must be specified")
    operation_kwargs: Dict[str, Any] = {REQUEST_ITEMS: {table_name: []}}
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if return_item_collection_metrics:
        operation_kwargs.update(self.get_item_collection_map(return_item_collection_metrics))
    # Wrap each key/item in the request structure the service expects.
    delete_requests = [
        {DELETE_REQUEST: self.get_item_attribute_map(table_name, item, item_key=KEY, pythonic_key=False)}
        for item in (delete_items or [])
    ]
    put_requests = [
        {PUT_REQUEST: self.get_item_attribute_map(table_name, item, pythonic_key=False)}
        for item in (put_items or [])
    ]
    operation_kwargs[REQUEST_ITEMS][table_name] = delete_requests + put_requests
    try:
        return self.dispatch(BATCH_WRITE_ITEM, operation_kwargs, settings)
    except BOTOCORE_EXCEPTIONS as e:
        raise PutError("Failed to batch write items: {}".format(e), e)
def batch_get_item(
    self,
    table_name: str,
    keys: Sequence[str],
    consistent_read: Optional[bool] = None,
    return_consumed_capacity: Optional[str] = None,
    attributes_to_get: Optional[Any] = None,
    settings: OperationSettings = OperationSettings.default,
) -> Dict:
    """
    Performs the BatchGetItem operation against a single table.

    :param table_name: name of the table to read from
    :param keys: key values; each is serialized via ``get_item_attribute_map``
        and its ``ITEM`` payload is appended to the request's key list
    :param consistent_read: when truthy, request strongly consistent reads
    :param return_consumed_capacity: forwarded into the request when set
    :param attributes_to_get: attribute paths turned into a projection expression
    :param settings: per-operation settings forwarded to ``dispatch``
    :raises GetError: if the underlying botocore call fails
    """
    operation_kwargs: Dict[str, Any] = {
        REQUEST_ITEMS: {
            table_name: {}
        }
    }
    args_map: Dict[str, Any] = {}
    name_placeholders: Dict[str, str] = {}
    if consistent_read:
        args_map[CONSISTENT_READ] = consistent_read
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if attributes_to_get is not None:
        projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
        args_map[PROJECTION_EXPRESSION] = projection_expression
    if name_placeholders:
        # Placeholders are accumulated as name -> alias; the wire format
        # wants alias -> name, hence the reversal.
        args_map[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    operation_kwargs[REQUEST_ITEMS][table_name].update(args_map)
    keys_map: Dict[str, List] = {KEYS: []}
    for key in keys:
        keys_map[KEYS].append(
            self.get_item_attribute_map(table_name, key)[ITEM]
        )
    operation_kwargs[REQUEST_ITEMS][table_name].update(keys_map)
    try:
        return self.dispatch(BATCH_GET_ITEM, operation_kwargs, settings)
    except BOTOCORE_EXCEPTIONS as e:
        raise GetError("Failed to batch get items: {}".format(e), e)
def get_item(
    self,
    table_name: str,
    hash_key: str,
    range_key: Optional[str] = None,
    consistent_read: bool = False,
    attributes_to_get: Optional[Any] = None,
    settings: OperationSettings = OperationSettings.default,
) -> Dict:
    """
    Performs the GetItem operation and returns the raw response.

    Thin wrapper: builds the request via ``get_operation_kwargs`` and
    dispatches it, translating botocore failures into ``GetError``.
    """
    kwargs = self.get_operation_kwargs(
        table_name=table_name,
        hash_key=hash_key,
        range_key=range_key,
        consistent_read=consistent_read,
        attributes_to_get=attributes_to_get,
    )
    try:
        return self.dispatch(GET_ITEM, kwargs, settings)
    except BOTOCORE_EXCEPTIONS as e:
        raise GetError("Failed to get item: {}".format(e), e)
def scan(
    self,
    table_name: str,
    filter_condition: Optional[Any] = None,
    attributes_to_get: Optional[Any] = None,
    limit: Optional[int] = None,
    return_consumed_capacity: Optional[str] = None,
    exclusive_start_key: Optional[str] = None,
    segment: Optional[int] = None,
    total_segments: Optional[int] = None,
    consistent_read: Optional[bool] = None,
    index_name: Optional[str] = None,
    settings: OperationSettings = OperationSettings.default,
) -> Dict:
    """
    Performs the Scan operation and returns the raw response.

    Optional arguments are added to the request only when supplied; note
    that ``limit`` and ``segment`` are compared against ``None`` so that a
    legitimate value of 0 is still sent.

    :raises ScanError: if the underlying botocore call fails
    """
    self._check_condition('filter_condition', filter_condition)
    kwargs: Dict[str, Any] = {TABLE_NAME: table_name}
    name_placeholders: Dict[str, str] = {}
    expression_attribute_values: Dict[str, Any] = {}
    if filter_condition is not None:
        kwargs[FILTER_EXPRESSION] = filter_condition.serialize(
            name_placeholders, expression_attribute_values)
    if attributes_to_get is not None:
        kwargs[PROJECTION_EXPRESSION] = create_projection_expression(
            attributes_to_get, name_placeholders)
    if index_name:
        kwargs[INDEX_NAME] = index_name
    if limit is not None:
        kwargs[LIMIT] = limit
    if return_consumed_capacity:
        kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if exclusive_start_key:
        kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
    if segment is not None:
        kwargs[SEGMENT] = segment
    if total_segments:
        kwargs[TOTAL_SEGMENTS] = total_segments
    if consistent_read:
        kwargs[CONSISTENT_READ] = consistent_read
    if name_placeholders:
        # Serialization builds name -> placeholder; the request needs the inverse.
        kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(SCAN, kwargs, settings)
    except BOTOCORE_EXCEPTIONS as e:
        raise ScanError("Failed to scan table: {}".format(e), e)
def query(
    self,
    table_name: str,
    hash_key: str,
    range_key_condition: Optional[Condition] = None,
    filter_condition: Optional[Any] = None,
    attributes_to_get: Optional[Any] = None,
    consistent_read: bool = False,
    exclusive_start_key: Optional[Any] = None,
    index_name: Optional[str] = None,
    limit: Optional[int] = None,
    return_consumed_capacity: Optional[str] = None,
    scan_index_forward: Optional[bool] = None,
    select: Optional[str] = None,
    settings: OperationSettings = OperationSettings.default,
) -> Dict:
    """
    Performs the Query operation and returns the raw response.

    :param table_name: table to query; its metadata must be resolvable via
        ``get_meta_table``
    :param hash_key: value of the (table or index) hash key to match
    :param range_key_condition: optional ``Condition`` ANDed into the key
        condition expression
    :param filter_condition: optional ``Condition`` applied after the key match
    :raises TableError: if the table's metadata cannot be found
    :raises ValueError: for an unknown index name or invalid ``select`` value
    :raises QueryError: if the underlying botocore call fails
    """
    self._check_condition('range_key_condition', range_key_condition)
    self._check_condition('filter_condition', filter_condition)
    operation_kwargs: Dict[str, Any] = {TABLE_NAME: table_name}
    name_placeholders: Dict[str, str] = {}
    expression_attribute_values: Dict[str, Any] = {}
    tbl = self.get_meta_table(table_name)
    if tbl is None:
        raise TableError("No such table: {}".format(table_name))
    # Query an index's hash key when an index is given; otherwise the table's.
    if index_name:
        if not tbl.has_index_name(index_name):
            raise ValueError("Table {} has no index: {}".format(table_name, index_name))
        hash_keyname = tbl.get_index_hash_keyname(index_name)
    else:
        hash_keyname = tbl.hash_keyname
    # Serialize the hash key value as {attribute_type: value}, build the key
    # condition, and AND on the range key condition when provided.
    hash_condition_value = {self.get_attribute_type(table_name, hash_keyname, hash_key): self.parse_attribute(hash_key)}
    key_condition = Path([hash_keyname]) == hash_condition_value
    if range_key_condition is not None:
        key_condition &= range_key_condition
    operation_kwargs[KEY_CONDITION_EXPRESSION] = key_condition.serialize(
        name_placeholders, expression_attribute_values)
    if filter_condition is not None:
        filter_expression = filter_condition.serialize(name_placeholders, expression_attribute_values)
        operation_kwargs[FILTER_EXPRESSION] = filter_expression
    if attributes_to_get:
        projection_expression = create_projection_expression(attributes_to_get, name_placeholders)
        operation_kwargs[PROJECTION_EXPRESSION] = projection_expression
    if consistent_read:
        operation_kwargs[CONSISTENT_READ] = True
    if exclusive_start_key:
        operation_kwargs.update(self.get_exclusive_start_key_map(table_name, exclusive_start_key))
    if index_name:
        operation_kwargs[INDEX_NAME] = index_name
    if limit is not None:
        # Compared against None so a limit of 0 would still be sent.
        operation_kwargs[LIMIT] = limit
    if return_consumed_capacity:
        operation_kwargs.update(self.get_consumed_capacity_map(return_consumed_capacity))
    if select:
        if select.upper() not in SELECT_VALUES:
            raise ValueError("{} must be one of {}".format(SELECT, SELECT_VALUES))
        operation_kwargs[SELECT] = str(select).upper()
    if scan_index_forward is not None:
        operation_kwargs[SCAN_INDEX_FORWARD] = scan_index_forward
    if name_placeholders:
        # Serialization builds name -> placeholder; the request needs the inverse.
        operation_kwargs[EXPRESSION_ATTRIBUTE_NAMES] = self._reverse_dict(name_placeholders)
    if expression_attribute_values:
        operation_kwargs[EXPRESSION_ATTRIBUTE_VALUES] = expression_attribute_values
    try:
        return self.dispatch(QUERY, operation_kwargs, settings)
    except BOTOCORE_EXCEPTIONS as e:
        raise QueryError("Failed to query items: {}".format(e), e)
def _check_condition(self, name, condition):
    """Raise ``ValueError`` unless ``condition`` is None or a ``Condition``."""
    if condition is None:
        return
    if not isinstance(condition, Condition):
        raise ValueError("'{}' must be an instance of Condition".format(name))
@staticmethod
def _reverse_dict(d):
    """Return a new dict mapping each value of ``d`` back to its key."""
    return dict(zip(d.values(), d.keys()))
def _convert_binary(attr):
    """
    In-place: base64-decode binary payloads in a serialized attribute dict.

    Handles both the single-binary and the binary-set attribute forms.
    """
    if BINARY in attr:
        attr[BINARY] = b64decode(attr[BINARY].encode(DEFAULT_ENCODING))
    elif BINARY_SET in attr:
        values = attr[BINARY_SET]
        if values:
            attr[BINARY_SET] = {b64decode(v.encode(DEFAULT_ENCODING)) for v in values}
| |
#!/usr/bin/env python
"""
This is a contextualization script required by CloudMan; it is automatically run
at instance startup (via an upstart job).
Requires:
PyYAML http://pyyaml.org/wiki/PyYAMLDocumentation (easy_install pyyaml)
boto http://code.google.com/p/boto/ (easy_install boto)
Assumptions:
DEFAULT_BUCKET_NAME and DEFAULT_BOOT_SCRIPT_NAME are publicly accessible and
do not require any form of authentication
"""
import os, sys, yaml, urllib2, logging, hashlib, time, subprocess, random
from urlparse import urlparse
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.connection import OrdinaryCallingFormat
# Quieten boto's own logging before our logger is configured.
logging.getLogger('boto').setLevel(logging.INFO) # Only log boto messages >=INFO
# Module-level logger; initialized by _setup_logging() from main().
log = None
# EC2 instance-metadata endpoint serving this instance's user data.
USER_DATA_URL = 'http://169.254.169.254/latest/user-data'
# USER_DATA_URL = 'http://userwww.service.emory.edu/~eafgan/content/userData.yaml.sample' # used for testing
# USER_DATA_URL = 'http://userwww.service.emory.edu/~eafgan/content/url_ud.txt' # used for testing
LOCAL_PATH = '/tmp/cm' # Local path destination used for storing/reading any files created by this script
USER_DATA_FILE_NAME = 'userData.yaml' # Local file with user data formatted by this script
USER_DATA_FILE = os.path.join(LOCAL_PATH, USER_DATA_FILE_NAME) # The final/processed UD file
# Local file containing UD in its original format
USER_DATA_ORIG = os.path.join(LOCAL_PATH, 'original_%s' % USER_DATA_FILE_NAME)
SERVICE_ROOT = 'http://s3.amazonaws.com/' # Obviously, customized for Amazon's S3
DEFAULT_BUCKET_NAME = 'cloudman' # Ensure this bucket is accessible to anyone!
DEFAULT_BOOT_SCRIPT_NAME = 'cm_boot.py' # Ensure this file is accessible to anyone in the public bucket!
# Directory where CloudMan itself is placed on the instance.
CLOUDMAN_HOME = '/mnt/cm'
# ====================== Utility methods ======================
def _setup_logging():
    """Configure the root logger with a console handler and return it."""
    fmt = logging.Formatter("[%(levelname)s] %(module)s:%(lineno)d %(asctime)s: %(message)s")
    console = logging.StreamHandler()  # log to console - used during testing
    console.setFormatter(fmt)
    logger = logging.root
    logger.addHandler(console)
    logger.setLevel(logging.DEBUG)
    return logger
def _get_user_data():
    """
    Fetch instance user data from USER_DATA_URL (up to 5 attempts) and save a
    copy of the raw payload to USER_DATA_ORIG.

    Returns the user data as a string, or '' when none could be retrieved.
    """
    ud = ''
    for i in range(0, 5):
        try:
            log.info("Getting user data from '%s', attempt %s" % (USER_DATA_URL, i))
            fp = urllib2.urlopen(USER_DATA_URL)
            ud = fp.read()
            fp.close()
            log.debug("Saving user data in its original format to file '%s'" % USER_DATA_ORIG)
            with open(USER_DATA_ORIG, 'w') as ud_orig:
                ud_orig.write(ud)
            if ud:
                log.debug("Got user data")
                return ud
        except IOError:
            # NOTE(review): an IOError returns immediately rather than retrying,
            # so the 5-attempt loop only repeats when an empty body is returned
            # -- confirm this is the intended behavior.
            log.info("User data not found. Setting it to empty.")
            return ''
    # Used for testing
    # return 'http://s3.amazonaws.com/cloudman/cm_boot'
    # return ''
    # return "gc_dev1|<account_key>|<secret_key>|somePWD"
    # with open('sample.yaml') as ud_yaml:
    #     ud = ud_yaml.read()
    if ud == '':
        log.debug("Received empty/no user data")
    return ud
def _get_bucket_name(cluster_name, access_key):
    """Compose a deterministic bucket name from the cluster name and access key."""
    digest = hashlib.md5()
    digest.update(cluster_name + access_key)
    return "cm-" + digest.hexdigest()
def _isurl(path):
    """Test if path is a net location. Tests the scheme and netloc."""
    # BUG : URLs require a scheme string ('http://') to be used.
    # www.google.com will fail.
    # Should we prepend the scheme for those that don't have it and
    # test that also?
    parsed = urlparse(path)
    return bool(parsed[0] and parsed[1])  # scheme and netloc
def _get_s3_conn(ud):
    """
    Create and return a boto S3 connection based on user data ``ud``.

    A custom object-store endpoint is used when ``ud['cloud_type']`` is set
    to a non-EC2 value; otherwise the default Amazon S3 endpoint is used.
    Returns None when no connection could be established.
    """
    # BUGFIX: previously s3_conn was unbound when the custom-endpoint connect
    # raised S3ResponseError, so `return s3_conn` crashed with NameError.
    s3_conn = None
    try:
        if 'cloud_type' in ud and ud['cloud_type'] != 'ec2':
            # If the user has specified a cloud type other than EC2,
            # create an s3 connection using the info from their user data
            log.debug('Establishing boto S3 connection to a custom Object Store')
            try:
                s3_conn = S3Connection(aws_access_key_id=ud['access_key'],
                                       aws_secret_access_key=ud['secret_key'],
                                       is_secure=ud.get('is_secure', True),
                                       host=ud.get('s3_host', ''),
                                       port=ud.get('s3_port', 8888),
                                       calling_format=OrdinaryCallingFormat(),
                                       path=ud.get('s3_conn_path', '/'))
            except S3ResponseError as e:  # 'as' syntax works on py2.6+ and py3
                log.error("Trouble connecting to a custom Object Store. User data: {0}; Exception: {1}"
                          .format(ud, e))
        else:
            # Use the default Amazon S3 connection
            log.debug('Establishing boto S3 connection to Amazon')
            s3_conn = S3Connection(ud['access_key'], ud['secret_key'])
    except Exception as e:
        log.error("Exception getting S3 connection: %s" % e)
        return None
    return s3_conn
def _bucket_exists(s3_conn, bucket_name):
    """
    Look up ``bucket_name`` via ``s3_conn``, retrying up to 5 times.

    Returns True if the bucket was found, False otherwise.
    """
    bucket = None
    for attempt in range(1, 6):
        try:
            # log.debug("Looking for bucket '%s'" % bucket_name)
            bucket = s3_conn.lookup(bucket_name)
            break
        except S3ResponseError:
            # BUGFIX: the counter starts at 1 already; logging attempt+1
            # previously reported attempts 2/5 through 6/5.
            log.error("Bucket '%s' not found, attempt %s/5" % (bucket_name, attempt))
            time.sleep(2)
    if bucket is not None:
        log.debug("Cluster bucket '%s' found." % bucket_name)
        return True
    else:
        log.debug("Cluster bucket '%s' not found." % bucket_name)
        return False
def _remote_file_exists(s3_conn, bucket_name, remote_filename):
    """
    Check whether ``remote_filename`` exists in ``bucket_name``.

    The bucket handle is fetched with up to 5 retries; returns False when
    the bucket cannot be reached or the key does not exist.
    """
    b = None
    for i in range(0, 5):
        try:
            b = s3_conn.get_bucket(bucket_name)
            break
        except S3ResponseError:
            # BUGFIX: i is zero-based; previously this logged 0/5 .. 4/5.
            log.error("Problem connecting to bucket '%s', attempt %s/5" % (bucket_name, i + 1))
            time.sleep(2)
    if b is not None:
        k = Key(b, remote_filename)
        if k.exists():
            return True
    return False
def _save_file_to_bucket(s3_conn, bucket_name, remote_filename, local_file, force=False):
    """
    Upload ``local_file`` (relative to LOCAL_PATH) to ``bucket_name`` as
    ``remote_filename``.

    An existing remote file is left untouched unless ``force`` is True.
    Returns True on success (or when the file already exists), False otherwise.
    """
    local_file = os.path.join(LOCAL_PATH, local_file)
    b = None
    # The bucket handle is fetched with up to 5 retries.
    for i in range(0, 5):
        try:
            b = s3_conn.get_bucket(bucket_name)
            break
        except S3ResponseError:  # unused 'e' binding removed
            # BUGFIX: i is zero-based; previously this logged 0/5 .. 4/5.
            log.error("Problem connecting to bucket '%s', attempt %s/5" % (bucket_name, i + 1))
            time.sleep(2)
    if b is None:
        return False
    k = Key(b, remote_filename)
    if k.exists() and not force:
        log.debug("Remote file '%s' already exists. Not overwriting it." % remote_filename)
        return True
    log.debug( "Attempting to save local file '%s' to bucket '%s' as '%s'"
        % (local_file, bucket_name, remote_filename))
    try:
        k.set_contents_from_filename(local_file)
        log.info( "Successfully saved file '%s' to bucket '%s'." % (remote_filename, bucket_name))
        return True
    except S3ResponseError as e:  # 'as' syntax works on py2.6+ and py3
        log.error("Failed to save file local file '%s' to bucket '%s' as file '%s': %s"
            % (local_file, bucket_name, remote_filename, e))
        return False
def _get_file_from_bucket(s3_conn, bucket_name, remote_filename, local_filename):
    """
    Download ``remote_filename`` from ``bucket_name`` into LOCAL_PATH as
    ``local_filename``. Returns True on success, False otherwise.
    """
    local_filename = os.path.join(LOCAL_PATH, local_filename)
    try:
        b = s3_conn.get_bucket(bucket_name)
        k = Key(b, remote_filename)
        log.debug("Attempting to retrieve file '%s' from bucket '%s'" % (remote_filename, bucket_name))
        if k.exists():
            k.get_contents_to_filename(local_filename)
            log.info("Successfully retrieved file '%s' from bucket '%s' to '%s'."
                % (remote_filename, bucket_name, local_filename))
            return True
        else:
            log.error("File '%s' in bucket '%s' not found." % (remote_filename, bucket_name))
            return False
    except S3ResponseError as e:  # py2-only 'except X, e' replaced; valid py2.6+/py3
        log.error("Failed to get file '%s' from bucket '%s': %s" % (remote_filename, bucket_name, e))
        return False
def _get_file_from_url(url):
    """
    Download ``url`` into LOCAL_PATH, keeping the remote file name.

    Returns True on success, False if the URL could not be retrieved.
    """
    local_filename = os.path.join(LOCAL_PATH, os.path.split(url)[1])
    log.info("Getting boot script from '%s' and saving it locally to '%s'" % (url, local_filename))
    try:
        f = urllib2.urlopen(url)
        with open(local_filename, 'w') as local_file:
            local_file.write(f.read())
        # Owner rwx, group/other read: the boot script must be executable.
        # BUGFIX(compat): 0o744 instead of py2-only literal 0744 (valid py2.6+).
        os.chmod(local_filename, 0o744)
        if f:
            log.debug("Got boot script from '%s'" % url)
            return True
        return False
    except IOError:
        log.error("Boot script at '%s' not found." % url)
        return False
def _get_boot_script(ud):
    """
    Download the CloudMan boot script to LOCAL_PATH as DEFAULT_BOOT_SCRIPT_NAME,
    preferring the cluster's own bucket (with credentials) and falling back to
    the publicly readable default bucket. Returns True on success.
    """
    # Test if cluster bucket exists; if it does not, resort to the default
    # bucket for downloading the boot script
    use_default_bucket = ud.get("use_default_bucket", False)
    if ud.has_key('bucket_default'):
        default_bucket_name = ud['bucket_default']
    else:
        default_bucket_name = DEFAULT_BUCKET_NAME
    # Only try the cluster bucket when credentials and a bucket name exist.
    if not use_default_bucket and ud.has_key('bucket_cluster') and ud['access_key'] is not None and ud['secret_key'] is not None:
        s3_conn = _get_s3_conn(ud)
        # Check if cluster bucket exists or use the default one
        if not _bucket_exists(s3_conn, ud['bucket_cluster']) or \
           not _remote_file_exists(s3_conn, ud['bucket_cluster'], ud['boot_script_name']):
            log.debug("Using default bucket '%s'" % default_bucket_name)
            use_default_bucket = True
        else:
            log.debug("Using cluster bucket '%s'" % ud['bucket_cluster'])
            use_default_bucket = False
    else:
        log.debug("bucket_clutser not specified or no credentials provided; defaulting to bucket '%s'"
            % default_bucket_name)
        use_default_bucket = True
    # If using cluster bucket, use credentials because the boot script may not be accessible to everyone
    got_boot_script = False
    if use_default_bucket is False:
        log.debug("Trying to get boot script '%s' from cluster bucket '%s'"
            % (ud['boot_script_name'], ud.get('bucket_cluster', None)))
        got_boot_script = _get_file_from_bucket(s3_conn, ud['bucket_cluster'], ud['boot_script_name'],
            DEFAULT_BOOT_SCRIPT_NAME)
        if got_boot_script:
            os.chmod(os.path.join(LOCAL_PATH, DEFAULT_BOOT_SCRIPT_NAME), 0744)
    # If did not get the boot script, fall back on the publicly available one
    if not got_boot_script or use_default_bucket:
        boot_script_url = os.path.join(_get_default_bucket_url(ud), ud.get('boot_script_name',
            DEFAULT_BOOT_SCRIPT_NAME))
        log.debug("Could not get boot script '%s' from cluster bucket '%s'; "
            "retrieving the public one from bucket url '%s'" \
            % (ud['boot_script_name'], ud.get('bucket_cluster', None), boot_script_url))
        got_boot_script = _get_file_from_url(boot_script_url)
    if got_boot_script:
        log.debug("Saved boot script to '%s'" % os.path.join(LOCAL_PATH, DEFAULT_BOOT_SCRIPT_NAME))
        # Save the downloaded boot script to cluster bucket for future invocations
        use_object_store = ud.get("use_object_store", True)
        if use_object_store and ud.has_key('bucket_cluster') and ud['bucket_cluster']:
            s3_conn = _get_s3_conn(ud)
            if _bucket_exists(s3_conn, ud['bucket_cluster']) and \
               not _remote_file_exists(s3_conn, ud['bucket_cluster'], ud['boot_script_name']):
                _save_file_to_bucket(s3_conn, ud['bucket_cluster'], ud['boot_script_name'], \
                    DEFAULT_BOOT_SCRIPT_NAME)
        return True
    log.debug("**Could not get the boot script**")
    return False
def _run_boot_script(boot_script_name):
    """Execute the named boot script from LOCAL_PATH; return True iff it exits 0."""
    script = os.path.join(LOCAL_PATH, boot_script_name)
    log.info("Running boot script '%s'" % script)
    process = subprocess.Popen(script, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        log.error("Error running boot script '%s'. Process returned code '%s' and following stderr: %s"
            % (script, process.returncode, stderr))
        return False
    log.debug("Successfully ran boot script '%s'" % script)
    return True
def _create_basic_user_data_file():
    """Write a minimal CloudMan-compatible user-data YAML file; return the dict."""
    ud_formatted = {
        'access_key': None,
        'boot_script_name': DEFAULT_BOOT_SCRIPT_NAME,
        'boot_script_path': LOCAL_PATH,
        'bucket_default': DEFAULT_BUCKET_NAME,
        'bucket_cluster': None,
        'cloudman_home': CLOUDMAN_HOME,
        'cluster_name': 'aGalaxyCloudManCluster_%s' % random.randrange(1, 9999999),
        'role': 'master',
        'secret_key': None,
    }
    with open(USER_DATA_FILE, 'w') as ud_file:
        yaml.dump(ud_formatted, ud_file, default_flow_style=False)
    return ud_formatted
def _get_default_bucket_url(ud=None):
    """Return the URL of the default bucket, honoring user-data overrides."""
    if ud and 'bucket_default' in ud:
        default_bucket_name = ud['bucket_default']
    else:
        default_bucket_name = DEFAULT_BUCKET_NAME
    # TODO: Check if the bucket 'default_bucket_name' is accessible to everyone
    # because it is being accessed as a URL
    bucket_url = ud.get("default_bucket_url", None) if ud else None
    if not bucket_url:
        bucket_url = os.path.join(SERVICE_ROOT, default_bucket_name)
    log.debug("Default bucket url: %s" % bucket_url)
    return bucket_url
def _user_exists(username):
    """ Check if the given username exists as a system user
    """
    # BUGFIX: the original used ep.find(username) > 0, which (a) missed a user
    # listed on the very first line of /etc/passwd (find() == 0) and
    # (b) matched the name as a substring anywhere in the file (e.g. inside a
    # home directory path). Match the login field of each entry exactly.
    with open('/etc/passwd', 'r') as f:
        for line in f:
            if line.split(':', 1)[0] == username:
                return True
    return False
def _allow_password_logins(passwd):
    """
    Set ``passwd`` as the login password for the known system users
    ('ubuntu', 'galaxy') and enable SSH password authentication.
    """
    for user in ["ubuntu", "galaxy"]:
        if _user_exists(user):
            log.info("Setting up password-based login for user '{0}'".format(user))
            # Equivalent of `echo user:passwd | chpasswd`.
            p1 = subprocess.Popen(["echo", "%s:%s" % (user, passwd)], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["chpasswd"], stdin=p1.stdout, stdout=subprocess.PIPE)
            p1.stdout.close()
            p2.communicate()[0]
            # Turn on password authentication in sshd and reload its config.
            cl = ["sed", "-i", "s/^PasswordAuthentication .*/PasswordAuthentication yes/",
                  "/etc/ssh/sshd_config"]
            subprocess.check_call(cl)
            cl = ["/usr/sbin/service", "ssh", "reload"]
            subprocess.check_call(cl)
def _handle_freenx(passwd):
    """Reconfigure the FreeNX server when installed, retrying debconf conflicts."""
    # Check if FreeNX is installed on the image before trying to configure it
    retcode = subprocess.call("/usr/bin/dpkg --get-selections | /bin/grep freenx", shell=True)
    if retcode != 0:
        log.info("freenx-server is not installed; not configuring it")
        return
    log.info("Setting up FreeNX")
    reconfigure = ["dpkg-reconfigure", "-pcritical", "freenx-server"]
    # On slower/small instance types, there can be a conflict when running
    # debconf so try this a few times
    for _ in range(5):
        if subprocess.call(reconfigure) == 0:
            break
        time.sleep(5)
# ====================== Actions methods ======================
def _handle_empty():
    """Fall back to fully default contextualization when no user data exists."""
    log.info("Received empty user data; assuming default contextualization")
    _create_basic_user_data_file()  # This file is expected by CloudMan
    # Fetch the public boot script from the default bucket, then run it.
    boot_script_url = os.path.join(_get_default_bucket_url(), DEFAULT_BOOT_SCRIPT_NAME)
    log.debug("Resorting to the default bucket to get the boot script: %s" % boot_script_url)
    _get_file_from_url(boot_script_url)
    _run_boot_script(DEFAULT_BOOT_SCRIPT_NAME)
def _handle_url(url):
    """Treat the user data as a URL pointing at a boot script: fetch and run it."""
    log.info("Handling user data provided URL: '%s'" % url)
    _get_file_from_url(url)
    _run_boot_script(os.path.split(url)[1])
# Merge approach adapted from
# http://stackoverflow.com/questions/823196/yaml-merge-in-python
def _merge(specific, default):
    """
    Recursively merge two YAML-derived structures, giving priority to
    ``specific`` and filling in any missing keys from ``default``.
    """
    if not (isinstance(specific, dict) and isinstance(default, dict)):
        return specific
    for key, default_value in default.iteritems():
        if key in specific:
            specific[key] = _merge(specific[key], default_value)
        else:
            specific[key] = default_value
    return specific
def _load_user_data(user_data):
    """ Loads user data into dict (using pyyaml). If machine image
    contains default data this is loaded and populated in resulting
    data structure as well. These separate options are merged using
    the `_merge` function above and priority is always given to
    user supplied options.
    """
    # SECURITY NOTE(review): yaml.load without an explicit safe Loader can
    # construct arbitrary Python objects; since user data arrives from the
    # metadata service, consider yaml.safe_load here.
    ud = yaml.load(user_data)
    # When the input is not a YAML mapping, yaml.load yields the string back
    # unchanged; such "bad" user data is returned as-is without merging.
    if ud == user_data:
        # Bad user data, cannot merge default
        return ud
    default_user_data_path = \
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'IMAGE_USER_DATA')
    if os.path.exists(default_user_data_path):
        image_ud = yaml.load(open(default_user_data_path, 'r').read())
        if image_ud:
            ud = _merge(ud, image_ud)
    return ud
def _handle_yaml(user_data):
    """
    Process user data in YAML format: normalize/validate the expected fields,
    persist the composed user data to USER_DATA_FILE, then fetch and run the
    boot script.
    """
    log.info("Handling user data in YAML format.")
    ud = _load_user_data(user_data)
    # Handle bad user data as a string
    if ud == user_data:
        return _handle_empty()
    # Allow password based logins. Do so also in case only NX is being setup.
    if "freenxpass" in ud or "password" in ud:
        passwd = ud.get("freenxpass", None) or ud.get("password", None)
        _allow_password_logins(passwd)
    # Handle freenx passwords and the case with only a NX password sent
    if "freenxpass" in ud:
        _handle_freenx(ud["freenxpass"])
        if len(ud) == 1:
            # freenxpass was the only key: nothing else to contextualize.
            return _handle_empty()
    # Create a YAML file from user data and store it as USER_DATA_FILE
    # This code simply ensures fields required by CloudMan are in the
    # created file. Any other fields that might be included as user data
    # are also included in the created USER_DATA_FILE
    if ud.get('no_start', None) is not None:
        log.info("Received 'no_start' user data option. Not doing anything else.")
        return
    if not ud.has_key('cluster_name'):
        log.warning("The provided user data should contain cluster_name field.")
        ud['cluster_name'] = 'aCloudManCluster_%s' % random.randrange(1, 9999999)
    elif ud['cluster_name'] == '':
        log.warning("The cluster_name field of user data should not be empty.")
        ud['cluster_name'] = 'aCloudManCluster_%s' % random.randrange(1, 9999999)
    if not ud.has_key('access_key'):
        log.info("The provided user data does not contain access_key field; setting it to None..")
        ud['access_key'] = None
    elif ud['access_key'] == '' or ud['access_key'] is None:
        log.warning("The access_key field of user data should not be empty; setting it to None.")
        ud['access_key'] = None
    if not ud.has_key('secret_key'):
        log.info("The provided user data does not contain secret_key field; setting it to None.")
        ud['secret_key'] = None
    elif ud['secret_key'] == '' or ud['secret_key'] is None:
        log.warning("The secret_key field of user data should not be empty; setting it to None.")
        ud['secret_key'] = None
    if not ud.has_key('password'):
        log.warning("The provided user data should contain password field.")
    elif ud['password'] == '':
        log.warning("The password field of user data should not be empty.")
    else: # ensure the password is a string
        ud['password'] = str(ud['password'])
    if not ud.has_key('bucket_default'):
        log.debug("The provided user data does not contain bucket_default field; setting it to '%s'."
            % DEFAULT_BUCKET_NAME)
        ud['bucket_default'] = DEFAULT_BUCKET_NAME
    elif ud['bucket_default'] == '':
        log.warning("The bucket_default field of user data was empty; setting it to '%s'."
            % DEFAULT_BUCKET_NAME)
        ud['bucket_default'] = DEFAULT_BUCKET_NAME
    if not ud.has_key('bucket_cluster'):
        # Derive a per-cluster bucket name only when credentials are present.
        if ud['access_key'] is not None and ud['secret_key'] is not None:
            ud['bucket_cluster'] = _get_bucket_name(ud['cluster_name'], ud['access_key'])
    if not ud.has_key('role'):
        ud['role'] = 'master'
    if not ud.has_key('cloudman_home'):
        ud['cloudman_home'] = CLOUDMAN_HOME
    if not ud.has_key('boot_script_name'):
        ud['boot_script_name'] = DEFAULT_BOOT_SCRIPT_NAME
    ud['boot_script_path'] = LOCAL_PATH # Marks where boot script was saved
    log.debug("Composed user data: %s" % ud)
    with open(USER_DATA_FILE, 'w') as ud_yaml:
        yaml.dump(ud, ud_yaml, default_flow_style=False)
    # Get & run boot script
    if _get_boot_script(ud):
        _run_boot_script(DEFAULT_BOOT_SCRIPT_NAME)
# ====================== Driver code ======================
def _parse_user_data(ud):
    """Dispatch on the form of the user data: empty, URL, or YAML (default)."""
    if ud == '':
        _handle_empty()
    elif _isurl(ud):
        _handle_url(ud)
    else:
        # Anything else is assumed to be a YAML document.
        _handle_yaml(ud)
def main():
    """Script entry point: prepare LOCAL_PATH, configure logging, process UD."""
    global log
    if not os.path.exists(LOCAL_PATH):
        os.mkdir(LOCAL_PATH)
    log = _setup_logging()
    _parse_user_data(_get_user_data())
    log.info("---> %s done <---" % sys.argv[0])


if __name__ == "__main__":
    main()
| |
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from rest_framework import serializers
from django.db.models import Max
from django.contrib.gis.geos import GEOSGeometry
from aquifers import models
HYDRAULIC_SUBTYPES = ['1a', '1b', '1c', '2', '3', '4a', '5']
class AquiferResourceSerializerV2(serializers.ModelSerializer):
    """Serializer for entries in an aquifer's resource list."""
    # Expose the related section by its primary key, read from/written to
    # the nested 'section.code' attribute path.
    section_code = serializers.PrimaryKeyRelatedField(
        queryset=models.AquiferResourceSection.objects.all(),
        source='section.code')

    def to_internal_value(self, data):
        """
        Dict of native values <- Dict of primitive datatypes.

        When the primitive dict carries an ``id``, the matching
        ``AquiferResource`` is attached under the 'instance' key so update
        logic can modify it in place.
        """
        internal = super(AquiferResourceSerializerV2, self).to_internal_value(data)
        resource_id = data.get('id', None)
        if resource_id:
            internal['instance'] = models.AquiferResource.objects.get(id=resource_id)
        return internal

    class Meta:
        model = models.AquiferResource
        fields = (
            'id',
            'name',
            'url',
            'section_code'
        )
class AquiferSerializerV2(serializers.ModelSerializer):
    """Serializer for the aquifer list endpoint (rows arrive as plain dicts)."""
    extent = serializers.SerializerMethodField()

    def get_extent(self, obj):
        """Return the bounding box of the aquifer's extent geometry, or None."""
        raw_geom = obj["extent"]
        if raw_geom is None:
            return None
        return GEOSGeometry(raw_geom).extent

    def to_representation(self, instance):
        """
        Build the representation by hand from a dict row: the list query is
        made as a `dict`, which dramatically improves performance given the
        number of joined tables, and short output keys save ~100 kB over the
        network since 1000+ records are routinely fetched.
        """
        rep = super().to_representation(instance)
        rep['id'] = instance['aquifer_id']
        rep['name'] = instance['aquifer_name']
        if instance['area']:
            rep['area'] = float(instance.get('area'))
        rep['lsu'] = instance['litho_stratographic_unit']
        rep['location'] = instance['location_description']
        # Map each short wire key to the joined description column it mirrors.
        for short_key, source_key in (
            ('demand', 'demand__description'),
            ('material', 'material__description'),
            ('subtype', 'subtype__description'),
            ('vulnerability', 'vulnerability__description'),
            ('productivity', 'productivity__description'),
        ):
            rep[short_key] = instance[source_key]
        return rep

    class Meta:
        model = models.Aquifer
        fields = (
            'aquifer_id',
            'mapping_year',
            'extent',
            'retire_date'
        )
class AquiferEditDetailSerializerV2(serializers.ModelSerializer):
    """
    Read serializer for aquifer details with primary key references needed for populating an edit form
    """
    # Inline list of related resources, handled by the nested serializer.
    resources = AquiferResourceSerializerV2(many=True, required=False)
    # Read-only JSON payload of licence information (populated by the query;
    # source not visible in this module excerpt).
    licence_details = serializers.JSONField(read_only=True)

    class Meta:
        model = models.Aquifer
        fields = (
            'aquifer_id',
            'aquifer_name',
            'location_description',
            'quality_concern',
            'material',
            'subtype',
            'vulnerability',
            'known_water_use',
            'litho_stratographic_unit',
            'productivity',
            'demand',
            'mapping_year',
            'resources',
            'area',
            'notes',
            'licence_details',
            'effective_date',
            'expiry_date',
            'retire_date',
        )
class AquiferDetailSerializerV2(serializers.ModelSerializer):
resources = AquiferResourceSerializerV2(many=True, required=False)
licence_details = serializers.JSONField(read_only=True)
def create(self, validated_data):
    """
    Create an aquifer along with any resources supplied inline, e.g.

        {
          "resources": [{"url": "http://...", "name": "A resource",
                         "section_id": 1}, ...],
          ...
        }
    """
    resources = validated_data.pop('resources', [])
    aquifer = models.Aquifer.objects.create(**validated_data)
    for item in resources:
        resource = models.AquiferResource(
            url=item['url'],
            name=item['name'],
            aquifer=aquifer,
            section_id=item['section']['code'].code)
        resource.save()
    return aquifer
def update(self, instance, validated_data):
"""
Update the resources associated with an aquifer, inline of the aquifer API.
"""
resources_data = validated_data.pop('resources', [])
for k, v in validated_data.items():
setattr(instance, k, v)
instance.save()
# Any items removed from the inline collection are deleted.
to_delete = models.AquiferResource.objects.filter(
aquifer=instance
).exclude(
id__in=[r['id'] for r in resources_data if 'id' in r]
)
to_delete.delete()
for resource_item in resources_data:
if 'instance' in resource_item:
resource = resource_item['instance']
resource.section = resource_item['section']['code']
resource.name = resource_item['name']
resource.url = resource_item['url']
resource.save()
else:
r = models.AquiferResource(
url=resource_item['url'],
name=resource_item['name'],
aquifer=instance,
section_id=resource_item['section']['code'].code)
r.save()
return instance
def to_representation(self, instance):
"""
Fetch many details related to a aquifer, used to generate its' summary page.
"""
ret = super().to_representation(instance)
if instance.geom:
instance.geom.transform(4326)
ret['geom'] = json.loads(instance.geom.json)
# respond with the 'description' field for the following items, which are otherwise
# references to code tables. Testing for the reference first prevents type errors,
# since these fields are nullable. If the field is null, we set these values to None
ret['demand'] = instance.demand and instance.demand.description or None
ret['material'] = instance.material and instance.material.description or None
ret['productivity'] = instance.productivity and instance.productivity.description or None
ret['subtype'] = instance.subtype and instance.subtype.description or None
ret['vulnerability'] = instance.vulnerability and instance.vulnerability.description or None
ret['quality_concern'] = instance.quality_concern and instance.quality_concern.description or None
details = {}
licences = models.WaterRightsLicence.objects.filter(
well__aquifer=instance
).select_related('purpose')
# distinct licence numbers.
details['licence_count'] = len(
licences.values('licence_number').distinct())
# latest date when licence data were updated.
details['licences_updated'] = licences.aggregate(
Max('update_date')
)
# Water quality info
details['num_wells_with_ems'] = instance.well_set.filter(
ems__isnull=False).count()
# Artesian conditions
details['num_artesian_wells'] = instance.well_set.filter(
artesian_conditions=True).count()
# Wells associated to an aquifer
details['wells_updated'] = instance.well_set.all().aggregate(
Max('update_date')
)
details['num_wells'] = instance.well_set.all().count()
details['obs_wells'] = instance.well_set.filter(
observation_well_number__isnull=False
).values('well_tag_number', 'observation_well_number', 'observation_well_status')
details.update(self._tally_licence_data(licences))
if instance.subtype:
details['hydraulically_connected'] = instance.subtype.code in HYDRAULIC_SUBTYPES
ret['licence_details'] = details
return ret
def _tally_licence_data(self, licences):
# Collect licences by number, for tallying according to logic in business area.
# Some types of licences must be merged when calculating totals depending on
# "quantity flags". See inline for details.
_licence_map = {}
for licence in licences:
if licence.licence_number not in _licence_map: # only insert each licence once.
_licence_map[licence.licence_number] = {
'wells': [],
'usage_by_purpose': {}
}
_licence_dict = _licence_map[licence.licence_number]
_licence_dict['wells'] = set(
[l['well_tag_number'] for l in licence.well_set.all().values('well_tag_number')]
) | set(_licence_dict['wells'])
if licence.purpose.description not in _licence_dict['usage_by_purpose']:
_licence_dict['usage_by_purpose'][licence.purpose.description] = 0
# for 'M' licences, only add the quantity once for the entire purpose.
if licence.quantity_flag == 'M':
_licence_dict['usage_by_purpose'][licence.purpose.description] += licence.quantity
# For any flag other than 'M' total all usage values.
if licence.quantity_flag != 'M':
_licence_dict['usage_by_purpose'][licence.purpose.description] += licence.quantity
details = {
'wells_by_licence': [],
'usage': [],
'lic_qty': []
}
# Again, generate some maps. Group by purpose this time.
_lic_qty_map = {}
_usage_map = {}
# re-format well by licence output.
for licence_number, licence_dict in _licence_map.items():
details['wells_by_licence'].append({
'licence_number': licence_number,
'well_tag_numbers_in_licence': ', '.join(map(str, licence_dict['wells']))
})
for purpose, usage in licence_dict['usage_by_purpose'].items():
if purpose not in _lic_qty_map:
_lic_qty_map[purpose] = 0
_usage_map[purpose] = 0
_lic_qty_map[purpose] += 1
_usage_map[purpose] += usage
# re-format for final output.
for purpose, qty in _lic_qty_map.items():
details['lic_qty'].append({
'purpose__description': purpose,
'total_qty': qty
})
details['usage'].append({
'purpose__description': purpose,
'total_qty': _usage_map[purpose]
})
return details
class Meta:
model = models.Aquifer
fields = (
'aquifer_id',
'aquifer_name',
'location_description',
'quality_concern',
'material',
'subtype',
'vulnerability',
'known_water_use',
'litho_stratographic_unit',
'productivity',
'demand',
'mapping_year',
'resources',
'area',
'notes',
'licence_details',
'effective_date',
'expiry_date',
'retire_date',
)
| |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for testing the cells weight algorithms.
Cells with higher weights should be given priority for new builds.
"""
import datetime
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from nova.cells import state
from nova.cells import weights
from nova import test
class FakeCellState(state.CellState):
    """Cell-state stub whose ram_free capacity can be set directly."""

    def __init__(self, cell_name):
        super(FakeCellState, self).__init__(cell_name)
        self.capacities['ram_free'] = {'total_mb': 0, 'units_by_mb': {}}
        self.db_info = {}

    def _update_ram_free(self, *args):
        # Each argument is a (ram_size_mb, unit_count) pair.
        free = self.capacities['ram_free']
        for size_mb, unit_count in args:
            free['total_mb'] += size_mb * unit_count
            free['units_by_mb'][str(size_mb)] = unit_count
def _get_fake_cells():
    """Build four cells with fixed RAM capacities and weight offsets."""
    specs = [
        ('cell1', ((512, 1), (1024, 4), (2048, 3)), -200.0),
        ('cell2', ((512, 2), (1024, 3), (2048, 4)), -200.1),
        ('cell3', ((512, 3), (1024, 2), (2048, 1)), 400.0),
        ('cell4', ((512, 4), (1024, 1), (2048, 2)), 300.0),
    ]
    cells = []
    for name, ram_pairs, offset in specs:
        cell = FakeCellState(name)
        cell._update_ram_free(*ram_pairs)
        cell.db_info['weight_offset'] = offset
        cells.append(cell)
    return cells
class CellsWeightsTestCase(test.NoDBTestCase):
    """Makes sure the proper weighers are in the directory."""

    def test_all_weighers(self):
        weighers = weights.all_weighers()
        names = [cls.__name__ for cls in weighers]
        # At least the two weighers we ship should be discovered.
        self.assertTrue(len(weighers) >= 2)
        self.assertIn('WeightOffsetWeigher', names)
        self.assertIn('RamByInstanceTypeWeigher', names)
class _WeigherTestClass(test.NoDBTestCase):
    """Base class for testing individual weigher plugins."""

    weigher_cls_name = None

    def setUp(self):
        super(_WeigherTestClass, self).setUp()
        self.weight_handler = weights.CellWeightHandler()
        matched = self.weight_handler.get_matching_classes(
            [self.weigher_cls_name])
        self.weighers = [weigher_cls() for weigher_cls in matched]

    def _get_weighed_cells(self, cells, weight_properties):
        return self.weight_handler.get_weighed_objects(
            self.weighers, cells, weight_properties)
class RAMByInstanceTypeWeigherTestClass(_WeigherTestClass):
    """Tests RamByInstanceTypeWeigher's ordering of cells by free RAM.

    The three memory sizes were previously checked with three copy-pasted
    stanzas per test; the shared logic now lives in _assert_order().
    """

    weigher_cls_name = ('nova.cells.weights.ram_by_instance_type.'
                        'RamByInstanceTypeWeigher')

    def _assert_order(self, cells, memory_mb, expected_indexes):
        """Weigh cells for a memory_mb build and check the resulting order.

        expected_indexes are indexes into `cells`, best-weighted first.
        """
        instance_type = {'memory_mb': memory_mb}
        weight_properties = {'request_spec': {'instance_type': instance_type}}
        weighed_cells = self._get_weighed_cells(cells, weight_properties)
        self.assertEqual(len(cells), len(weighed_cells))
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        expected_cells = [cells[i] for i in expected_indexes]
        self.assertEqual(expected_cells, resulting_cells)

    def test_default_spreading(self):
        """Test that cells with more ram available return a higher weight."""
        cells = _get_fake_cells()
        # Simulate building new 512MB, 1024MB, and 2048MB instances.
        self._assert_order(cells, 512, [3, 2, 1, 0])
        self._assert_order(cells, 1024, [0, 1, 2, 3])
        self._assert_order(cells, 2048, [1, 0, 3, 2])

    def test_negative_multiplier(self):
        """Test that cells with less ram available return a higher weight."""
        self.flags(ram_weight_multiplier=-1.0, group='cells')
        cells = _get_fake_cells()
        # Same builds as above; the ordering inverts under the negative
        # multiplier.
        self._assert_order(cells, 512, [0, 1, 2, 3])
        self._assert_order(cells, 1024, [3, 2, 1, 0])
        self._assert_order(cells, 2048, [2, 3, 0, 1])
class WeightOffsetWeigherTestClass(_WeigherTestClass):
    """Test the WeightOffsetWeigher class."""
    weigher_cls_name = 'nova.cells.weights.weight_offset.WeightOffsetWeigher'

    def test_weight_offset(self):
        """Test that cells with higher weight_offsets return higher
        weights.
        """
        cells = _get_fake_cells()
        weighed_cells = self._get_weighed_cells(cells, {})
        self.assertEqual(4, len(weighed_cells))
        # Offsets from _get_fake_cells: cell3 (400.0) > cell4 (300.0)
        # > cell1 (-200.0) > cell2 (-200.1).
        expected_cells = [cells[2], cells[3], cells[0], cells[1]]
        resulting_cells = [weighed_cell.obj for weighed_cell in weighed_cells]
        self.assertEqual(expected_cells, resulting_cells)
class MuteWeigherTestClass(_WeigherTestClass):
    """Tests the penalty MuteChildWeigher applies to unresponsive cells."""

    weigher_cls_name = 'nova.cells.weights.mute_child.MuteChildWeigher'

    def setUp(self):
        super(MuteWeigherTestClass, self).setUp()
        self.flags(mute_weight_multiplier=-10.0, mute_child_interval=100,
                   group='cells')
        self.now = timeutils.utcnow()
        self.useFixture(utils_fixture.TimeFixture(self.now))
        self.cells = _get_fake_cells()
        for cell in self.cells:
            cell.last_seen = self.now

    def test_non_mute(self):
        # Every cell was just seen, so no penalty should apply.
        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        for weighed_cell in weighed_cells:
            self.assertEqual(0, weighed_cell.weight)

    def test_mutes(self):
        # Age two cells past the 100s mute interval to make them mute.
        stale = datetime.timedelta(seconds=200)
        self.cells[0].last_seen = self.cells[0].last_seen - stale
        self.cells[1].last_seen = self.cells[1].last_seen - stale
        weight_properties = {}
        weighed_cells = self._get_weighed_cells(self.cells, weight_properties)
        self.assertEqual(4, len(weighed_cells))
        # Responsive cells come first with no penalty...
        for weighed_cell in weighed_cells[:2]:
            self.assertEqual(0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell3', 'cell4'])
        # ...and the muted cells follow, carrying the penalty weight.
        for weighed_cell in weighed_cells[2:]:
            self.assertEqual(-10.0, weighed_cell.weight)
            self.assertIn(weighed_cell.obj.name, ['cell1', 'cell2'])
| |
#
# Main bot class
# Handles routing, config, agent/module loading
#
import collections
import collections.abc
import importlib
import json
import os, sys
import threading

from distutils.version import StrictVersion as Version
from queue import Queue,Empty
from string import Template

import halibot.packages

from .halmodule import HalModule
from .halagent import HalAgent
from .halauth import HalAuth
# Avoid appending "." if it is already on sys.path, so local packages can
# be resolved without duplicating search-path entries.
if "." not in sys.path:
    sys.path.append(".")

import logging

# Root of the halibot installation (one directory above this module).
HALDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
class ObjectDict(dict):
    """Dict of loaded halibot objects, filterable by object kind."""

    @property
    def modules(self):
        # Only the entries that are HalModule instances.
        return {name: obj for name, obj in self.items()
                if isinstance(obj, HalModule)}

    @property
    def agents(self):
        # Only the entries that are HalAgent instances.
        return {name: obj for name, obj in self.items()
                if isinstance(obj, HalAgent)}
#
# This class represents the hierarchy of config files
#
class Config(collections.abc.MutableMapping):
    """Two-layer mapping over the local (workdir) and system config files.

    Reads consult the local config first and fall back to the system
    config; writes and deletes only ever touch the local config.
    """

    def __init__(self):
        # BUG FIX: self.log was used by _load_config() but never defined,
        # raising AttributeError whenever the system config was missing.
        self.log = logging.getLogger(self.__class__.__name__)
        self.local = {}
        self.system = {}

    def _load_config(self, workdir="."):
        """Load the local (required) and system (optional) config files."""
        # Special values expanded inside of configs
        specials = {
            'HALDIR': HALDIR
        }
        with open(os.path.join(workdir, "config.json"), "r") as f:
            text = Template(f.read()).safe_substitute(**specials)
            self.set_local(json.loads(text))
        try:
            with open(os.path.join(HALDIR, "config", "system.json")) as f:
                text = Template(f.read()).safe_substitute(**specials)
                self.set_system(json.loads(text))
        except FileNotFoundError:
            self.log.info("No system config loaded.")

    def _write_config(self, workdir="."):
        # Only the local layer is persisted; the system config is read-only.
        with open(os.path.join(workdir, "config.json"), "w") as f:
            f.write(json.dumps(self.local, sort_keys=True, indent=4))

    def _refresh_packages(self):
        # Local package paths take precedence over system package paths.
        lpath = self.local.get("package-path", [])
        spath = self.system.get("package-path", [])
        halibot.packages.__path__ = lpath + spath

    def set_local(self, local):
        self.local = local
        self._refresh_packages()

    def set_system(self, system):
        self.system = system
        self._refresh_packages()

    def __getitem__(self, key):
        if key in self.local:
            return self.local[key]
        return self.system[key]

    def __setitem__(self, key, value):
        self.local[key] = value

    def __delitem__(self, key):
        # BUG FIX: previously deleted self.local[value] — 'value' is not a
        # name in this scope, so every delete raised NameError.
        del self.local[key]

    def __iter__(self):
        # BUG FIX: dict views don't support '+' in Python 3; iterate the
        # union of both layers' keys instead.
        return iter(set(self.local) | set(self.system))

    def __len__(self):
        # BUG FIX: sets don't support '+' either; use the union operator.
        return len(set(self.local) | set(self.system))

    def __keytransform__(self, key):
        return key
class Halibot():
    """Main bot class: owns the config, the loaded agent/module objects,
    and the routines for loading, reloading, and restarting them.
    """

    VERSION = "0.2.0"

    running = False
    log = None

    def __init__(self, **kwargs):
        self.log = logging.getLogger(self.__class__.__name__)
        self.use_config = kwargs.get("use_config", True)
        self.workdir = kwargs.get("workdir", ".")
        self.auth = HalAuth()
        self.objects = ObjectDict()
        self.config = Config()

    # Start the Hal instance
    def start(self, block=True):
        """Load config (when enabled) and bring up all configured objects."""
        self.running = True
        if self.use_config:
            # BUG FIX: honor the configured workdir instead of always
            # loading config from the current directory.
            self._load_config()
            self._instantiate_objects("agent")
            self._instantiate_objects("module")
            self._instantiate_objects("filter")
            if self.config.get("use-auth", False):
                self.auth.load_perms(self.config.get("auth-path", "permissions.json"))

    def shutdown(self):
        """Shut every loaded object down and report leftover threads."""
        self.log.info("Shutting down halibot...")
        for o in self.objects.values():
            o._shutdown()
        self.log.info("Halibot shutdown. Threads left: " + str(threading.active_count()))

    def _check_version(self, obj):
        """Return True if this Halibot version satisfies obj's declared
        HAL_MINIMUM/HAL_MAXIMUM constraints.
        """
        v = Version(self.VERSION)
        if not hasattr(obj, "HAL_MINIMUM"):
            # Logger.warning replaces the deprecated Logger.warn alias.
            self.log.warning("Module class '{}' does not define a minimum version, trying to load anyway...".format(obj.__class__.__name__))
            return True
        if v < Version(obj.HAL_MINIMUM):
            self.log.error("Rejecting load of '{}', requires minimum Halibot version '{}'. (Currently running '{}')".format(obj.__class__.__name__, obj.HAL_MINIMUM, self.VERSION))
            return False
        if hasattr(obj, "HAL_MAXIMUM"):
            if v >= Version(obj.HAL_MAXIMUM):
                self.log.error("Rejecting load of '{}', requires maximum Halibot version '{}'. (Currently running '{}')".format(obj.__class__.__name__, obj.HAL_MAXIMUM, self.VERSION))
                return False
        return True

    def add_instance(self, name, inst):
        """Register an instantiated object under `name` and initialize it."""
        self.objects[name] = inst
        inst.name = name
        inst.init()
        self.log.info("Instantiated object '" + name + "'")

    def _load_config(self):
        self.config._load_config(workdir=self.workdir)

    def _write_config(self):
        self.config._write_config(workdir=self.workdir)

    def _get_class_from_package(self, pkgname, clsname):
        """Resolve clsname inside package pkgname; None (logged) on failure."""
        pkg = self.get_package(pkgname)
        if pkg is None:
            self.log.error("Cannot find package {}!".format(pkgname))
            return None
        obj = getattr(pkg, clsname, None)
        if obj is None:
            self.log.error("Cannot find class {} in package {}!".format(clsname, pkgname))
            return None
        return obj

    # Load a halobject from a package descriptor (e.g. hello:Hello), but does NOT
    # start or add it to the current instance.
    def load_object(self, pkg, conf=None):
        # Mutable-default fix: build a fresh dict per call instead of
        # sharing one {} across all calls.
        if conf is None:
            conf = {}
        split = pkg.split(":")
        if len(split) == 2:
            obj = self._get_class_from_package(*split)
            if obj and self._check_version(obj):
                return obj(self, conf=conf)
        else:
            # BUG FIX: previously formatted conf["of"], which may not exist
            # (conf defaults to empty) — report the descriptor actually given.
            self.log.error("Invalid class identifier {}, must contain only 1 ':'".format(pkg))
        return None

    def _instantiate_objects(self, key):
        inst = self.config.get(key + "-instances", {})
        for k in inst.keys():
            conf = inst[k]
            obj = self.load_object(conf["of"], conf=conf)
            if obj:
                self.add_instance(k, obj)

    def get_package(self, name):
        return importlib.import_module('halibot.packages.' + name)

    def reload(self, name):
        """Reload package `name` and re-instantiate every object built from it."""
        parent = 'halibot.packages.' + name
        for k, o in self.objects.items():
            if o.__module__.startswith(parent + '.') or o.__module__ == parent:
                o._shutdown()
                mod = importlib.reload(importlib.import_module(o.__module__))
                cls = getattr(mod, o.__class__.__name__)
                self.add_instance(k, cls(self, o.config))

    # Restart a module instance by name
    def restart(self, name):
        o = self.objects.get(name)
        if o:
            # NOTE(review): calls shutdown()/init() while reload() and
            # shutdown() use _shutdown() — confirm the asymmetry is intended.
            o.shutdown()
            o.init()
        else:
            self.log.warning("Failed to restart instance '{}'".format(name))
| |
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import netutils
import requests
from six.moves import http_client
from six.moves import urllib
from cinder.i18n import _
from cinder.tests.unit import fake_constants as fake
class OpenStackApiException(Exception):
    """Base API-client error; embeds the HTTP response when one is given."""

    message = 'Unspecified error'

    def __init__(self, response=None, msg=None):
        self.response = response
        # An explicit msg overrides the class-level default message.
        if msg:
            self.message = msg
        if response:
            detail = {
                'message': self.message,
                '_status': response.status_code,
                '_body': response.text,
            }
            self.message = _(
                '%(message)s\nStatus Code: %(_status)s\nBody: %(_body)s') % detail
        super(OpenStackApiException, self).__init__(self.message)
# HTTP-status-specific errors. api_request() looks these up by name
# ("OpenStackApiException%d") to raise the most specific class available.
class OpenStackApiException401(OpenStackApiException):
    message = _("401 Unauthorized Error")


class OpenStackApiException404(OpenStackApiException):
    message = _("404 Not Found Error")


class OpenStackApiException413(OpenStackApiException):
    message = _("413 Request entity too large")


class OpenStackApiException400(OpenStackApiException):
    message = _("400 Bad Request")
class TestOpenStackClient(object):
    """Simple OpenStack API Client.

    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing
    """

    def __init__(self, auth_user, auth_key, auth_uri, api_version=None):
        super(TestOpenStackClient, self).__init__()
        # auth_result caches the auth response headers between requests.
        self.auth_result = None
        self.auth_user = auth_user
        self.auth_key = auth_key
        self.auth_uri = auth_uri
        # default project_id
        self.project_id = fake.PROJECT_ID
        self.api_version = api_version

    def request(self, url, method='GET', body=None, headers=None,
                ssl_verify=True, stream=False):
        """Issue a raw HTTP request and return the requests Response."""
        _headers = {'Content-Type': 'application/json'}
        _headers.update(headers or {})
        # Rebuild the URL piecewise so IPv6 hosts can be bracketed.
        parsed_url = urllib.parse.urlparse(url)
        port = parsed_url.port
        hostname = parsed_url.hostname
        scheme = parsed_url.scheme
        if netutils.is_valid_ipv6(hostname):
            hostname = "[%s]" % hostname
        relative_url = parsed_url.path
        if parsed_url.query:
            relative_url = relative_url + "?" + parsed_url.query
        if port:
            _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url)
        else:
            _url = "%s://%s%s" % (scheme, hostname, relative_url)
        response = requests.request(method, _url, data=body, headers=_headers,
                                    verify=ssl_verify, stream=stream)
        return response

    def _authenticate(self, reauthenticate=False):
        """Authenticate against auth_uri; caches the result headers."""
        if self.auth_result and not reauthenticate:
            return self.auth_result
        auth_uri = self.auth_uri
        headers = {'X-Auth-User': self.auth_user,
                   'X-Auth-Key': self.auth_key,
                   'X-Auth-Project-Id': self.project_id}
        response = self.request(auth_uri,
                                headers=headers)
        http_status = response.status_code
        if http_status == http_client.UNAUTHORIZED:
            raise OpenStackApiException401(response=response)
        self.auth_result = response.headers
        return self.auth_result

    def update_project(self, new_project_id):
        """Switch project and force re-authentication under it."""
        self.project_id = new_project_id
        self._authenticate(True)

    def api_request(self, relative_uri, check_response_status=None, **kwargs):
        """Issue an authenticated API request relative to the service URL.

        When check_response_status is given, any other status raises the
        matching OpenStackApiException subclass (looked up by name) or the
        generic base class.
        """
        auth_result = self._authenticate()
        # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
        base_uri = auth_result['x-server-management-url']
        full_uri = '%s/%s' % (base_uri, relative_uri)
        headers = kwargs.setdefault('headers', {})
        headers['X-Auth-Token'] = auth_result['x-auth-token']
        if self.api_version:
            headers['OpenStack-API-Version'] = 'volume ' + self.api_version
        response = self.request(full_uri, **kwargs)
        http_status = response.status_code
        if check_response_status:
            if http_status not in check_response_status:
                message = None
                try:
                    exc = globals()["OpenStackApiException%s" % http_status]
                except KeyError:
                    exc = OpenStackApiException
                    message = _("Unexpected status code")
                raise exc(response, message)
        return response

    def _decode_json(self, response):
        # Empty bodies decode to "" rather than raising.
        body = response.text
        if body:
            return jsonutils.loads(body)
        else:
            return ""

    def api_get(self, relative_uri, **kwargs):
        kwargs.setdefault('check_response_status', [http_client.OK])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)

    def api_post(self, relative_uri, body, **kwargs):
        kwargs['method'] = 'POST'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [http_client.OK,
                                                    http_client.ACCEPTED])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)

    def api_put(self, relative_uri, body, **kwargs):
        kwargs['method'] = 'PUT'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [http_client.OK,
                                                    http_client.ACCEPTED,
                                                    http_client.NO_CONTENT])
        response = self.api_request(relative_uri, **kwargs)
        return self._decode_json(response)

    def api_delete(self, relative_uri, **kwargs):
        kwargs['method'] = 'DELETE'
        kwargs.setdefault('check_response_status', [http_client.OK,
                                                    http_client.ACCEPTED,
                                                    http_client.NO_CONTENT])
        return self.api_request(relative_uri, **kwargs)

    # ---- volumes ----

    def get_volume(self, volume_id):
        return self.api_get('/volumes/%s' % volume_id)['volume']

    def get_volumes(self, detail=True):
        rel_url = '/volumes/detail' if detail else '/volumes'
        return self.api_get(rel_url)['volumes']

    def post_volume(self, volume):
        return self.api_post('/volumes', volume)['volume']

    def delete_volume(self, volume_id):
        return self.api_delete('/volumes/%s' % volume_id)

    def put_volume(self, volume_id, volume):
        return self.api_put('/volumes/%s' % volume_id, volume)['volume']

    # ---- quotas ----

    def quota_set(self, project_id, quota_update):
        return self.api_put(
            'os-quota-sets/%s' % project_id,
            {'quota_set': quota_update})['quota_set']

    def quota_get(self, project_id, usage=True):
        return self.api_get('os-quota-sets/%s?usage=%s'
                            % (project_id, usage))['quota_set']

    # ---- volume types ----

    def create_type(self, type_name, extra_specs=None):
        type = {"volume_type": {"name": type_name}}
        if extra_specs:
            # NOTE(review): this puts extra_specs at the top level of the
            # request body rather than inside "volume_type" — confirm the
            # API under test actually honors it there.
            type['extra_specs'] = extra_specs
        return self.api_post('/types', type)['volume_type']

    def delete_type(self, type_id):
        return self.api_delete('/types/%s' % type_id)

    def get_type(self, type_id):
        return self.api_get('/types/%s' % type_id)['volume_type']

    def create_volume_type_extra_specs(self, volume_type_id, extra_specs):
        extra_specs = {"extra_specs": extra_specs}
        url = "/types/%s/extra_specs" % volume_type_id
        return self.api_post(url, extra_specs)['extra_specs']

    # ---- group types ----

    def create_group_type_specs(self, grp_type_id, group_specs):
        group_specs = {"group_specs": group_specs}
        url = "/group_types/%s/group_specs" % grp_type_id
        return self.api_post(url, group_specs)['group_specs']

    def create_group_type(self, type_name, grp_specs=None):
        grp_type = {"group_type": {"name": type_name}}
        if grp_specs:
            # NOTE(review): same top-level placement as create_type above;
            # confirm whether group_specs should nest under "group_type".
            grp_type['group_specs'] = grp_specs
        return self.api_post('/group_types', grp_type)['group_type']

    def delete_group_type(self, group_type_id):
        return self.api_delete('/group_types/%s' % group_type_id)

    def get_group_type(self, grp_type_id):
        return self.api_get('/group_types/%s' % grp_type_id)['group_type']

    # ---- groups ----

    def get_group(self, group_id):
        return self.api_get('/groups/%s' % group_id)['group']

    def get_groups(self, detail=True):
        rel_url = '/groups/detail' if detail else '/groups'
        return self.api_get(rel_url)['groups']

    def post_group(self, group):
        return self.api_post('/groups', group)['group']

    def post_group_from_src(self, group):
        return self.api_post('/groups/action', group)['group']

    def delete_group(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)

    def reset_group(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)

    def put_group(self, group_id, group):
        return self.api_put('/groups/%s' % group_id, group)['group']

    # ---- group snapshots ----

    def get_group_snapshot(self, group_snapshot_id):
        return self.api_get('/group_snapshots/%s' % group_snapshot_id)[
            'group_snapshot']

    def get_group_snapshots(self, detail=True):
        rel_url = '/group_snapshots/detail' if detail else '/group_snapshots'
        return self.api_get(rel_url)['group_snapshots']

    def post_group_snapshot(self, group_snapshot):
        return self.api_post('/group_snapshots', group_snapshot)[
            'group_snapshot']

    def delete_group_snapshot(self, group_snapshot_id):
        return self.api_delete('/group_snapshots/%s' % group_snapshot_id)

    def reset_group_snapshot(self, group_snapshot_id, params):
        return self.api_post('/group_snapshots/%s/action' % group_snapshot_id,
                             params)

    # ---- group replication (all dispatched via the action endpoint) ----

    def enable_group_replication(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)

    def disable_group_replication(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)

    def failover_group_replication(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)

    def list_group_replication_targets(self, group_id, params):
        return self.api_post('/groups/%s/action' % group_id, params)
| |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from mobly import signals
# Have an instance of unittest.TestCase so we could reuse some logic
# from python's own unittest.
_pyunit_proxy = unittest.TestCase()
# Never truncate assertion-failure diffs.
_pyunit_proxy.maxDiff = None
def _call_unittest_assertion(assertion_method,
*args,
msg=None,
extras=None,
**kwargs):
"""Wrapper for converting a unittest assertion into a Mobly one.
Args:
assertion_method: unittest.TestCase assertion method to call.
*args: Positional arguments for the assertion call.
msg: A string that adds additional info about the failure.
extras: An optional field for extra information to be included in
test result.
**kwargs: Keyword arguments for the assertion call.
"""
my_msg = None
try:
assertion_method(*args, **kwargs)
except AssertionError as e:
my_msg = str(e)
if msg:
my_msg = f'{my_msg} {msg}'
# This raise statement is outside of the above except statement to
# prevent Python3's exception message from having two tracebacks.
if my_msg is not None:
raise signals.TestFailure(my_msg, extras=extras)
def assert_equal(first, second, msg=None, extras=None):
    """Fails the test unless the two objects compare equal.

    The default error message is "first != second"; `msg` appends extra
    context to it.

    Args:
      first: The first object to compare.
      second: The second object to compare.
      msg: A string that adds additional info about the failure.
      extras: An optional field for extra information to be included in
        test result.
    """
    _call_unittest_assertion(
        _pyunit_proxy.assertEqual, first, second, msg=msg, extras=extras)
def assert_not_equal(first, second, msg=None, extras=None):
    """Fails the test if first == second."""
    _call_unittest_assertion(
        _pyunit_proxy.assertNotEqual, first, second, msg=msg, extras=extras)
def assert_almost_equal(first,
                        second,
                        places=None,
                        msg=None,
                        delta=None,
                        extras=None):
    """Fails unless first and second are approximately equal.

    The difference is either rounded to `places` decimal places (default 7)
    and compared to zero, or — when `delta` is given — checked not to
    exceed `delta`. Objects that compare equal automatically compare
    almost equal.

    Args:
      first: The first value to compare.
      second: The second value to compare.
      places: How many decimal places to take into account for comparison.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
      msg: A string that adds additional info about the failure.
      delta: Delta to use for comparison instead of decimal places.
      extras: An optional field for extra information to be included in
        test result.
    """
    _call_unittest_assertion(
        _pyunit_proxy.assertAlmostEqual, first, second,
        places=places, msg=msg, delta=delta, extras=extras)
def assert_not_almost_equal(first,
                            second,
                            places=None,
                            msg=None,
                            delta=None,
                            extras=None):
    """Fails if first and second are approximately equal.

    Args:
      first: The first value to compare.
      second: The second value to compare.
      places: How many decimal places to take into account for comparison.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
      msg: A string that adds additional info about the failure.
      delta: Delta to use for comparison instead of decimal places.
      extras: An optional field for extra information to be included in
        test result.
    """
    _call_unittest_assertion(
        _pyunit_proxy.assertNotAlmostEqual, first, second,
        places=places, msg=msg, delta=delta, extras=extras)
def assert_in(member, container, msg=None, extras=None):
    """Asserts that ``member`` is present in ``container``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIn, member, container, msg=msg, extras=extras)
def assert_not_in(member, container, msg=None, extras=None):
    """Asserts that ``member`` is not present in ``container``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertNotIn, member, container, msg=msg, extras=extras)
def assert_is(expr1, expr2, msg=None, extras=None):
    """Asserts that ``expr1`` and ``expr2`` are the same object (``is``)."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIs, expr1, expr2, msg=msg, extras=extras)
def assert_is_not(expr1, expr2, msg=None, extras=None):
    """Asserts that ``expr1`` and ``expr2`` are distinct objects (``is not``)."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIsNot, expr1, expr2, msg=msg, extras=extras)
def assert_count_equal(first, second, msg=None, extras=None):
    """Asserts that two iterables contain the same elements with the same
    multiplicities, regardless of order.

    Equivalent to ``assert_equal(Counter(list(first)), Counter(list(second)))``.

    Args:
        first: The first iterable to compare.
        second: The second iterable to compare.
        msg: A string that adds additional info about the failure.
        extras: An optional field for extra information to be included in
            test result.

    Example:
        assert_count_equal([0, 1, 1], [1, 0, 1]) passes the assertion.
        assert_count_equal([0, 0, 1], [0, 1]) raises an assertion error.
    """
    _call_unittest_assertion(
        _pyunit_proxy.assertCountEqual, first, second, msg=msg, extras=extras)
def assert_less(a, b, msg=None, extras=None):
    """Asserts that ``a < b``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertLess, a, b, msg=msg, extras=extras)
def assert_less_equal(a, b, msg=None, extras=None):
    """Asserts that ``a <= b``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertLessEqual, a, b, msg=msg, extras=extras)
def assert_greater(a, b, msg=None, extras=None):
    """Asserts that ``a > b``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertGreater, a, b, msg=msg, extras=extras)
def assert_greater_equal(a, b, msg=None, extras=None):
    """Asserts that ``a >= b``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertGreaterEqual, a, b, msg=msg, extras=extras)
def assert_is_none(obj, msg=None, extras=None):
    """Asserts that ``obj`` is ``None``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIsNone, obj, msg=msg, extras=extras)
def assert_is_not_none(obj, msg=None, extras=None):
    """Asserts that ``obj`` is not ``None``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIsNotNone, obj, msg=msg, extras=extras)
def assert_is_instance(obj, cls, msg=None, extras=None):
    """Asserts that ``obj`` is an instance of ``cls``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertIsInstance, obj, cls, msg=msg, extras=extras)
def assert_not_is_instance(obj, cls, msg=None, extras=None):
    """Asserts that ``obj`` is not an instance of ``cls``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertNotIsInstance, obj, cls, msg=msg, extras=extras)
def assert_regex(text, expected_regex, msg=None, extras=None):
    """Fails the test unless ``text`` matches ``expected_regex``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertRegex, text, expected_regex, msg=msg,
        extras=extras)
def assert_not_regex(text, unexpected_regex, msg=None, extras=None):
    """Fails the test if ``text`` matches ``unexpected_regex``."""
    _call_unittest_assertion(
        _pyunit_proxy.assertNotRegex, text, unexpected_regex, msg=msg,
        extras=extras)
def assert_raises(expected_exception, extras=None, *args, **kwargs):
    """Asserts that an exception of the given type is raised.

    If no exception is raised the test fails; an exception of a different
    type propagates unchanged. Intended to be used only as a context
    manager:

        with assert_raises(Exception):
            func()

    Args:
        expected_exception: An exception class that is expected to be
            raised.
        extras: An optional field for extra information to be included in
            test result.

    Returns:
        A context manager enforcing the expectation on exit.
    """
    return _AssertRaisesContext(expected_exception, extras=extras)
def assert_raises_regex(expected_exception, expected_regex, extras=None,
                        *args, **kwargs):
    """Asserts that an exception of the given type is raised and that its
    message matches a regular expression.

    If no exception is raised the test fails; an exception of a different
    type propagates unchanged; an exception of the expected type whose
    message does not match ``expected_regex`` fails the test. Intended to
    be used only as a context manager:

        with assert_raises_regex(Exception, 'regex'):
            func()

    Args:
        expected_exception: An exception class that is expected to be
            raised.
        expected_regex: Regular expression (string or compiled pattern)
            that the raised exception's message must match.
        extras: An optional field for extra information to be included in
            test result.

    Returns:
        A context manager enforcing the expectation on exit.
    """
    return _AssertRaisesContext(expected_exception, expected_regex,
                                extras=extras)
def assert_true(expr, msg, extras=None):
    """Fails the test unless ``expr`` evaluates to true.

    Args:
        expr: The expression that is evaluated.
        msg: A string explaining the details in case of failure.
        extras: An optional field for extra information to be included in
            test result.
    """
    if expr:
        return
    fail(msg, extras)
def assert_false(expr, msg, extras=None):
    """Fails the test if ``expr`` evaluates to true.

    Args:
        expr: The expression that is evaluated.
        msg: A string explaining the details in case of failure.
        extras: An optional field for extra information to be included in
            test result.
    """
    if not expr:
        return
    fail(msg, extras)
def skip(reason, extras=None):
    """Unconditionally marks the current test as skipped.

    Args:
        reason: The reason this test is skipped.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestSkip: always, to mark the test as skipped.
    """
    raise signals.TestSkip(reason, extras)
def skip_if(expr, reason, extras=None):
    """Skips the current test when ``expr`` evaluates to true.

    Args:
        expr: The expression that is evaluated.
        reason: The reason this test is skipped.
        extras: An optional field for extra information to be included in
            test result.
    """
    if not expr:
        return
    skip(reason, extras)
def abort_class(reason, extras=None):
    """Aborts all subsequent tests within the same test class in one
    iteration.

    If one test class is requested multiple times in a test run, this can
    only abort one of the requested executions, NOT all.

    Args:
        reason: The reason to abort.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestAbortClass: always, aborting the remaining tests of
            the class.
    """
    raise signals.TestAbortClass(reason, extras)
def abort_class_if(expr, reason, extras=None):
    """Aborts the remaining tests of the current class iteration when
    ``expr`` evaluates to true.

    If one test class is requested multiple times in a test run, this can
    only abort one of the requested executions, NOT all.

    Args:
        expr: The expression that is evaluated.
        reason: The reason to abort.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestAbortClass: when ``expr`` is truthy.
    """
    if not expr:
        return
    abort_class(reason, extras)
def abort_all(reason, extras=None):
    """Aborts all subsequent tests, including the ones not in this test
    class or iteration.

    Args:
        reason: The reason to abort.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestAbortAll: always, aborting every remaining test.
    """
    raise signals.TestAbortAll(reason, extras)
def abort_all_if(expr, reason, extras=None):
    """Aborts all subsequent tests when ``expr`` evaluates to true.

    Args:
        expr: The expression that is evaluated.
        reason: The reason to abort.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestAbortAll: when ``expr`` is truthy.
    """
    if not expr:
        return
    abort_all(reason, extras)
def fail(msg, extras=None):
    """Explicitly fails the current test.

    Args:
        msg: A string explaining the details of the failure.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestFailure: always, marking the test as failed.
    """
    raise signals.TestFailure(msg, extras)
def explicit_pass(msg, extras=None):
    """Explicitly passes the current test.

    The test is marked passed regardless of any other error that happened
    in the test body — e.g. even if errors have been recorded with
    ``expects``. Since a test without an uncaught exception passes
    implicitly anyway, this should be used sparingly.

    Args:
        msg: A string explaining the details of the passed test.
        extras: An optional field for extra information to be included in
            test result.

    Raises:
        signals.TestPass: always, marking the test as passed.
    """
    raise signals.TestPass(msg, extras)
class _AssertRaisesContext:
    """Context manager backing the ``assert_raises*`` helpers."""

    def __init__(self, expected, expected_regexp=None, extras=None):
        self.expected = expected
        self.failureException = signals.TestFailure
        self.expected_regexp = expected_regexp
        self.extras = extras

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Nothing was raised at all: the expected exception never happened.
        if exc_type is None:
            exc_name = getattr(self.expected, '__name__', str(self.expected))
            raise signals.TestFailure('%s not raised' % exc_name,
                                      extras=self.extras)
        # Exceptions of an unexpected type propagate unchanged.
        if not issubclass(exc_type, self.expected):
            return False
        # Keep the exception around so callers can inspect it afterwards.
        self.exception = exc_value
        pattern = self.expected_regexp
        if pattern is None:
            return True
        if isinstance(pattern, str):
            pattern = re.compile(pattern)
        if pattern.search(str(exc_value)):
            return True
        raise signals.TestFailure('"%s" does not match "%s"' %
                                  (pattern.pattern, str(exc_value)),
                                  extras=self.extras)
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops
from utils import *
from ops import *
class NTMCell(object):
    """Neural Turing Machine cell: an LSTM controller coupled to an external
    memory matrix that is addressed by content and by location (Graves et
    al., 2014). When ``is_LSTM_mode`` is True the external memory is bypassed
    and the cell behaves as a plain (stacked) LSTM.
    """

    def __init__(self, input_dim, output_dim,
                 mem_size=128, mem_dim=22, controller_dim=100,
                 controller_layer_size=1, shift_range=1,
                 write_head_size=1, read_head_size=1, is_LSTM_mode=False):
        """Initialize the parameters for an NTM cell.

        Args:
            input_dim: int, dimensionality of the cell input.
            output_dim: int, dimensionality of the cell output.
            mem_size: (optional) int, the number of memory slots [128]
            mem_dim: (optional) int, the dimensionality of each slot [22]
            controller_dim: (optional) int, the dimensionality for controller [100]
            controller_layer_size: (optional) int, the size of controller layer [1]
            shift_range: (optional) int, allowed convolutional shift range [1]
            write_head_size: (optional) int, number of write heads [1]
            read_head_size: (optional) int, number of read heads [1]
            is_LSTM_mode: (optional) bool, disable the external memory [False]
        """
        # initialize configs
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.mem_size = mem_size
        self.mem_dim = mem_dim
        self.controller_dim = controller_dim
        self.controller_layer_size = controller_layer_size
        self.shift_range = shift_range
        self.write_head_size = write_head_size
        self.read_head_size = read_head_size
        self.is_LSTM_mode = is_LSTM_mode

        # Number of steps unrolled so far, and the state dict of each step.
        self.depth = 0
        self.states = []

    def __call__(self, input_, state=None, scope=None):
        """Run one step of NTM.

        Args:
            input_: input Tensor, 2D, 1 x input_size.
            state: state dictionary which contains M, read_w, write_w, read,
                output, hidden. If None, a freshly built initial state is used.
            scope: VariableScope for the created subgraph; currently unused.

        Returns:
            A tuple containing:
            - new_output: the output computed from the controller's last
              LSTM layer after reading "input_".
            - state: the updated state dictionary for the next step.
        """
        # `is None` rather than `== None`: `==` would invoke Tensor.__eq__
        # on dict values coming from the graph and is not an identity test.
        if state is None:
            _, state = self.initial_state()

        M_prev = state['M']
        read_w_list_prev = state['read_w']
        write_w_list_prev = state['write_w']
        read_list_prev = state['read']
        output_list_prev = state['output']
        hidden_list_prev = state['hidden']

        # build a controller
        output_list, hidden_list = self.build_controller(input_, read_list_prev,
                                                         output_list_prev,
                                                         hidden_list_prev)

        # last output layer from LSTM controller
        last_output = output_list[-1]

        # build a memory
        if not self.is_LSTM_mode:
            M, read_w_list, write_w_list, read_list = self.build_memory(M_prev,
                                                                        read_w_list_prev,
                                                                        write_w_list_prev,
                                                                        last_output)
        else:
            # Pure-LSTM mode: memory-related state entries are placeholders.
            M, read_w_list, write_w_list, read_list = 0, 0, 0, 0

        # get a new output
        new_output = self.new_output(last_output)

        state = {
            'M': M,
            'read_w': read_w_list,
            'write_w': write_w_list,
            'read': read_list,
            'output': output_list,
            'hidden': hidden_list,
        }

        self.depth += 1
        self.states.append(state)

        return new_output, state

    def new_output(self, output):
        """Final linear output layer (returns raw logits; no activation is
        applied here)."""
        with tf.variable_scope('output'):
            logit = Linear(output, self.output_dim, name='output')
            return logit

    def build_controller(self, input_,
                         read_list_prev, output_list_prev, hidden_list_prev):
        """Build the (stacked) LSTM controller.

        Layer 0 sees the external input (and, unless in pure-LSTM mode, the
        previous read vectors); deeper layers see the layer below.

        Returns:
            (output_list, hidden_list): per-layer LSTM outputs and cell states.
        """
        with tf.variable_scope("controller"):
            output_list = []
            hidden_list = []
            for layer_idx in range(self.controller_layer_size):
                o_prev = output_list_prev[layer_idx]
                h_prev = hidden_list_prev[layer_idx]

                # NOTE: new_gate closes over loop variables, but it is only
                # called within the same iteration, so late binding is safe.
                if layer_idx == 0:
                    def new_gate(gate_name):
                        if not self.is_LSTM_mode:
                            return linear([input_, o_prev] + read_list_prev,
                                          output_size=self.controller_dim,
                                          bias=True,
                                          scope="%s_gate_%s" % (gate_name, layer_idx))
                        else:
                            return linear([input_, o_prev],
                                          output_size=self.controller_dim,
                                          bias=True,
                                          scope="%s_gate_%s" % (gate_name, layer_idx))
                else:
                    def new_gate(gate_name):
                        return linear([output_list[-1], o_prev],
                                      output_size=self.controller_dim,
                                      bias=True,
                                      scope="%s_gate_%s" % (gate_name, layer_idx))

                # input, forget, and output gates for LSTM
                i = tf.sigmoid(new_gate('input'))
                f = tf.sigmoid(new_gate('forget'))
                o = tf.sigmoid(new_gate('output'))
                update = tf.tanh(new_gate('update'))

                # update the state of the LSTM cell
                hid = tf.add_n([f * h_prev, i * update])
                out = o * tf.tanh(hid)

                hidden_list.append(hid)
                output_list.append(out)

            return output_list, hidden_list

    def build_memory(self, M_prev, read_w_list_prev, write_w_list_prev, last_output):
        """Build one read/write cycle against the external memory.

        Returns:
            (M, read_w_list, write_w_list, read_list): the updated memory and
            the new per-head weightings / read vectors.
        """
        with tf.variable_scope("memory"):
            # 3.1 Reading
            if self.read_head_size == 1:
                read_w_prev = read_w_list_prev[0]
                read_w, read = self.build_read_head(M_prev, tf.squeeze(read_w_prev),
                                                    last_output, 0)
                read_w_list = [read_w]
                read_list = [read]
            else:
                read_w_list = []
                read_list = []
                for idx in range(self.read_head_size):
                    read_w_prev_idx = read_w_list_prev[idx]
                    read_w_idx, read_idx = self.build_read_head(M_prev, read_w_prev_idx,
                                                                last_output, idx)
                    read_w_list.append(read_w_idx)
                    read_list.append(read_idx)

            # 3.2 Writing
            if self.write_head_size == 1:
                write_w_prev = write_w_list_prev[0]
                write_w, write, erase = self.build_write_head(M_prev,
                                                              tf.squeeze(write_w_prev),
                                                              last_output, 0)
                M_erase = tf.ones([self.mem_size, self.mem_dim]) \
                    - outer_product(write_w, erase)
                M_write = outer_product(write_w, write)
                write_w_list = [write_w]
            else:
                # NOTE(review): the multi-head branch transposes write_w_idx
                # while the single-head branch does not — confirm the shapes
                # actually require this asymmetry.
                write_w_list = []
                write_list = []
                erase_list = []
                M_erases = []
                M_writes = []
                for idx in range(self.write_head_size):
                    write_w_prev_idx = write_w_list_prev[idx]
                    write_w_idx, write_idx, erase_idx = \
                        self.build_write_head(M_prev, write_w_prev_idx,
                                              last_output, idx)
                    write_w_list.append(tf.transpose(write_w_idx))
                    write_list.append(write_idx)
                    erase_list.append(erase_idx)
                    M_erases.append(tf.ones([self.mem_size, self.mem_dim]) \
                                    - outer_product(write_w_idx, erase_idx))
                    M_writes.append(outer_product(write_w_idx, write_idx))
                M_erase = reduce(lambda x, y: x * y, M_erases)
                M_write = tf.add_n(M_writes)

            # Erase then add (eq. 3/4 of the NTM paper).
            M = M_prev * M_erase + M_write

            return M, read_w_list, write_w_list, read_list

    def build_read_head(self, M_prev, read_w_prev, last_output, idx):
        """Addressing + read for read head `idx`."""
        return self.build_head(M_prev, read_w_prev, last_output, True, idx)

    def build_write_head(self, M_prev, write_w_prev, last_output, idx):
        """Addressing + (add, erase) vectors for write head `idx`."""
        return self.build_head(M_prev, write_w_prev, last_output, False, idx)

    def build_head(self, M_prev, w_prev, last_output, is_read, idx):
        """Build the addressing pipeline of one head (Figure 2 of the paper).

        Returns (w, read) for a read head and (w, add, erase) for a write head.
        """
        scope = "read" if is_read else "write"

        with tf.variable_scope(scope):
            # Amplify or attenuate the precision
            with tf.variable_scope("k"):
                k = tf.tanh(Linear(last_output, self.mem_dim, name='k_%s' % idx))
            # Interpolation gate
            with tf.variable_scope("g"):
                g = tf.sigmoid(Linear(last_output, 1, name='g_%s' % idx))
            # shift weighting
            with tf.variable_scope("s_w"):
                shift_logits = Linear(last_output, 2 * self.shift_range + 1,
                                      name='s_w_%s' % idx)
                s_w = softmax(shift_logits)
            # Key strength (content focus sharpness).
            with tf.variable_scope("beta"):
                beta = tf.nn.softplus(Linear(last_output, 1, name='beta_%s' % idx))
            # Sharpening exponent, constrained to be >= 1.
            with tf.variable_scope("gamma"):
                gamma = tf.add(tf.nn.softplus(Linear(last_output, 1, name='gamma_%s' % idx)),
                               tf.constant(1.0))

            # 3.3.1
            # Cosine similarity
            similarity = smooth_cosine_similarity(M_prev, k)  # [mem_size x 1]
            # Focusing by content
            content_focused_w = softmax(scalar_mul(similarity, beta))

            # 3.3.2
            # Focusing by location: interpolate with the previous weighting.
            gated_w = tf.add_n([
                scalar_mul(content_focused_w, g),
                scalar_mul(w_prev, (tf.constant(1.0) - g))
            ])

            # Convolutional shifts
            conv_w = circular_convolution(gated_w, s_w)

            # Sharpening
            powed_conv_w = tf.pow(conv_w, gamma)
            w = powed_conv_w / tf.reduce_sum(powed_conv_w)

            if is_read:
                # 3.1 Reading
                read = matmul(tf.transpose(M_prev), w)
                return w, read
            else:
                # 3.2 Writing
                erase = tf.sigmoid(Linear(last_output, self.mem_dim, name='erase_%s' % idx))
                add = tf.tanh(Linear(last_output, self.mem_dim, name='add_%s' % idx))
                return w, add, erase

    def initial_state(self, dummy_value=0.0):
        """Build a learned initial state and reset the unrolling history.

        Returns:
            (output, state): the initial output tensor and state dictionary.
        """
        self.depth = 0
        self.states = []

        with tf.variable_scope("init_cell"):
            # always zero
            dummy = tf.Variable(tf.constant([[dummy_value]], dtype=tf.float32))

            # memory
            M_init_linear = tf.tanh(Linear(dummy, self.mem_size * self.mem_dim,
                                           name='M_init_linear'))
            M_init = tf.reshape(M_init_linear, [self.mem_size, self.mem_dim])

            # read weights
            read_w_list_init = []
            read_list_init = []
            for idx in range(self.read_head_size):
                read_w_idx = Linear(dummy, self.mem_size, is_range=True,
                                    squeeze=True, name='read_w_%d' % idx)
                read_w_list_init.append(softmax(read_w_idx))

                read_init_idx = Linear(dummy, self.mem_dim,
                                       squeeze=True, name='read_init_%d' % idx)
                read_list_init.append(tf.tanh(read_init_idx))

            # write weights
            write_w_list_init = []
            for idx in range(self.write_head_size):
                write_w_idx = Linear(dummy, self.mem_size, is_range=True,
                                     squeeze=True, name='write_w_%s' % idx)
                write_w_list_init.append(softmax(write_w_idx))

            # controller state
            output_init_list = []
            hidden_init_list = []
            for idx in range(self.controller_layer_size):
                output_init_idx = Linear(dummy, self.controller_dim,
                                         squeeze=True, name='output_init_%s' % idx)
                output_init_list.append(tf.tanh(output_init_idx))
                hidden_init_idx = Linear(dummy, self.controller_dim,
                                         squeeze=True, name='hidden_init_%s' % idx)
                hidden_init_list.append(tf.tanh(hidden_init_idx))

            output = tf.tanh(Linear(dummy, self.output_dim, name='new_output'))

            state = {
                'M': M_init,
                'read_w': read_w_list_init,
                'write_w': write_w_list_init,
                'read': read_list_init,
                'output': output_init_list,
                'hidden': hidden_init_list
            }

            self.depth += 1
            self.states.append(state)

            return output, state

    def get_memory(self, depth=None):
        """Memory tensor after step `depth` (defaults to the latest step)."""
        depth = depth if depth else self.depth
        return self.states[depth - 1]['M']

    def get_read_weights(self, depth=None):
        """Read weightings after step `depth` (defaults to the latest step)."""
        depth = depth if depth else self.depth
        return self.states[depth - 1]['read_w']

    def get_write_weights(self, depth=None):
        """Write weightings after step `depth` (defaults to the latest step)."""
        depth = depth if depth else self.depth
        return self.states[depth - 1]['write_w']

    def get_read_vector(self, depth=None):
        """Read vectors after step `depth` (defaults to the latest step)."""
        depth = depth if depth else self.depth
        return self.states[depth - 1]['read']

    def print_read_max(self, sess):
        """Print, for each read head, the strongest-attended memory slot
        (index and weight). Assumes utils.argmax returns an (index, value)
        pair — TODO confirm against utils."""
        read_w_list = sess.run(self.get_read_weights())

        fmt = "%-4d %.4f"
        if self.read_head_size == 1:
            print(fmt % (argmax(read_w_list[0])))
        else:
            for idx in range(self.read_head_size):
                # Use utils.argmax like the single-head branch: np.argmax
                # returns a bare index and cannot fill both format fields.
                print(fmt % argmax(read_w_list[idx]))

    def print_write_max(self, sess):
        """Print, for each write head, the strongest-attended memory slot
        (index and weight). Assumes utils.argmax returns an (index, value)
        pair — TODO confirm against utils."""
        write_w_list = sess.run(self.get_write_weights())

        fmt = "%-4d %.4f"
        if self.write_head_size == 1:
            print(fmt % (argmax(write_w_list[0])))
        else:
            for idx in range(self.write_head_size):
                print(fmt % argmax(write_w_list[idx]))
| |
from binascii import unhexlify, hexlify
from copy import deepcopy
from hashlib import sha256
from ecdsa import SigningKey
from bitcoin_tools.core.keys import serialize_pk, ecdsa_tx_sign
from bitcoin_tools.core.script import InputScript, OutputScript, Script, SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, \
SIGHASH_ANYONECANPAY
from bitcoin_tools.utils import change_endianness, encode_varint, int2bytes, is_public_key, is_btc_addr, is_script, \
parse_element, parse_varint, get_prev_ScriptPubKey
class TX:
""" Defines a class TX (transaction) that holds all the modifiable fields of a Bitcoin transaction, such as
version, number of inputs, reference to previous transactions, input and output scripts, value, etc.
"""
def __init__(self):
self.version = None
self.inputs = None
self.outputs = None
self.nLockTime = None
self.prev_tx_id = []
self.prev_out_index = []
self.scriptSig = []
self.scriptSig_len = []
self.nSequence = []
self.value = []
self.scriptPubKey = []
self.scriptPubKey_len = []
self.offset = 0
self.hex = ""
@classmethod
def build_from_hex(cls, hex_tx):
"""
Alias of deserialize class method.
:param hex_tx: Hexadecimal serialized transaction.
:type hex_tx: hex str
:return: The transaction build using the provided hex serialized transaction.
:rtype: TX
"""
return cls.deserialize(hex_tx)
@classmethod
def build_from_scripts(cls, prev_tx_id, prev_out_index, value, scriptSig, scriptPubKey, fees=None):
""" Builds a transaction from already built Input and Output scripts. This builder should be used when building
custom transaction (Non-standard).
:param prev_tx_id: Previous transaction id.
:type prev_tx_id: either str or list of str
:param prev_out_index: Previous output index. Together with prev_tx_id represent the UTXOs the current
transaction is aiming to redeem.
:type prev_out_index: either str or list of str
:param value: Value in Satoshis to be spent.
:type value: either int or list of int
:param scriptSig: Input script containing the restrictions that will lock the transaction.
:type scriptSig: either InputScript or list of InputScript
:param scriptPubKey: Output script containing the redeem fulfilment conditions.
:type scriptPubKey: either OutputScript or list of OutputScript
:param fees: Fees that will be applied to the transaction. If set, fees will be subtracted from the last output.
:type fees: int
:return: The transaction build using the provided scripts.
:rtype: TX
"""
tx = cls()
# Normalize all parameters
if isinstance(prev_tx_id, str):
prev_tx_id = [prev_tx_id]
if isinstance(prev_out_index, int):
prev_out_index = [prev_out_index]
if isinstance(value, int):
value = [value]
if isinstance(scriptSig, InputScript):
scriptSig = [scriptSig]
if isinstance(scriptPubKey, OutputScript):
scriptPubKey = [scriptPubKey]
if len(prev_tx_id) is not len(prev_out_index) or len(prev_tx_id) is not len(scriptSig):
raise Exception("The number ofs UTXOs to spend must match with the number os ScriptSigs to set.")
elif len(scriptSig) == 0 or len(scriptPubKey) == 0:
raise Exception("Scripts can't be empty")
else:
tx.version = 1
# INPUTS
tx.inputs = len(prev_tx_id)
tx.prev_tx_id = prev_tx_id
tx.prev_out_index = prev_out_index
for i in range(tx.inputs):
# ScriptSig
tx.scriptSig_len.append(len(scriptSig[i].content) / 2)
tx.scriptSig.append(scriptSig[i])
tx.nSequence.append(pow(2, 32) - 1) # ffffffff
# OUTPUTS
tx.outputs = len(scriptPubKey)
for i in range(tx.outputs):
tx.value.append(value[i])
# ScriptPubKey
tx.scriptPubKey_len.append(len(scriptPubKey[i].content) / 2)
tx.scriptPubKey.append(scriptPubKey[i]) # Output script.
# If fees have been set, subtract them from the final value. Otherwise, assume they have been already
# subtracted when specifying the amounts.
if fees:
tx.value[-1] -= fees
tx.nLockTime = 0
tx.hex = tx.serialize()
return tx
@classmethod
def build_from_io(cls, prev_tx_id, prev_out_index, value, outputs, fees=None, network='test'):
""" Builds a transaction from a collection of inputs and outputs, such as previous transactions references and
output references (either public keys, Bitcoin addresses, list of public keys (for multisig transactions), etc).
This builder leaves the transaction ready to sign, so its the one to be used in most cases
(Standard transactions).
outputs format:
P2PKH -> Bitcoin address, or list of Bitcoin addresses.
e.g: output = btc_addr or output = [btc_addr0, btc_addr1, ...]
P2PK -> Serialized Public key, or list of serialized pubic keys. (use keys.serialize_pk)
e.g: output = pk or output = [pk0, pk1, ...]
P2MS -> List of int (m) and public keys, or list of lists of int (m_i) and public keys. m represent the m-of-n
number of public keys needed to redeem the transaction.
e.g: output = [n, pk0, pk1, ...] or output = [[n_0, pk0_0, pk0_1, ...], [n_1, pk1_0, pk1_1, ...], ...]
P2SH -> script hash (hash160 str hex) or list of hash 160s.
e.g: output = da1745e9b549bd0bfa1a569971c77eba30cd5a4b or output = [da1745e9b549bd0bfa1a569971c77eba30cd5a4b,
...]
:param prev_tx_id: Previous transaction id.
:type prev_tx_id: either str or list of str
:param prev_out_index: Previous output index. Together with prev_tx_id represent the UTXOs the current
transaction is aiming to redeem.
:type prev_out_index: either str or list of str
:param value: Value in Satoshis to be spent.
:type value: either int or list of int
:param outputs: Information to build the output of the transaction.
:type outputs: See above outputs format.
:param fees: Fees that will be applied to the transaction. If set, fees will be subtracted from the last output.
:type fees: int
:param network: Network into which the transaction will be published (either mainnet or testnet).
:type network: str
:return: Transaction build with the input and output provided data.
:rtype: TX
"""
ins = []
outs = []
# Normalize all parameters
if isinstance(prev_tx_id, str):
prev_tx_id = [prev_tx_id]
if isinstance(prev_out_index, int):
prev_out_index = [prev_out_index]
if isinstance(value, int):
value = [value]
if isinstance(outputs, str) or (isinstance(outputs, list) and isinstance(outputs[0], int)):
outputs = [outputs]
# If fees have been set, subtract them from the final value. Otherwise, assume they have been already
# subtracted when specifying the amounts.
if fees:
value[-1] -= fees
if len(prev_tx_id) != len(prev_out_index):
raise Exception("Previous transaction id and index number of elements must match. " + str(len(prev_tx_id))
+ "!= " + str(len(prev_out_index)))
elif len(value) != len(outputs):
raise Exception("Each output must have set a Satoshi amount. Use 0 if no value is going to be transferred.")
for o in outputs:
# Multisig outputs are passes ad an integer m representing the m-of-n transaction, amb m public keys.
if isinstance(o, list) and o[0] in range(1, 15):
pks = [is_public_key(pk) for pk in o[1:]]
if all(pks):
oscript = OutputScript.P2MS(o[0], len(o) - 1, o[1:])
else:
raise Exception("Bad output")
elif is_public_key(o):
oscript = OutputScript.P2PK(o)
elif is_btc_addr(o, network):
oscript = OutputScript.P2PKH(o)
elif is_script(o):
oscript = OutputScript.P2SH(o)
else:
raise Exception("Bad output")
outs.append(deepcopy(oscript))
for i in range(len(prev_tx_id)):
# Temporarily set IS content to 0, since data will be signed afterwards.
iscript = InputScript()
ins.append(iscript)
# Once all inputs and outputs has been formatted as scripts, we could construct the transaction with the proper
# builder.
tx = cls.build_from_scripts(prev_tx_id, prev_out_index, value, ins, outs)
return tx
@classmethod
def deserialize(cls, hex_tx):
""" Builds a transaction object from the hexadecimal serialization format of a transaction that
could be obtained, for example, from a blockexplorer.
:param hex_tx: Hexadecimal serialized transaction.
:type hex_tx: hex str
:return: The transaction build using the provided hex serialized transaction.
:rtype: TX
"""
tx = cls()
tx.hex = hex_tx
tx.version = int(change_endianness(parse_element(tx, 4)), 16)
# INPUTS
tx.inputs = int(parse_varint(tx), 16)
for i in range(tx.inputs):
tx.prev_tx_id.append(change_endianness(parse_element(tx, 32)))
tx.prev_out_index.append(int(change_endianness(parse_element(tx, 4)), 16))
# ScriptSig
tx.scriptSig_len.append(int(parse_varint(tx), 16))
tx.scriptSig.append(InputScript.from_hex(parse_element(tx, tx.scriptSig_len[i])))
tx.nSequence.append(int(parse_element(tx, 4), 16))
# OUTPUTS
tx.outputs = int(parse_varint(tx), 16)
for i in range(tx.outputs):
tx.value.append(int(change_endianness(parse_element(tx, 8)), 16))
# ScriptPubKey
tx.scriptPubKey_len.append(int(parse_varint(tx), 16))
tx.scriptPubKey.append(OutputScript.from_hex(parse_element(tx, tx.scriptPubKey_len[i])))
tx.nLockTime = int(parse_element(tx, 4), 16)
if tx.offset != len(tx.hex):
raise Exception("There is some error in the serialized transaction passed as input. Transaction can't"
" be built")
else:
tx.offset = 0
return tx
def serialize(self, rtype=hex):
""" Serialize all the transaction fields arranged in the proper order, resulting in a hexadecimal string
ready to be broadcast to the network.
:param self: self
:type self: TX
:param rtype: Whether the serialized transaction is returned as a hex str or a byte array.
:type rtype: hex or bool
:return: Serialized transaction representation (hexadecimal or bin depending on rtype parameter).
:rtype: hex str / bin
"""
if rtype not in [hex, bin]:
raise Exception("Invalid return type (rtype). It should be either hex or bin.")
serialized_tx = change_endianness(int2bytes(self.version, 4)) # 4-byte version number (LE).
# INPUTS
serialized_tx += encode_varint(self.inputs) # Varint number of inputs.
for i in range(self.inputs):
serialized_tx += change_endianness(self.prev_tx_id[i]) # 32-byte hash of the previous transaction (LE).
serialized_tx += change_endianness(int2bytes(self.prev_out_index[i], 4)) # 4-byte output index (LE)
serialized_tx += encode_varint(len(self.scriptSig[i].content) / 2) # Varint input script length.
# ScriptSig
serialized_tx += self.scriptSig[i].content # Input script.
serialized_tx += int2bytes(self.nSequence[i], 4) # 4-byte sequence number.
# OUTPUTS
serialized_tx += encode_varint(self.outputs) # Varint number of outputs.
if self.outputs != 0:
for i in range(self.outputs):
serialized_tx += change_endianness(int2bytes(self.value[i], 8)) # 8-byte field Satoshi value (LE)
# ScriptPubKey
serialized_tx += encode_varint(len(self.scriptPubKey[i].content) / 2) # Varint Output script length.
serialized_tx += self.scriptPubKey[i].content # Output script.
serialized_tx += int2bytes(self.nLockTime, 4) # 4-byte lock time field
# If return type has been set to binary, the serialized transaction is converted.
if rtype is bin:
serialized_tx = unhexlify(serialized_tx)
return serialized_tx
def get_txid(self, rtype=hex, endianness="LE"):
""" Computes the transaction id (i.e: transaction hash for non-segwit txs).
:param rtype: Defines the type of return, either hex str or bytes.
:type rtype: str or bin
:param endianness: Whether the id is returned in BE (Big endian) or LE (Little Endian) (default one)
:type endianness: str
:return: The hash of the transaction (i.e: transaction id)
:rtype: hex str or bin, depending on rtype parameter.
"""
if rtype not in [hex, bin]:
raise Exception("Invalid return type (rtype). It should be either hex or bin.")
if endianness not in ["BE", "LE"]:
raise Exception("Invalid endianness type. It should be either BE or LE.")
if rtype is hex:
tx_id = hexlify(sha256(sha256(self.serialize(rtype=bin)).digest()).digest())
if endianness == "BE":
tx_id = change_endianness(tx_id)
else:
tx_id = sha256(sha256(self.serialize(rtype=bin)).digest()).digest()
if endianness == "BE":
tx_id = unhexlify(change_endianness(hexlify(tx_id)))
return tx_id
def sign(self, sk, index, hashflag=SIGHASH_ALL, compressed=True, orphan=False, deterministic=True, network='test'):
    """ Signs a transaction using the provided private key(s), index(es) and hash type. If more than one key and index
    is provided, key i will sign the input identified by index[i].

    :param sk: Private key(s) used to sign the ith transaction input (defined by index).
    :type sk: SigningKey or list of SigningKey.
    :param index: Index(es) to be signed by the provided key(s).
    :type index: int or list of int
    :param hashflag: Hash type to be used. It will define what signature format will the unsigned transaction have.
    :type hashflag: int
    :param compressed: Indicates if the public key that goes along with the signature will be compressed or not.
    :type compressed: bool
    :param orphan: Whether the inputs to be signed are orphan or not. Orphan inputs are those who are trying to
    redeem from a utxo that has not been included in the blockchain or has not been seen by other nodes.
    Orphan inputs must provide a dict with the index of the input and an OutputScript that matches the utxo to be
    redeemed.
    e.g:
      orphan_input = dict({0: OutputScript.P2PKH(btc_addr))
    :type orphan: dict(index, InputScript)
    :param deterministic: Whether the signature is performed using a deterministic k or not. Set by default.
    :type deterministic: bool
    :param network: Network from which the previous ScripPubKey will be queried (either main or test).
    :type network: str
    :return: Transaction signature.
    :rtype: str
    """

    # Normalize all parameters
    if isinstance(sk, list) and isinstance(index, int):
        # In case a list for multisig is received as only input.
        sk = [sk]
    if isinstance(sk, SigningKey):
        sk = [sk]
    if isinstance(index, int):
        index = [index]

    for i in range(len(sk)):
        # If the input to be signed is orphan, the OutputScript of the UTXO to be redeemed will be passed to
        # the signature_format function, otherwise False is passed and the UTXO will be requested afterwards.
        # NOTE(review): the orphan dict is looked up by loop position i; if `index` is not simply
        # [0, 1, ...] this may need to be orphan.get(index[i]) instead -- confirm against callers.
        o = orphan if not orphan else orphan.get(i)

        # The unsigned transaction is formatted depending on the input that is going to be signed. For input
        # index[i], the ScriptSig will be set to the scriptPubKey of the UTXO that the input tries to redeem,
        # while all the other inputs will be set blank.
        unsigned_tx = self.signature_format(index[i], hashflag, o, network)

        # Then, depending on the format how the private keys have been passed to the signing function
        # and the content of the ScripSig field, a different final scriptSig will be created.
        # BUG FIX: script types are compared with == instead of `is`; identity comparison of strings
        # only worked by CPython interning accident.
        if isinstance(sk[i], list) and unsigned_tx.scriptSig[index[i]].type == "P2MS":
            sigs = []
            for k in sk[i]:
                sigs.append(ecdsa_tx_sign(unsigned_tx.serialize(), k, hashflag, deterministic))
            iscript = InputScript.P2MS(sigs)
        elif isinstance(sk[i], SigningKey) and unsigned_tx.scriptSig[index[i]].type == "P2PK":
            s = ecdsa_tx_sign(unsigned_tx.serialize(), sk[i], hashflag, deterministic)
            iscript = InputScript.P2PK(s)
        elif isinstance(sk[i], SigningKey) and unsigned_tx.scriptSig[index[i]].type == "P2PKH":
            s = ecdsa_tx_sign(unsigned_tx.serialize(), sk[i], hashflag, deterministic)
            pk = serialize_pk(sk[i].get_verifying_key(), compressed)
            iscript = InputScript.P2PKH(s, pk)
        elif unsigned_tx.scriptSig[index[i]].type == "unknown":
            raise Exception("Unknown previous transaction output script type. Can't sign the transaction.")
        else:
            raise Exception("Can't sign input " + str(i) + " with the provided data.")

        # Finally, temporal scripts are stored as final and the length of the script is computed.
        # BUG FIX: store the signed script in the slot of the input that was actually signed (index[i]);
        # the original stored it at loop position i, which corrupted the transaction whenever index
        # was not [0, 1, ..., n-1].
        self.scriptSig[index[i]] = iscript
        self.scriptSig_len[index[i]] = len(iscript.content) / 2

    self.hex = self.serialize()
def signature_format(self, index, hashflag=SIGHASH_ALL, orphan=False, network='test'):
    """ Builds the signature format an unsigned transaction has to follow in order to be signed. Basically empties
    every InputScript field but the one to be signed, identified by index, that will be filled with the OutputScript
    from the UTXO that will be redeemed.

    The format of the OutputScripts will depend on the hashflag:
    - SIGHASH_ALL leaves OutputScript unchanged.
    - SIGHASH_SINGLE should sign each input with the output of the same index (not implemented yet).
    - SIGHASH_NONE empies all the outputs.
    - SIGHASH_ANYONECANPAY not sure about what should do (obviously not implemented yet).

    :param index: The index of the input to be signed.
    :type index: int
    :param hashflag: Hash type to be used, see above description for further information.
    :type hashflag: int
    :param orphan: Whether the input is orphan or not. Orphan inputs must provide an OutputScript that matches the
    utxo to be redeemed.
    :type orphan: OutputScript
    :param network: Network into which the transaction will be published (either mainnet or testnet).
    :type network: str
    :return: Transaction properly formatted to be signed.
    :rtype TX
    """

    # Work on a copy so the original transaction is never mutated.
    tx = deepcopy(self)

    # BUG FIX (throughout this method): indexes and hash flags are plain ints and must be compared
    # with == / !=, not `is` / `is not`; identity comparison only worked for small cached ints.
    for i in range(tx.inputs):
        if i == index:
            if not orphan:
                script, t = get_prev_ScriptPubKey(tx.prev_tx_id[i], tx.prev_out_index[i], network)
                # Once we get the previous UTXO script, the inputScript is temporarily set to it in order to sign
                # the transaction.
                tx.scriptSig[i] = InputScript.from_hex(script)
                tx.scriptSig[i].type = t
            else:
                # If input to be signed is orphan, the orphan InputScript is used when signing the transaction.
                tx.scriptSig[i] = orphan
            tx.scriptSig_len[i] = len(tx.scriptSig[i].content) / 2
        elif tx.scriptSig[i].content != "":
            # All other scriptSig fields are emptied and their length is set to 0.
            tx.scriptSig[i] = InputScript()
            tx.scriptSig_len[i] = len(tx.scriptSig[i].content) / 2

    if hashflag == SIGHASH_SINGLE:
        # First we checks if the input that we are trying to sign has a corresponding output, if so, the execution
        # can continue. Otherwise, we abort the signature process since it could lead to a irreversible lose of
        # funds due to a bug in SIGHASH_SINGLE.
        # https://bitcointalk.org/index.php?topic=260595
        if index >= tx.outputs:
            raise Exception("You are trying to use SIGHASH_SINGLE to sign an input that does not have a "
                            "corresponding output (" + str(index) + "). This could lead to a irreversible lose "
                            "of funds. Signature process aborted.")

        # Otherwise, all outputs will set to empty scripts but the ith one (identified by index),
        # since SIGHASH_SINGLE should only sign the ith input with the ith output.
        else:
            # How to properly deal with SIGHASH_SINGLE signature format extracted from:
            # https://github.com/bitcoin/bitcoin/blob/3192e5278a/test/functional/test_framework/script.py#L869

            # First we backup the output that we will sign,
            t_script = tx.scriptPubKey[index]
            t_size = tx.scriptPubKey_len[index]
            t_value = tx.value[index]

            # Then, we delete every single output.
            tx.scriptPubKey = []
            tx.scriptPubKey_len = []
            tx.value = []
            for o in range(index):
                # Once the all outputs have been deleted, we create empty outputs for every single index before
                # the one that will be signed. Furthermore, the value of the output if set to maximum (2^64-1)
                tx.scriptPubKey.append(OutputScript())
                tx.scriptPubKey_len.append(len(tx.scriptPubKey[o].content) / 2)
                tx.value.append(pow(2, 64) - 1)

            # Once we reach the index of the output that will be signed, we restore it with the one that we backed
            # up before.
            tx.scriptPubKey.append(t_script)
            tx.scriptPubKey_len.append(t_size)
            tx.value.append(t_value)

            # Finally, we recalculate the number of outputs for the signature format.
            # Notice that each signature format will have index number of outputs! Otherwise it will be invalid.
            tx.outputs = len(tx.scriptPubKey)

    elif hashflag == SIGHASH_NONE:
        # Empty all the scriptPubKeys and set the length and the output counter to 0.
        tx.outputs = 0
        tx.scriptPubKey = OutputScript()
        tx.scriptPubKey_len = len(tx.scriptPubKey.content) / 2

    elif hashflag == SIGHASH_ANYONECANPAY:
        # ToDo: Implement SIGHASH_ANYONECANPAY
        pass

    if hashflag in [SIGHASH_SINGLE, SIGHASH_NONE]:
        # All the nSequence from inputs except for the current one (index) is set to 0.
        # https://github.com/bitcoin/bitcoin/blob/3192e5278a/test/functional/test_framework/script.py#L880
        for i in range(tx.inputs):
            if i != index:
                tx.nSequence[i] = 0

    return tx
def display(self):
    """ Displays all the information related to the transaction object, properly split and arranged.

    Data between parenthesis corresponds to the data encoded following the serialized transaction format.
    (replicates the same encoding being done in serialize method)

    :param self: self
    :type self: TX
    :return: None.
    :rtype: None
    """

    # Header: version and input counter, with their serialized (parenthesized) encodings.
    print "version: " + str(self.version) + " (" + change_endianness(int2bytes(self.version, 4)) + ")"
    print "number of inputs: " + str(self.inputs) + " (" + encode_varint(self.inputs) + ")"
    # One section per input: outpoint, scriptSig (raw and decoded) and sequence number.
    for i in range(self.inputs):
        print "input " + str(i)
        print "\t previous txid (little endian): " + self.prev_tx_id[i] + \
              " (" + change_endianness(self.prev_tx_id[i]) + ")"
        print "\t previous tx output (little endian): " + str(self.prev_out_index[i]) + \
              " (" + change_endianness(int2bytes(self.prev_out_index[i], 4)) + ")"
        print "\t input script (scriptSig) length: " + str(self.scriptSig_len[i]) \
              + " (" + encode_varint((self.scriptSig_len[i])) + ")"
        print "\t input script (scriptSig): " + self.scriptSig[i].content
        print "\t decoded scriptSig: " + Script.deserialize(self.scriptSig[i].content)
        # P2SH inputs embed a redeem script as their last element; decode it too.
        if self.scriptSig[i].type is "P2SH":
            print "\t \t decoded redeemScript: " + InputScript.deserialize(self.scriptSig[i].get_element(-1)[1:-1])
        print "\t nSequence: " + str(self.nSequence[i]) + " (" + int2bytes(self.nSequence[i], 4) + ")"
    # One section per output: value and scriptPubKey (raw and decoded).
    print "number of outputs: " + str(self.outputs) + " (" + encode_varint(self.outputs) + ")"
    for i in range(self.outputs):
        print "output " + str(i)
        print "\t Satoshis to be spent (little endian): " + str(self.value[i]) + \
              " (" + change_endianness(int2bytes(self.value[i], 8)) + ")"
        print "\t output script (scriptPubKey) length: " + str(self.scriptPubKey_len[i]) \
              + " (" + encode_varint(self.scriptPubKey_len[i]) + ")"
        print "\t output script (scriptPubKey): " + self.scriptPubKey[i].content
        print "\t decoded scriptPubKey: " + Script.deserialize(self.scriptPubKey[i].content)

    print "nLockTime: " + str(self.nLockTime) + " (" + int2bytes(self.nLockTime, 4) + ")"
| |
import hashlib
import logging
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db import connection, transaction
from celeryutils import task
from PIL import Image
from addons.models import Persona
from editors.models import RereviewQueueTheme
import amo
from amo.decorators import set_modified_on, write
from amo.storage_utils import rm_stored_dir
from amo.utils import cache_ns_key, ImageCheck, LocalFileStorage
from lib.es.utils import index_objects
from versions.models import Version
# pulling tasks from cron
from . import cron, search # NOQA
from .models import (Addon, attach_categories, attach_tags,
attach_translations, CompatOverride,
IncompatibleVersions, Preview)
log = logging.getLogger('z.task')
@task
@write
def version_changed(addon_id, **kw):
    # Celery task fired when an add-on's versions change: refresh the cached
    # last_updated timestamp and rebuild the appsupport rows for that add-on.
    update_last_updated(addon_id)
    update_appsupport([addon_id])
def update_last_updated(addon_id):
    """Recompute and persist the ``last_updated`` field for a single add-on.

    The query used depends on the add-on kind/status (persona, public or
    experimental); missing add-ons are logged and skipped.
    """
    queries = Addon._last_updated_queries()
    try:
        addon = Addon.objects.get(pk=addon_id)
    except Addon.DoesNotExist:
        log.info('[1@None] Updating last updated for %s failed, no addon found'
                 % addon_id)
        return

    log.info('[1@None] Updating last updated for %s.' % addon_id)

    if addon.is_persona():
        query_name = 'personas'
    elif addon.status == amo.STATUS_PUBLIC:
        query_name = 'public'
    else:
        query_name = 'exp'

    rows = (queries[query_name].filter(pk=addon_id).using('default')
            .values_list('id', 'last_updated'))
    if rows:
        pk, timestamp = rows[0]
        Addon.objects.filter(pk=pk).update(last_updated=timestamp)
@transaction.commit_on_success
def update_appsupport(ids):
    """Rebuild the appsupport rows (compatible app/version ranges) for *ids*.

    Deletes the existing rows and re-inserts one row per (addon, app) pair.
    When no add-on has any compatible app, nothing is touched.
    """
    log.info("[%s@None] Updating appsupport for %s." % (len(ids), ids))
    delete = 'DELETE FROM appsupport WHERE addon_id IN (%s)'
    insert = """INSERT INTO appsupport
                    (addon_id, app_id, min, max, created, modified)
                VALUES %s"""

    addons = Addon.objects.no_cache().filter(id__in=ids).no_transforms()
    apps = []
    for addon in addons:
        for app, appver in addon.compatible_apps.items():
            if appver is None:
                # Fake support for all version ranges.
                min_, max_ = 0, 999999999999999999
            else:
                min_, max_ = appver.min.version_int, appver.max.version_int
            apps.append((addon.id, app.id, min_, max_))
    if not apps:
        # Nothing compatible: keep existing rows (matches the original
        # early-return behaviour).
        return
    # FIX: build the VALUES clause only after we know there is something to
    # insert (the original built it before the empty check, wasted work).
    s = ','.join('(%s, %s, %s, %s, NOW(), NOW())' % x for x in apps)
    cursor = connection.cursor()
    cursor.execute(delete % ','.join(map(str, ids)))
    cursor.execute(insert % s)

    # All our updates were sql, so invalidate manually.
    Addon.objects.invalidate(*addons)
@task
def delete_preview_files(id, **kw):
    # Celery task: remove both generated image files (thumbnail and full-size)
    # of a Preview row.  The Preview is built from its id only, since the file
    # paths are derived from the id.
    log.info('[1@None] Removing preview with id of %s.' % id)

    p = Preview(id=id)
    for f in (p.thumbnail_path, p.image_path):
        try:
            storage.delete(f)
        except Exception, e:
            # Best effort: a missing/undeletable file should not abort the task.
            log.error('Error deleting preview file (%s): %s' % (f, e))
@task(acks_late=True)
def index_addons(ids, **kw):
    # Celery task (acks_late so a crashed worker re-queues the job): bulk
    # (re)index the given add-on ids, attaching categories/tags/translations
    # so the search documents are complete.
    log.info('Indexing addons %s-%s. [%s]' % (ids[0], ids[-1], len(ids)))
    transforms = (attach_categories, attach_tags, attach_translations)
    index_objects(ids, Addon, search, kw.pop('index', None), transforms)
@task
def unindex_addons(ids, **kw):
    """Remove every add-on id in *ids* from the search index."""
    for addon_id in ids:
        log.info('Removing addon [%s] from search index.' % addon_id)
        Addon.unindex(addon_id)
@task
def delete_persona_image(dst, **kw):
    # Celery task: delete a single persona image file.  Paths outside the
    # add-ons storage root are refused as a safety check against tampering.
    log.info('[1@None] Deleting persona image: %s.' % dst)
    if not dst.startswith(settings.ADDONS_PATH):
        log.error("Someone tried deleting something they shouldn't: %s" % dst)
        return
    try:
        storage.delete(dst)
    except Exception, e:
        # Best effort: log and continue if the file is already gone.
        log.error('Error deleting persona image: %s' % e)
@set_modified_on
def create_persona_preview_images(src, full_dst, **kw):
    """
    Creates a 680x100 thumbnail used for the Persona preview and
    a 32x32 thumbnail used for search suggestions/detail pages.
    """
    log.info('[1@None] Resizing persona images: %s' % full_dst)
    preview, full = amo.PERSONA_IMAGE_SIZES['header']
    preview_w, preview_h = preview
    orig_w, orig_h = full

    with storage.open(src) as fp:
        original = Image.open(fp)

        # Preview: take the right-hand strip (twice the preview width) and
        # scale it down to the preview size.
        preview_img = original.crop(
            (orig_w - (preview_w * 2), 0, orig_w, orig_h))
        preview_img = preview_img.resize(preview, Image.ANTIALIAS)
        preview_img.load()
        with storage.open(full_dst[0], 'wb') as out:
            preview_img.save(out, 'png')

        # Icon: take a right-hand strip whose width comes from the preview
        # height, then scale it down to the icon size.
        _, icon_size = amo.PERSONA_IMAGE_SIZES['icon']
        original.load()
        icon_img = original.crop((orig_w - (preview_h * 2), 0, orig_w, orig_h))
        icon_img = icon_img.resize(icon_size, Image.ANTIALIAS)
        icon_img.load()
        with storage.open(full_dst[1], 'wb') as out:
            icon_img.save(out, 'png')

    return True
@set_modified_on
def save_persona_image(src, full_dst, **kw):
    """Creates a PNG of a Persona header/footer image."""
    log.info('[1@None] Saving persona image: %s' % full_dst)
    check = ImageCheck(storage.open(src))
    if not check.is_image():
        # Reject anything that does not decode as an image.
        log.error('Not an image: %s' % src, exc_info=True)
        return
    with storage.open(src, 'rb') as source:
        image = Image.open(source)
        with storage.open(full_dst, 'wb') as dest:
            image.save(dest, 'png')
    return True
@task
def update_incompatible_appversions(data, **kw):
    """Updates the incompatible_versions table for this version."""
    log.info('Updating incompatible_versions for %s versions.' % len(data))

    addon_ids = set()

    for version_id in data:
        # This is here to handle both post_save and post_delete hooks.
        IncompatibleVersions.objects.filter(version=version_id).delete()

        try:
            version = Version.objects.get(pk=version_id)
        except Version.DoesNotExist:
            log.info('Version ID [%d] not found. Incompatible versions were '
                     'cleared.' % version_id)
            return

        addon_ids.add(version.addon_id)

        try:
            compat = CompatOverride.objects.get(addon=version.addon)
        except CompatOverride.DoesNotExist:
            log.info('Compat override for addon with version ID [%d] not '
                     'found. Incompatible versions were cleared.' % version_id)
            return

        app_ranges = []
        ranges = compat.collapsed_ranges()

        # FIX: loop variable renamed from `range`, which shadowed the builtin.
        for compat_range in ranges:
            if compat_range.min == '0' and compat_range.max == '*':
                # Wildcard range, add all app ranges
                app_ranges.extend(compat_range.apps)
            else:
                # Since we can't rely on add-on version numbers, get the min
                # and max ID values and find versions whose ID is within those
                # ranges, being careful with wildcards.
                min_id = max_id = None

                if compat_range.min == '0':
                    versions = (Version.objects.filter(addon=version.addon_id)
                                .order_by('id')
                                .values_list('id', flat=True)[:1])
                    if versions:
                        min_id = versions[0]
                else:
                    try:
                        min_id = Version.objects.get(addon=version.addon_id,
                                                     version=compat_range.min).id
                    except Version.DoesNotExist:
                        pass

                if compat_range.max == '*':
                    versions = (Version.objects.filter(addon=version.addon_id)
                                .order_by('-id')
                                .values_list('id', flat=True)[:1])
                    if versions:
                        max_id = versions[0]
                else:
                    try:
                        max_id = Version.objects.get(addon=version.addon_id,
                                                     version=compat_range.max).id
                    except Version.DoesNotExist:
                        pass

                if min_id and max_id:
                    if min_id <= version.id <= max_id:
                        app_ranges.extend(compat_range.apps)

        for app_range in app_ranges:
            IncompatibleVersions.objects.create(version=version,
                                                app_id=app_range.app.id,
                                                min_app_version=app_range.min,
                                                max_app_version=app_range.max)
            log.info('Added incompatible version for version ID [%d]: '
                     'app:%d, %s -> %s' % (version_id, app_range.app.id,
                                           app_range.min, app_range.max))

    # Increment namespace cache of compat versions.
    for addon_id in addon_ids:
        cache_ns_key('d2c-versions:%s' % addon_id, increment=True)
def make_checksum(header_path, footer_path):
    """Return the SHA-224 hex digest of the concatenated header and footer
    image contents (used to detect duplicate theme uploads)."""
    ls = LocalFileStorage()
    raw_checksum = ''
    for path in (header_path, footer_path):
        # FIX: close each file explicitly instead of leaking the handles
        # opened by ls._open(...).read() until garbage collection.
        fh = ls._open(path)
        try:
            raw_checksum += fh.read()
        finally:
            fh.close()
    return hashlib.sha224(raw_checksum).hexdigest()
def theme_checksum(theme, **kw):
    """Compute and store the image checksum of *theme*, marking it as a
    duplicate when another Persona already carries the same checksum."""
    theme.checksum = make_checksum(theme.header_path, theme.footer_path)
    dupes = Persona.objects.filter(checksum=theme.checksum)
    if dupes.exists():
        theme.dupe_persona = dupes[0]
    theme.save()
def rereviewqueuetheme_checksum(rqt, **kw):
    """Check for possible duplicate theme images."""
    # Fall back to the current theme images for whichever side was not
    # re-uploaded.
    header = rqt.header_path or rqt.theme.header_path
    footer = rqt.footer_path or rqt.theme.footer_path
    dupes = Persona.objects.filter(checksum=make_checksum(header, footer))
    if dupes.exists():
        rqt.dupe_persona = dupes[0]
    rqt.save()
@task
@write
def save_theme(header, footer, addon, **kw):
    """Save theme image and calculates checksum after theme save."""
    addon_dir = os.path.join(settings.ADDONS_PATH, str(addon.id))
    header_src = os.path.join(settings.TMP_PATH, 'persona_header', header)
    footer_src = os.path.join(settings.TMP_PATH, 'persona_footer', footer)
    try:
        save_persona_image(src=header_src,
                           full_dst=os.path.join(addon_dir, 'header.png'))
        save_persona_image(src=footer_src,
                           full_dst=os.path.join(addon_dir, 'footer.png'))
        create_persona_preview_images(
            src=header_src,
            full_dst=[os.path.join(addon_dir, 'preview.png'),
                      os.path.join(addon_dir, 'icon.png')],
            set_modified_on=[addon])
        theme_checksum(addon.persona)
    except IOError:
        # A failed image write leaves the theme unusable: drop the add-on
        # and propagate the error.
        addon.delete()
        raise
@task
@write
def save_theme_reupload(header, footer, addon, **kw):
    """Save re-uploaded header/footer images as pending files and queue the
    theme for re-review."""
    dst_root = os.path.join(settings.ADDONS_PATH, str(addon.id))
    header_dst = footer_dst = None

    try:
        if header:
            header = os.path.join(settings.TMP_PATH, 'persona_header', header)
            header_dst = os.path.join(dst_root, 'pending_header.png')
            save_persona_image(src=header, full_dst=header_dst)
        if footer:
            footer = os.path.join(settings.TMP_PATH, 'persona_footer', footer)
            footer_dst = os.path.join(dst_root, 'pending_footer.png')
            save_persona_image(src=footer, full_dst=footer_dst)
    except IOError as e:
        log.error(str(e))
        raise

    if not (header_dst or footer_dst):
        return

    theme = addon.persona
    # Keep the current image for whichever side was not re-uploaded.
    new_header = 'pending_header.png' if header_dst else theme.header
    new_footer = 'pending_footer.png' if footer_dst else theme.footer
    # Store pending header and/or footer file paths for review.
    RereviewQueueTheme.objects.filter(theme=theme).delete()
    rqt = RereviewQueueTheme(theme=theme, header=new_header, footer=new_footer)
    rereviewqueuetheme_checksum(rqt=rqt)
    rqt.save()
@task
@write
def calc_checksum(theme_id, **kw):
    """For migration 596."""
    lfs = LocalFileStorage()
    theme = Persona.objects.get(id=theme_id)
    header = theme.header_path
    footer = theme.footer_path

    # Delete invalid themes that are not images (e.g. PDF, EXE).
    try:
        Image.open(header)
        Image.open(footer)
    except IOError:
        log.info('Deleting invalid theme [%s] (header: %s) (footer: %s)' %
                 (theme.addon.id, header, footer))
        theme.addon.delete()
        theme.delete()
        # Remove the whole theme directory on disk as well.
        rm_stored_dir(header.replace('header.png', ''), storage=lfs)
        return

    # Calculate checksum and save.
    try:
        theme.checksum = make_checksum(header, footer)
        theme.save()
    except IOError as e:
        # Best effort for the migration: log and move on.
        log.error(str(e))
| |
import os
import platform
import shutil
from collections import OrderedDict
from jinja2 import Environment, select_autoescape, FileSystemLoader, ChoiceLoader, Template
from conans.assets.templates import dict_loader
from conans.client.cache.editable import EditablePackages
from conans.client.cache.remote_registry import RemoteRegistry
from conans.client.conf import ConanClientConfigParser, get_default_client_conf, \
get_default_settings_yml
from conans.client.conf.detect import detect_defaults_settings
from conans.client.output import Color
from conans.client.profile_loader import read_profile
from conans.client.store.localdb import LocalDB
from conans.errors import ConanException
from conans.model.conf import ConfDefinition
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.paths import ARTIFACTS_PROPERTIES_FILE
from conans.paths.package_layouts.package_cache_layout import PackageCacheLayout
from conans.paths.package_layouts.package_editable_layout import PackageEditableLayout
from conans.util.files import list_folder_subdirs, load, normalize, save, remove
from conans.util.locks import Lock
# File and folder names used inside the Conan cache folder.
CONAN_CONF = 'conan.conf'
CONAN_SETTINGS = "settings.yml"
LOCALDB = ".conan.db"
REMOTES = "remotes.json"
PROFILES_FOLDER = "profiles"
HOOKS_FOLDER = "hooks"
TEMPLATES_FOLDER = "templates"
GENERATORS_FOLDER = "generators"
def _is_case_insensitive_os():
    """Assume every OS except Linux/FreeBSD/SunOS (i.e. Windows, macOS)
    uses a case-insensitive filesystem."""
    return platform.system() not in ("Linux", "FreeBSD", "SunOS")
if _is_case_insensitive_os():
    # On case-insensitive filesystems two references differing only in case
    # would collide in the cache, so verify that the on-disk casing matches
    # the requested reference, one path component at a time.
    def _check_ref_case(ref, store_folder):
        if not os.path.exists(store_folder):
            return
        tmp = store_folder
        for part in ref.dir_repr().split("/"):
            items = os.listdir(tmp)
            try:
                # Look for a case-insensitive match of this component.
                idx = [item.lower() for item in items].index(part.lower())
                if part != items[idx]:
                    # Same name with different casing already cached: refuse.
                    raise ConanException("Requested '{requested}', but found case incompatible"
                                         " recipe with name '{existing}' in the cache. Case"
                                         " insensitive filesystem can't manage this.\n Remove"
                                         " existing recipe '{existing}' and try again.".format(
                                             requested=str(ref), existing=items[idx]
                                         ))
                tmp = os.path.normpath(tmp + os.sep + part)
            except ValueError:
                # Nothing cached under this path component: no clash possible.
                return
else:
    # Case-sensitive filesystems cannot clash, so the check is a no-op.
    def _check_ref_case(ref, store_folder):  # @UnusedVariable
        pass
class ClientCache(object):
    """ Class to represent/store/compute all the paths involved in the execution
    of conans commands. Accesses to real disk and reads/write things. (OLD client ConanPaths)
    """

    def __init__(self, cache_folder, output):
        self.cache_folder = cache_folder
        self._output = output

        # Caching
        self._no_lock = None
        self._config = None
        self._new_config = None
        self.editable_packages = EditablePackages(self.cache_folder)
        # paths
        self._store_folder = self.config.storage_path or os.path.join(self.cache_folder, "data")
        # Just call it to make it raise in case of short_paths misconfiguration
        _ = self.config.short_paths_home

    def all_refs(self):
        """Return a ConanFileReference for every recipe in the storage folder."""
        subdirs = list_folder_subdirs(basedir=self._store_folder, level=4)
        return [ConanFileReference.load_dir_repr(folder) for folder in subdirs]

    @property
    def store(self):
        # Root folder where recipe/package data is stored.
        return self._store_folder

    def installed_as_editable(self, ref):
        """Whether *ref* is currently installed as an editable package."""
        return isinstance(self.package_layout(ref), PackageEditableLayout)

    @property
    def config_install_file(self):
        return os.path.join(self.cache_folder, "config_install.json")

    def package_layout(self, ref, short_paths=None):
        """Return the layout (editable or cache) used to locate *ref* on disk."""
        assert isinstance(ref, ConanFileReference), "It is a {}".format(type(ref))
        edited_ref = self.editable_packages.get(ref.copy_clear_rev())
        if edited_ref:
            conanfile_path = edited_ref["path"]
            layout_file = edited_ref["layout"]
            return PackageEditableLayout(os.path.dirname(conanfile_path), layout_file, ref,
                                         conanfile_path, edited_ref.get("output_folder"))
        else:
            _check_ref_case(ref, self.store)
            base_folder = os.path.normpath(os.path.join(self.store, ref.dir_repr()))
            return PackageCacheLayout(base_folder=base_folder, ref=ref,
                                      short_paths=short_paths, no_lock=self._no_locks())

    @property
    def remotes_path(self):
        return os.path.join(self.cache_folder, REMOTES)

    @property
    def registry(self):
        return RemoteRegistry(self, self._output)

    def _no_locks(self):
        # Lazily read (and cache) the cache_no_locks configuration flag.
        if self._no_lock is None:
            self._no_lock = self.config.cache_no_locks
        return self._no_lock

    @property
    def artifacts_properties_path(self):
        return os.path.join(self.cache_folder, ARTIFACTS_PROPERTIES_FILE)

    def read_artifacts_properties(self):
        """Parse the artifacts properties file into a dict, creating it empty
        when it does not exist yet."""
        ret = {}
        if not os.path.exists(self.artifacts_properties_path):
            save(self.artifacts_properties_path, "")
            return ret
        try:
            contents = load(self.artifacts_properties_path)
            for line in contents.splitlines():
                if line and not line.strip().startswith("#"):
                    tmp = line.split("=", 1)
                    if len(tmp) != 2:
                        raise Exception()
                    name = tmp[0].strip()
                    value = tmp[1].strip()
                    ret[str(name)] = str(value)
            return ret
        except Exception:
            raise ConanException("Invalid %s file!" % self.artifacts_properties_path)

    @property
    def config(self):
        # Lazily create and parse conan.conf on first access.
        if not self._config:
            self.initialize_config()
            self._config = ConanClientConfigParser(self.conan_conf_path)
        return self._config

    @property
    def new_config_path(self):
        return os.path.join(self.cache_folder, "global.conf")

    @property
    def new_config(self):
        """ this is the new global.conf to replace the old conan.conf that contains
        configuration defined with the new syntax as in profiles, this config will be composed
        to the profile ones and passed to the conanfiles.conf, which can be passed to collaborators
        """
        if self._new_config is None:
            self._new_config = ConfDefinition()
            if os.path.exists(self.new_config_path):
                text = load(self.new_config_path)
                # global.conf is rendered as a jinja template with the
                # platform and os modules available to it.
                content = Template(text).render({"platform": platform, "os": os})
                self._new_config.loads(content)
        return self._new_config

    @property
    def localdb(self):
        localdb_filename = os.path.join(self.cache_folder, LOCALDB)
        encryption_key = os.getenv('CONAN_LOGIN_ENCRYPTION_KEY', None)
        return LocalDB.create(localdb_filename, encryption_key=encryption_key)

    @property
    def conan_conf_path(self):
        return os.path.join(self.cache_folder, CONAN_CONF)

    @property
    def profiles_path(self):
        return os.path.join(self.cache_folder, PROFILES_FOLDER)

    @property
    def settings_path(self):
        return os.path.join(self.cache_folder, CONAN_SETTINGS)

    @property
    def generators_path(self):
        return os.path.join(self.cache_folder, GENERATORS_FOLDER)

    @property
    def default_profile_path(self):
        # The configured default profile may be an absolute path or a name
        # relative to the profiles folder.
        if os.path.isabs(self.config.default_profile):
            return self.config.default_profile
        else:
            return os.path.join(self.cache_folder, PROFILES_FOLDER,
                                self.config.default_profile)

    @property
    def hooks_path(self):
        """
        :return: Hooks folder in client cache
        """
        return os.path.join(self.cache_folder, HOOKS_FOLDER)

    @property
    def default_profile(self):
        self.initialize_default_profile()
        default_profile, _ = read_profile(self.default_profile_path, os.getcwd(), self.profiles_path)

        # Mix profile settings with environment
        mixed_settings = _mix_settings_with_env(default_profile.settings)
        default_profile.settings = mixed_settings
        return default_profile

    @property
    def settings(self):
        """Returns {setting: [value, ...]} defining all the possible
           settings without values"""
        self.initialize_settings()
        content = load(self.settings_path)
        return Settings.loads(content)

    @property
    def hooks(self):
        """Returns a list of hooks inside the hooks folder"""
        hooks = []
        for hook_name in os.listdir(self.hooks_path):
            # FIX: check the full path; the bare file name was resolved
            # relative to the current working directory, so real hooks were
            # missed unless cwd happened to be the hooks folder (compare with
            # the `generators` property below, which joins correctly).
            if (os.path.isfile(os.path.join(self.hooks_path, hook_name))
                    and hook_name.endswith(".py")):
                hooks.append(hook_name[:-3])
        return hooks

    @property
    def generators(self):
        """Returns a list of generator paths inside the generators folder"""
        generators = []
        if os.path.exists(self.generators_path):
            for path in os.listdir(self.generators_path):
                generator = os.path.join(self.generators_path, path)
                if os.path.isfile(generator) and generator.endswith(".py"):
                    generators.append(generator)
        return generators

    def delete_empty_dirs(self, deleted_refs):
        """ Method called by ConanRemover.remove() to clean up from the cache empty folders
        :param deleted_refs: The recipe references that the remove() has been removed
        """
        for ref in deleted_refs:
            ref_path = self.package_layout(ref).base_folder()
            # Walk up at most 4 levels (the name/version/user/channel layout).
            for _ in range(4):
                if os.path.exists(ref_path):
                    try:  # Take advantage that os.rmdir does not delete non-empty dirs
                        os.rmdir(ref_path)
                    except OSError:
                        break  # not empty
                ref_path = os.path.dirname(ref_path)

    def remove_locks(self):
        """Remove every lock left behind in the storage folder."""
        folders = list_folder_subdirs(self._store_folder, 4)
        for folder in folders:
            conan_folder = os.path.join(self._store_folder, folder)
            Lock.clean(conan_folder)
            shutil.rmtree(os.path.join(conan_folder, "locks"), ignore_errors=True)

    def get_template(self, template_name, user_overrides=False):
        """Load a jinja template, optionally letting templates in the cache
        'templates' folder override the built-in ones."""
        # TODO: It can be initialized only once together with the Conan app
        loaders = [dict_loader]
        if user_overrides:
            loaders.insert(0, FileSystemLoader(os.path.join(self.cache_folder, 'templates')))
        env = Environment(loader=ChoiceLoader(loaders),
                          autoescape=select_autoescape(['html', 'xml']))
        return env.get_template(template_name)

    def initialize_config(self):
        """Create conan.conf with default contents if it does not exist."""
        if not os.path.exists(self.conan_conf_path):
            save(self.conan_conf_path, normalize(get_default_client_conf()))

    def reset_config(self):
        """Delete and re-create conan.conf with default contents."""
        if os.path.exists(self.conan_conf_path):
            remove(self.conan_conf_path)
        self.initialize_config()

    def initialize_default_profile(self):
        """Auto-detect settings and write the default profile if missing."""
        if not os.path.exists(self.default_profile_path):
            self._output.writeln("Auto detecting your dev setup to initialize the "
                                 "default profile (%s)" % self.default_profile_path,
                                 Color.BRIGHT_YELLOW)

            default_settings = detect_defaults_settings(self._output,
                                                        profile_path=self.default_profile_path)
            self._output.writeln("Default settings", Color.BRIGHT_YELLOW)
            self._output.writeln("\n".join(["\t%s=%s" % (k, v) for (k, v) in default_settings]),
                                 Color.BRIGHT_YELLOW)
            self._output.writeln("*** You can change them in %s ***" % self.default_profile_path,
                                 Color.BRIGHT_MAGENTA)
            self._output.writeln("*** Or override with -s compiler='other' -s ...s***\n\n",
                                 Color.BRIGHT_MAGENTA)

            default_profile = Profile()
            tmp = OrderedDict(default_settings)
            default_profile.update_settings(tmp)
            save(self.default_profile_path, default_profile.dumps())

    def reset_default_profile(self):
        """Delete and re-create the auto-detected default profile."""
        if os.path.exists(self.default_profile_path):
            remove(self.default_profile_path)
        self.initialize_default_profile()

    def initialize_settings(self):
        """Create settings.yml with default contents if it does not exist."""
        if not os.path.exists(self.settings_path):
            save(self.settings_path, normalize(get_default_settings_yml()))

    def reset_settings(self):
        """Delete and re-create settings.yml with default contents."""
        if os.path.exists(self.settings_path):
            remove(self.settings_path)
        self.initialize_settings()
def _mix_settings_with_env(settings):
    """Reads CONAN_ENV_XXXX variables from environment
    and if it's defined uses these value instead of the default
    from conf file. If you specify a compiler with ENV variable you
    need to specify all the subsettings, the file defaulted will be
    ignored"""
    # FIXME: Conan 2.0. This should be removed, it only applies to default profile, not others

    def env_value_for(setting_name):
        # "compiler.libcxx" -> CONAN_ENV_COMPILER_LIBCXX
        return os.getenv("CONAN_ENV_%s" % setting_name.upper().replace(".", "_"), None)

    def setting_name_for(env_var):
        # CONAN_ENV_COMPILER_LIBCXX -> "compiler.libcxx"
        return env_var[10:].lower().replace("_", ".")

    mixed = OrderedDict()
    for name, default in settings.items():
        override = env_value_for(name)
        if override:
            mixed[name] = override
        elif "." not in name or not env_value_for(name.split(".")[0]):
            # Keep the file default, unless this is a subsetting whose parent
            # is env-defined: in that case the environment rules and the
            # subsetting is dropped (e.g. env sets compiler, so the file's
            # compiler.libcxx no longer applies).
            mixed[name] = default
    # Now read if there are more env variables
    for env_var, value in sorted(os.environ.items()):
        if env_var.startswith("CONAN_ENV_") and setting_name_for(env_var) not in mixed:
            mixed[setting_name_for(env_var)] = value
    return mixed
| |
from __future__ import absolute_import
import django.test
import unittest
from django.core.management import call_command
from django.contrib.auth import get_user_model
from challenges.models import Challenge
from core.tests.setups import (GargantuanChallengeFactory,
ChallengeFactory, UserFactory,
TimeFixedGargantuanChallengeFactory,
InVotingPeriodGargantuanChallengeFactory,
EndedGargantuanChallengeFactory,
RobrechtClencherRoleFactory)
User = get_user_model()
class ChallengeTestCase(django.test.TestCase):
    """Unit tests for the Challenge model.

    Fixtures come from the factory classes in core.tests.setups; every
    test builds exactly one challenge, so lookups use ``id=1``.
    """

    def setUp(self):
        # The allauth social app for github must exist before the
        # factory-created users/challenges can be set up.
        call_command('setup_allauth_social',
                     'github',
                     domain="http://localhost",
                     setting="test")
        #GargantuanChallengeFactory()

    def tearDown(self):
        # Reset the shared factory sequence so generated user data is
        # deterministic from one test to the next.
        UserFactory.reset_sequence(1)

    def test_get_unicode(self):
        TimeFixedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.__unicode__(),
                         "Challenge 1, created on Tue Feb  4 09:15:00 2014")

    def test_get_absolute_url(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.get_absolute_url(), "/api/challenges/1/")

    def test_get_clencher(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.get_clencher().user.email,
                         u"de.rouck.robrecht@gmail.com")

    def test_get_jurors(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(
            [juror.user.email for juror in challenge.get_jurors()],
            [u'andy.slacker@gmail.com', u'fred.labot@gmail.com',
             u'jason.jay@gmail.com'])

    def test_get_challenge_period_end_datetime(self):
        TimeFixedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        # ctime() pads single-digit days with an extra space ("Mar  4").
        self.assertEqual(challenge.get_challenge_period_end_datetime().ctime(),
                         'Tue Mar  4 09:15:00 2014')
        # While:
        self.assertEqual(challenge.creation_datetime.ctime(),
                         'Tue Feb  4 09:15:00 2014')

    def test_get_voting_period_end_datetime(self):
        TimeFixedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        # Weird: why is there no space here in ctime format? Or rather why
        # was there a space in
        # challenge.get_challenge_period_end_datetime().ctime() ?
        # (Answer: ctime pads single-digit days; the 11th needs no pad.)
        self.assertEqual(challenge.get_voting_period_end_datetime().ctime(),
                         'Tue Mar 11 09:15:00 2014')
        # While:
        self.assertEqual(challenge.creation_datetime.ctime(),
                         'Tue Feb  4 09:15:00 2014')

    def test_in_voting_period(self):
        InVotingPeriodGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.in_voting_period(), True)

    def test_in_challenge_period(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.in_challenge_period(), True)

    def test_has_challenge_ended(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.has_ended(), True)

    def test_get_juror_representation_number_as_juror(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        juror_1 = challenge.get_jurors()[0]
        self.assertEqual(challenge.get_juror_representation_number(juror_1), 1)

    def test_get_juror_representation_number_as_non_juror_for_given_challenge(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        clencher = challenge.get_clencher()
        # A clencher is not a juror, so asking for its juror number raises.
        self.assertRaises(Exception,
                          challenge.get_juror_representation_number, clencher)

    def test_get_vote_results(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.get_vote_results(),
                         {'positive': 1, 'negative': 1, 'not_voted': 1})

    # Need more tests for tested method
    def test_has_majority_vote(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.has_majority_vote(), False)

    # Need more tests for tested method
    def test_is_successful(self):
        EndedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        self.assertEqual(challenge.is_successful(), False)

    def test_get_repo_branch_path_representation(self):
        challenge = ChallengeFactory(repo_name="asiakas", branch_name="master")
        self.assertEqual(challenge.get_repo_branch_path_representation(),
                         "asiakas/master")

    def test_get_branch_main_url(self):
        clencher_rob = RobrechtClencherRoleFactory()
        challenge = clencher_rob.challenge
        self.assertEqual(
            challenge.get_branch_main_url(),
            "https://github.com/RobrechtDR/gargantuan/tree/challenge")

    @unittest.skip("The challenge method currently makes a request")
    def test_get_commit_comparison_url(self):
        challenge = GargantuanChallengeFactory(repo_name="asiakas",
                                               branch_name="master",
                                               start_commit="6b8c19067")
        self.assertEqual(
            challenge.get_commit_comparison_url(),
            ("https://github.com/RobrechtDR/asiakas/"
             "compare/6b8c19067...b3e18963c3e1091134f5b4637aa198d196336ea9"))

    def test_is_last_commit_different_from_start_commit(self):
        challenge = GargantuanChallengeFactory(repo_name="asiakas",
                                               branch_name="master",
                                               start_commit="6b8c19067")
        self.assertEqual(
            challenge.is_last_commit_different_from_start_commit(),
            True)

    def test_user_has_role(self):
        challenge = GargantuanChallengeFactory()
        rob = challenge.get_clencher().user
        self.assertEqual(challenge.user_has_role(rob), True)
class RoleTestCase(django.test.TestCase):
    """Unit tests for challenge roles (clencher / juror behavior)."""

    def setUp(self):
        # The allauth social app for github must exist before the
        # factory-created users/challenges can be set up.
        call_command('setup_allauth_social',
                     'github',
                     domain="http://localhost",
                     setting="test")
        #GargantuanChallengeFactory()

    def tearDown(self):
        # Reset the shared factory sequence so generated user data is
        # deterministic from one test to the next.
        UserFactory.reset_sequence(1)

    def test_get_unicode(self):
        TimeFixedGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        clencher = challenge.get_clencher()
        self.assertEqual(
            clencher.__unicode__(),
            ("Clencher 'de.rouck.robrecht' of 'Challenge 1, "
             "created on Tue Feb  4 09:15:00 2014'"))

    def test_get_absolute_url(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        clencher = challenge.get_clencher()
        self.assertEqual(clencher.get_absolute_url(), "/api/roles/1/")

    def test_is_juror(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        juror = challenge.get_jurors()[0]
        self.assertTrue(juror.is_juror())
        clencher = challenge.get_clencher()
        self.assertFalse(clencher.is_juror())

    def test_clencher_cant_make_head_comment(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        clencher = challenge.get_clencher()
        self.assertEqual(clencher.can_make_head_comment(), False)

    def test_juror_cant_make_head_comment_before_end_of_challenge_period(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        jason = User.objects.get(email="jason.jay@gmail.com")
        juror_jason = challenge.role_set.get(user=jason)  # without head comment
        self.assertEqual(juror_jason.can_make_head_comment(), False)
        # While:
        self.assertEqual(challenge.in_challenge_period(), True)

    def test_juror_without_headcomments_can_make_head_comment(self):
        InVotingPeriodGargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        jason = User.objects.get(email="jason.jay@gmail.com")
        juror_jason = challenge.role_set.get(user=jason)  # without head comment
        self.assertEqual(juror_jason.can_make_head_comment(), True)

    def test_juror_with_comments_cant_make_head_comment(self):
        GargantuanChallengeFactory()
        challenge = Challenge.objects.get(id=1)
        andy = User.objects.get(email="andy.slacker@gmail.com")
        juror_andy = challenge.role_set.get(user=andy)  # with head comment
        self.assertEqual(juror_andy.can_make_head_comment(), False)
| |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cloud Custodian Lambda Provisioning Support
docs/lambda.rst
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import abc
import base64
import hashlib
import importlib
import io
import json
import logging
import os
import time
import tempfile
import zipfile
from concurrent.futures import ThreadPoolExecutor
# Static event mapping to help simplify cwe rules creation
from c7n.exceptions import ClientError
from c7n.cwe import CloudWatchEvents
from c7n.logs_support import _timestamp_from_string
from c7n.utils import parse_s3, local_session
log = logging.getLogger('custodian.serverless')
class PythonPackageArchive(object):
    """Creates a zip file for python lambda functions.

    :param tuple modules: the Python modules to add to the archive

    Amazon doesn't give us straightforward docs here, only `an example
    <http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-deployment-pkg.html#with-s3-example-deployment-pkg-python>`_,
    from which we can infer that they simply unzip the file into a directory on
    ``sys.path``. So what we do is locate all of the ``modules`` specified, and
    add all of the ``.py`` files we find for these modules to a zip file.

    In addition to the modules specified during instantiation, you can add
    arbitrary additional files to the archive using :py:func:`add_file` and
    :py:func:`add_contents`. For example, since we only add ``*.py`` files for
    you, you'll need to manually add files for any compiled extension modules
    that your Lambda requires.
    """

    def __init__(self, *modules):
        self._temp_archive_file = tempfile.NamedTemporaryFile()
        self._zip_file = zipfile.ZipFile(
            self._temp_archive_file, mode='w',
            compression=zipfile.ZIP_DEFLATED)
        self._closed = False
        self.add_modules(None, *modules)

    @property
    def path(self):
        """Filesystem path of the underlying temp archive file."""
        return self._temp_archive_file.name

    @property
    def size(self):
        """Size in bytes of the archive; only valid once closed."""
        if not self._closed:
            raise ValueError("Archive not closed, size not accurate")
        return os.stat(self._temp_archive_file.name).st_size

    def add_modules(self, ignore, *modules):
        """Add the named Python modules to the archive. For consistency's sake
        we only add ``*.py`` files, not ``*.pyc``. We also don't add other
        files, including compiled modules. You'll have to add such files
        manually using :py:meth:`add_file`.
        """
        for module_name in modules:
            module = importlib.import_module(module_name)
            if hasattr(module, '__path__'):
                # https://docs.python.org/3/reference/import.html#module-path
                for directory in module.__path__:
                    self.add_directory(directory, ignore)
                if getattr(module, '__file__', None) is None:
                    # Likely a namespace package. Try to add *.pth files so
                    # submodules are importable under Python 2.7.
                    sitedir = list(module.__path__)[0].rsplit('/', 1)[0]
                    for filename in os.listdir(sitedir):
                        s = filename.startswith
                        e = filename.endswith
                        if s(module_name) and e('-nspkg.pth'):
                            self.add_file(os.path.join(sitedir, filename))
            elif hasattr(module, '__file__'):
                # https://docs.python.org/3/reference/import.html#__file__
                path = module.__file__
                if path.endswith('.pyc'):
                    # Prefer the source file sitting next to the bytecode.
                    _path = path[:-1]
                    if not os.path.isfile(_path):
                        raise ValueError(
                            'Could not find a *.py source file behind ' + path)
                    path = _path
                if not path.endswith('.py'):
                    raise ValueError(
                        'We need a *.py source file instead of ' + path)
                self.add_file(path)

    def add_directory(self, path, ignore=None):
        """Add ``*.py`` files under the directory ``path`` to the archive.

        ``ignore``, when given, is a predicate over the in-archive path;
        matching files are skipped.
        """
        for root, dirs, files in os.walk(path):
            arc_prefix = os.path.relpath(root, os.path.dirname(path))
            for f in files:
                dest_path = os.path.join(arc_prefix, f)
                # ignore specific files
                if ignore and ignore(dest_path):
                    continue
                # Skip bytecode and C sources; only *.py is shipped.
                if f.endswith('.pyc') or f.endswith('.c'):
                    continue
                f_path = os.path.join(root, f)
                self.add_file(f_path, dest_path)

    def add_file(self, src, dest=None):
        """Add the file at ``src`` to the archive.

        If ``dest`` is ``None`` then it is added under just the original
        filename. So ``add_file('foo/bar.txt')`` ends up at ``bar.txt`` in the
        archive, while ``add_file('bar.txt', 'foo/bar.txt')`` ends up at
        ``foo/bar.txt``.
        """
        dest = dest or os.path.basename(src)
        with open(src, 'rb') as fp:
            contents = fp.read()
        self.add_contents(dest, contents)

    def add_py_file(self, src, dest=None):
        """This is a special case of :py:meth:`add_file` that helps for adding
        a ``py`` when a ``pyc`` may be present as well. So for example, if
        ``__file__`` is ``foo.pyc`` and you do:

        .. code-block:: python

          archive.add_py_file(__file__)

        then this method will add ``foo.py`` instead if it exists, and raise
        ``IOError`` if it doesn't.
        """
        src = src[:-1] if src.endswith('.pyc') else src
        self.add_file(src, dest)

    def add_contents(self, dest, contents):
        """Add file contents to the archive under ``dest``.

        If ``dest`` is a path, it will be added compressed and world-readable
        (user-writeable). You may also pass a :py:class:`~zipfile.ZipInfo` for
        custom behavior.
        """
        assert not self._closed, "Archive closed"
        if not isinstance(dest, zipfile.ZipInfo):
            # zinfo sets world-readable perms; see its docstring for caveats
            dest = zinfo(dest)
        self._zip_file.writestr(dest, contents)

    def close(self):
        """Close the zip file.

        Note underlying tempfile is removed when archive is garbage collected.
        """
        self._closed = True
        self._zip_file.close()
        log.debug(
            "Created custodian serverless archive size: %0.2fmb",
            (os.path.getsize(self._temp_archive_file.name) / (
                1024.0 * 1024.0)))
        return self

    def remove(self):
        """Dispose of the temp file for garbage collection."""
        if self._temp_archive_file:
            self._temp_archive_file = None

    def get_checksum(self, encoder=base64.b64encode, hasher=hashlib.sha256):
        """Return the b64 encoded sha256 checksum of the archive."""
        assert self._closed, "Archive not closed"
        with open(self._temp_archive_file.name, 'rb') as fh:
            return encoder(checksum(fh, hasher())).decode('ascii')

    def get_bytes(self):
        """Return the entire zip file as a byte string. """
        assert self._closed, "Archive not closed"
        # Use a context manager so the file handle is not leaked
        # (previously the handle was opened and never closed).
        with open(self._temp_archive_file.name, 'rb') as fh:
            return fh.read()

    def get_reader(self):
        """Return a read-only :py:class:`~zipfile.ZipFile`."""
        assert self._closed, "Archive not closed"
        buf = io.BytesIO(self.get_bytes())
        return zipfile.ZipFile(buf, mode='r')

    def get_filenames(self):
        """Return a list of filenames in the archive."""
        return [n.filename for n in self.get_reader().filelist]
def checksum(fh, hasher, blocksize=65536):
    """Stream file object *fh* through *hasher* in *blocksize* chunks.

    Returns the raw digest bytes.
    """
    chunk = fh.read(blocksize)
    while chunk:
        hasher.update(chunk)
        chunk = fh.read(blocksize)
    return hasher.digest()
def custodian_archive(packages=None):
    """Create a lambda code archive for running custodian.

    Lambda archive currently always includes `c7n` and
    `pkg_resources`. Add additional packages in the mode block.

    Example policy that includes additional packages

    .. code-block:: yaml

        policy:
          name: lambda-archive-example
          resource: s3
          mode:
            packages:
              - botocore

    packages: List of additional packages to include in the lambda archive.
    """
    required = {'c7n', 'pkg_resources'}
    if packages:
        required = required.union(packages)
    # Drop any falsy entries and bundle in deterministic (sorted) order.
    names = sorted(name for name in required if name)
    return PythonPackageArchive(*names)
class LambdaManager(object):
    """ Provides CRUD operations around lambda functions
    """

    def __init__(self, session_factory, s3_asset_path=None):
        self.session_factory = session_factory
        self.client = self.session_factory().client('lambda')
        # Optional s3://bucket/prefix used when publishing code via s3.
        self.s3_asset_path = s3_asset_path

    def list_functions(self, prefix=None):
        """Yield all lambda functions, optionally filtered by name prefix."""
        p = self.client.get_paginator('list_functions')
        for rp in p.paginate():
            for f in rp.get('Functions', []):
                if not prefix:
                    yield f
                elif f['FunctionName'].startswith(prefix):
                    yield f

    def publish(self, func, alias=None, role=None, s3_uri=None):
        """Create or update the lambda for *func* and bind its event sources.

        Sets ``func.arn`` and ``func.alias`` and returns the raw
        create/update API result.
        """
        result, changed = self._create_or_update(
            func, role, s3_uri, qualifier=alias)

        func.arn = result['FunctionArn']
        if alias and changed:
            # Code/config changed: point the alias at the new version.
            func.alias = self.publish_alias(result, alias)
        elif alias:
            func.alias = "%s:%s" % (func.arn, alias)
        else:
            func.alias = func.arn

        for e in func.get_events(self.session_factory):
            if e.add(func):
                log.debug(
                    "Added event source: %s to function: %s",
                    e, func.alias)
        return result

    def remove(self, func, alias=None):
        """Unbind event sources for *func*, then delete the function."""
        for e in func.get_events(self.session_factory):
            e.remove(func)
        log.info("Removing lambda function %s", func.name)
        try:
            self.client.delete_function(FunctionName=func.name)
        except ClientError as e:
            # Already-deleted is fine; anything else propagates.
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise

    def metrics(self, funcs, start, end, period=5 * 60):
        """Fetch cloudwatch sum statistics for the given functions.

        *funcs* may be function objects or the dicts returned by
        ``list_functions``; dicts get the results attached under 'Metrics'.
        """

        def func_metrics(f):
            metrics = local_session(self.session_factory).client('cloudwatch')
            values = {}
            for m in ('Errors', 'Invocations', 'Durations', 'Throttles'):
                values[m] = metrics.get_metric_statistics(
                    Namespace="AWS/Lambda",
                    Dimensions=[{
                        'Name': 'FunctionName',
                        'Value': (
                            isinstance(f, dict) and f['FunctionName'] or f.name)}],
                    Statistics=["Sum"],
                    StartTime=start,
                    EndTime=end,
                    Period=period,
                    MetricName=m)['Datapoints']
            return values

        with ThreadPoolExecutor(max_workers=3) as w:
            results = list(w.map(func_metrics, funcs))
            for m, f in zip(results, funcs):
                if isinstance(f, dict):
                    f['Metrics'] = m
        return results

    def logs(self, func, start, end):
        """Yield log events for *func* between *start* and *end*.

        Silently yields nothing when the log group or streams don't exist.
        """
        logs = self.session_factory().client('logs')
        group_name = "/aws/lambda/%s" % func.name
        log.info("Fetching logs from group: %s" % group_name)
        try:
            logs.describe_log_groups(
                logGroupNamePrefix=group_name)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                return
            raise
        try:
            # Only the three most recently active streams are examined.
            log_streams = logs.describe_log_streams(
                logGroupName=group_name,
                orderBy="LastEventTime", limit=3, descending=True)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ResourceNotFoundException':
                return
            raise
        start = _timestamp_from_string(start)
        end = _timestamp_from_string(end)
        for s in reversed(log_streams['logStreams']):
            result = logs.get_log_events(
                logGroupName=group_name,
                logStreamName=s['logStreamName'],
                startTime=start,
                endTime=end,
            )
            for e in result['events']:
                yield e

    @staticmethod
    def delta_function(old_config, new_config):
        """Return True when new_config differs from the deployed old_config."""
        found = False
        for k in new_config:
            # Vpc needs special handling as a dict with lists
            if k == 'VpcConfig' and k in old_config and new_config[k]:
                if set(old_config[k]['SubnetIds']) != set(
                        new_config[k]['SubnetIds']):
                    found = True
                elif set(old_config[k]['SecurityGroupIds']) != set(
                        new_config[k]['SecurityGroupIds']):
                    found = True
            elif k not in old_config:
                # Lambda omits empty-valued optional config from its
                # responses; don't count those as a difference.
                if k in LAMBDA_EMPTY_VALUES and LAMBDA_EMPTY_VALUES[k] == new_config[k]:
                    continue
                found = True
            elif new_config[k] != old_config[k]:
                found = True
        return found

    @staticmethod
    def diff_tags(old_tags, new_tags):
        """Return (tags to add/update, tag keys to remove)."""
        add = {}
        remove = set()
        for k, v in new_tags.items():
            if k not in old_tags or old_tags[k] != v:
                add[k] = v
        for k in old_tags:
            if k not in new_tags:
                remove.add(k)
        return add, list(remove)

    def _create_or_update(self, func, role=None, s3_uri=None, qualifier=None):
        # Returns (api_result, changed) where changed indicates whether
        # code or configuration was actually pushed.
        role = func.role or role
        assert role, "Lambda function role must be specified"
        archive = func.get_archive()
        existing = self.get(func.name, qualifier)

        if s3_uri:
            # TODO: support versioned buckets
            bucket, key = self._upload_func(s3_uri, func, archive)
            code_ref = {'S3Bucket': bucket, 'S3Key': key}
        else:
            code_ref = {'ZipFile': archive.get_bytes()}

        changed = False
        if existing:
            old_config = existing['Configuration']
            # Compare local archive checksum with the deployed CodeSha256.
            if archive.get_checksum() != old_config[
                    'CodeSha256'].encode('ascii'):
                log.debug("Updating function %s code", func.name)
                params = dict(FunctionName=func.name, Publish=True)
                params.update(code_ref)
                result = self.client.update_function_code(**params)
                changed = True
            # TODO/Consider also set publish above to false, and publish
            # after configuration change?

            new_config = func.get_config()
            new_config['Role'] = role
            new_tags = new_config.pop('Tags', {})

            if self.delta_function(old_config, new_config):
                log.debug("Updating function: %s config" % func.name)
                result = self.client.update_function_configuration(**new_config)
                changed = True

            # tag dance
            base_arn = old_config['FunctionArn']
            if base_arn.count(':') > 6:  # trim version/alias
                base_arn = base_arn.rsplit(':', 1)[0]

            old_tags = self.client.list_tags(Resource=base_arn)['Tags']
            tags_to_add, tags_to_remove = self.diff_tags(old_tags, new_tags)

            if tags_to_add:
                log.debug("Adding/updating tags: %s config" % func.name)
                self.client.tag_resource(
                    Resource=base_arn, Tags=tags_to_add)
            if tags_to_remove:
                log.debug("Removing tags: %s config" % func.name)
                self.client.untag_resource(
                    Resource=base_arn, TagKeys=tags_to_remove)

            if not changed:
                result = old_config
        else:
            log.info('Publishing custodian policy lambda function %s', func.name)
            params = func.get_config()
            params.update({'Publish': True, 'Code': code_ref, 'Role': role})
            result = self.client.create_function(**params)
            changed = True

        return result, changed

    def _upload_func(self, s3_uri, func, archive):
        """Upload the archive to s3 and return (bucket, key)."""
        from boto3.s3.transfer import S3Transfer, TransferConfig
        _, bucket, key_prefix = parse_s3(s3_uri)
        key = "%s/%s" % (key_prefix, func.name)
        transfer = S3Transfer(
            self.session_factory().client('s3'),
            config=TransferConfig(
                multipart_threshold=1024 * 1024 * 4))
        transfer.upload_file(
            archive.path,
            bucket=bucket,
            key=key,
            extra_args={
                'ServerSideEncryption': 'AES256'})
        return bucket, key

    def publish_alias(self, func_data, alias):
        """Create or update an alias for the given function.
        """
        if not alias:
            return func_data['FunctionArn']
        func_name = func_data['FunctionName']
        func_version = func_data['Version']

        exists = resource_exists(
            self.client.get_alias, FunctionName=func_name, Name=alias)

        if not exists:
            log.debug("Publishing custodian lambda alias %s", alias)
            alias_result = self.client.create_alias(
                FunctionName=func_name,
                Name=alias,
                FunctionVersion=func_version)
        else:
            # Alias already points at this version: nothing to do.
            if (exists['FunctionVersion'] == func_version and
                    exists['Name'] == alias):
                return exists['AliasArn']
            log.debug('Updating custodian lambda alias %s', alias)
            alias_result = self.client.update_alias(
                FunctionName=func_name,
                Name=alias,
                FunctionVersion=func_version)
        return alias_result['AliasArn']

    def get(self, func_name, qualifier=None):
        """Return the get_function response, or False when it doesn't exist."""
        params = {'FunctionName': func_name}
        if qualifier:
            params['Qualifier'] = qualifier
        return resource_exists(
            self.client.get_function, **params)
def resource_exists(op, NotFound="ResourceNotFoundException", *args, **kw):
    """Invoke *op*; return its result, or False on the *NotFound* error code."""
    try:
        return op(*args, **kw)
    except ClientError as e:
        if e.response['Error']['Code'] != NotFound:
            raise
        return False
class AbstractLambdaFunction:
    """Abstract base class for lambda functions.

    Subclasses supply the function configuration via the abstract
    properties below; :py:meth:`get_config` assembles them into the
    parameter dict for the Lambda create/update APIs.
    """
    # NOTE(review): ``__metaclass__`` is only honored by Python 2; under
    # Python 3 this is a plain class and abstractness is not enforced.
    __metaclass__ = abc.ABCMeta

    # Set by LambdaManager.publish after deployment.
    alias = None

    @abc.abstractproperty
    def name(self):
        """Name for the lambda function"""

    @abc.abstractproperty
    def runtime(self):
        """Lambda runtime identifier (e.g. python2.7)."""

    @abc.abstractproperty
    def description(self):
        """Human readable function description."""

    @abc.abstractproperty
    def handler(self):
        """Entry point in 'module.function' form."""

    @abc.abstractproperty
    def memory_size(self):
        """Memory allocation for the function."""

    @abc.abstractproperty
    def timeout(self):
        """Execution timeout."""

    @abc.abstractproperty
    def role(self):
        """IAM execution role for the function."""

    @abc.abstractproperty
    def subnets(self):
        """Subnet ids for vpc execution, or None."""

    @abc.abstractproperty
    def security_groups(self):
        """Security group ids for vpc execution, or None."""

    @abc.abstractproperty
    def dead_letter_config(self):
        """DeadLetterConfig dict (may be empty)."""

    @abc.abstractproperty
    def environment(self):
        """Environment dict with a 'Variables' mapping."""

    @abc.abstractproperty
    def kms_key_arn(self):
        """KMS key arn for env encryption, or empty string."""

    @abc.abstractproperty
    def tracing_config(self):
        """X-Ray TracingConfig dict."""

    @abc.abstractproperty
    def tags(self):
        """Tags to apply to the function."""

    @abc.abstractmethod
    def get_events(self, session_factory):
        """event sources that should be bound to this lambda."""

    @abc.abstractmethod
    def get_archive(self):
        """Return the lambda distribution archive object."""

    def get_config(self):
        """Assemble the Lambda API configuration dict from the properties."""
        conf = {
            'FunctionName': self.name,
            'MemorySize': self.memory_size,
            'Role': self.role,
            'Description': self.description,
            'Runtime': self.runtime,
            'Handler': self.handler,
            'Timeout': self.timeout,
            'TracingConfig': self.tracing_config,
            'KMSKeyArn': self.kms_key_arn,
            'DeadLetterConfig': self.dead_letter_config,
            'VpcConfig': LAMBDA_EMPTY_VALUES['VpcConfig'],
            'Tags': self.tags}
        # Only include Environment when there are actual variables set.
        if self.environment['Variables']:
            conf['Environment'] = self.environment
        # Both subnets and security groups are required for vpc execution.
        if self.subnets and self.security_groups:
            conf['VpcConfig'] = {
                'SubnetIds': self.subnets,
                'SecurityGroupIds': self.security_groups}
        return conf
# The "empty" values Lambda omits from get_function responses for unset
# optional configuration; used by delta_function to avoid treating an
# unset field in the old config vs. a default in the new one as a change.
LAMBDA_EMPTY_VALUES = {
    'Environment': {'Variables': {}},
    'DeadLetterConfig': {},
    'TracingConfig': {'Mode': 'PassThrough'},
    'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []},
    'KMSKeyArn': '',
}
class LambdaFunction(AbstractLambdaFunction):
    """Concrete lambda function defined by a settings dict plus an archive."""

    def __init__(self, func_data, archive):
        # func_data: dict with the function settings; archive: the
        # deployable code archive (e.g. a PythonPackageArchive).
        self.func_data = func_data
        required = set((
            'name', 'handler', 'memory_size',
            'timeout', 'role', 'runtime',
            'description'))
        missing = required.difference(func_data)
        if missing:
            raise ValueError("Missing required keys %s" % " ".join(missing))
        self.archive = archive

    @property
    def name(self):
        return self.func_data['name']

    @property
    def description(self):
        return self.func_data['description']

    @property
    def handler(self):
        return self.func_data['handler']

    @property
    def memory_size(self):
        return self.func_data['memory_size']

    @property
    def timeout(self):
        return self.func_data['timeout']

    @property
    def runtime(self):
        return self.func_data['runtime']

    @property
    def role(self):
        return self.func_data['role']

    @property
    def security_groups(self):
        return self.func_data.get('security_groups', None)

    @property
    def subnets(self):
        return self.func_data.get('subnets', None)

    @property
    def dead_letter_config(self):
        return self.func_data.get(
            'dead_letter_config', LAMBDA_EMPTY_VALUES['DeadLetterConfig'])

    @property
    def environment(self):
        return self.func_data.get(
            'environment', LAMBDA_EMPTY_VALUES['Environment'])

    @property
    def kms_key_arn(self):
        return self.func_data.get('kms_key_arn', '')

    @property
    def tracing_config(self):
        # Default
        return self.func_data.get(
            'tracing_config', LAMBDA_EMPTY_VALUES['TracingConfig'])

    @property
    def tags(self):
        return self.func_data.get('tags', {})

    def get_events(self, session_factory):
        return self.func_data.get('events', ())

    def get_archive(self):
        return self.archive
PolicyHandlerTemplate = """\
from c7n import handler
def run(event, context):
return handler.dispatch_event(event, context)
"""
class PolicyLambda(AbstractLambdaFunction):
    """Wraps a custodian policy to turn it into a lambda function.

    Configuration values come from the policy's ``mode`` block, with
    defaults where the mode block omits them.
    """
    # Entry point inside the generated custodian_policy.py module.
    handler = "custodian_policy.run"

    def __init__(self, policy):
        self.policy = policy
        self.archive = custodian_archive(packages=self.packages)

    @property
    def name(self):
        prefix = self.policy.data['mode'].get('function-prefix', 'custodian-')
        return "%s%s" % (prefix, self.policy.name)

    @property
    def description(self):
        return self.policy.data.get(
            'description', 'cloud-custodian lambda policy')

    @property
    def role(self):
        return self.policy.data['mode'].get('role', '')

    @property
    def runtime(self):
        return self.policy.data['mode'].get('runtime', 'python2.7')

    @property
    def memory_size(self):
        return self.policy.data['mode'].get('memory', 512)

    @property
    def timeout(self):
        return self.policy.data['mode'].get('timeout', 60)

    @property
    def security_groups(self):
        return self.policy.data['mode'].get('security_groups', None)

    @property
    def subnets(self):
        return self.policy.data['mode'].get('subnets', None)

    @property
    def dead_letter_config(self):
        return self.policy.data['mode'].get(
            'dead_letter_config', LAMBDA_EMPTY_VALUES['DeadLetterConfig'])

    @property
    def environment(self):
        return self.policy.data['mode'].get(
            'environment', LAMBDA_EMPTY_VALUES['Environment'])

    @property
    def kms_key_arn(self):
        return self.policy.data['mode'].get('kms_key_arn', '')

    @property
    def tracing_config(self):
        # Default
        return self.policy.data['mode'].get(
            'tracing_config', {'Mode': 'PassThrough'})

    @property
    def tags(self):
        return self.policy.data['mode'].get('tags', {})

    @property
    def packages(self):
        # Extra packages to bundle into the archive, per the mode block.
        return self.policy.data['mode'].get('packages')

    def get_events(self, session_factory):
        """Return the event source(s) driving this policy's lambda."""
        events = []
        if self.policy.data['mode']['type'] == 'config-rule':
            events.append(
                ConfigRule(self.policy.data['mode'], session_factory))
        else:
            events.append(
                CloudWatchEventSource(
                    self.policy.data['mode'], session_factory))
        return events

    def get_archive(self):
        # Bundle the policy config and the handler module, then finalize.
        self.archive.add_contents(
            'config.json', json.dumps(
                {'policies': [self.policy.data]}, indent=2))
        self.archive.add_contents('custodian_policy.py', PolicyHandlerTemplate)
        self.archive.close()
        return self.archive
def zinfo(fname):
    """Build a ZipInfo for *fname* whose contents lambda can actually read.

    Amazon lambda exec environment setup can break itself
    if zip files aren't constructed a particular way.

    ie. It respects file perm attributes from the zip including
    those that prevent lambda from working. Namely lambda
    extracts code as one user, and executes code as a different
    user. Without permissions for the executing user to read
    the file the lambda function is broken.

    Python's default zipfile.writestr does a 0600 perm which
    we modify here as a workaround.
    """
    entry = zipfile.ZipInfo(fname)
    # 0644 shifted into the upper 16 bits is the unix mode rw-r--r--,
    # granting other users permission to read.
    # http://unix.stackexchange.com/questions/14705/
    entry.external_attr = 0o644 << 16
    return entry
class CloudWatchEventSource(object):
"""Subscribe a lambda to cloud watch events.
Cloud watch events supports a number of different event
sources, from periodic timers with cron syntax, to
real time instance state notifications, cloud trail
events, and realtime asg membership changes.
Event Pattern for Instance State
.. code-block:: json
{
"source": ["aws.ec2"],
"detail-type": ["EC2 Instance State-change Notification"],
"detail": { "state": ["pending"]}
}
Event Pattern for Cloud Trail API
.. code-block:: json
{
"detail-type": ["AWS API Call via CloudTrail"],
"detail": {
"eventSource": ["s3.amazonaws.com"],
"eventName": ["CreateBucket", "DeleteBucket"]
}
}
"""
ASG_EVENT_MAPPING = {
'launch-success': 'EC2 Instance Launch Successful',
'launch-failure': 'EC2 Instance Launch Unsuccessful',
'terminate-success': 'EC2 Instance Terminate Successful',
'terminate-failure': 'EC2 Instance Terminate Unsuccessful'}
    def __init__(self, data, session_factory, prefix="custodian-"):
        # data: the policy mode block describing this event source;
        # prefix: namespace prefix applied to rule names we create.
        self.session_factory = session_factory
        self.session = session_factory()
        self.client = self.session.client('events')
        self.data = data
        self.prefix = prefix
def _make_notification_id(self, function_name):
if not function_name.startswith(self.prefix):
return "%s%s" % (self.prefix, function_name)
return function_name
    def get(self, rule_name):
        # Returns the describe_rule response, or False when it doesn't exist.
        return resource_exists(
            self.client.describe_rule,
            Name=self._make_notification_id(rule_name))
@staticmethod
def delta(src, tgt):
"""Given two cwe rules determine if the configuration is the same.
Name is already implied.
"""
for k in ['State', 'EventPattern', 'ScheduleExpression']:
if src.get(k) != tgt.get(k):
return True
return False
def __repr__(self):
return "<CWEvent Type:%s Events:%s>" % (
self.data.get('type'),
', '.join(map(str, self.data.get('events', []))))
def resolve_cloudtrail_payload(self, payload):
sources = self.data.get('sources', [])
events = []
for e in self.data.get('events'):
if not isinstance(e, dict):
events.append(e)
event_info = CloudWatchEvents.get(e)
if event_info is None:
continue
else:
event_info = e
events.append(e['event'])
sources.append(event_info['source'])
payload['detail'] = {
'eventSource': list(set(sources)),
'eventName': events}
    def render_event_pattern(self):
        """Return the CWE rule EventPattern as a JSON string, or None.

        Raises ValueError for an unknown event source type.
        """
        event_type = self.data.get('type')
        payload = {}
        if event_type == 'cloudtrail':
            payload['detail-type'] = ['AWS API Call via CloudTrail']
            self.resolve_cloudtrail_payload(payload)

        if event_type == 'cloudtrail':
            # Console sign-in events use a distinct detail-type.
            if 'signin.amazonaws.com' in payload['detail']['eventSource']:
                payload['detail-type'] = ['AWS Console Sign In via CloudTrail']
        elif event_type == 'guard-duty':
            payload['source'] = ['aws.guardduty']
            payload['detail-type'] = ['GuardDuty Finding']
            if 'resource-filter' in self.data:
                payload.update({
                    'detail': {'resource': {'resourceType': [self.data['resource-filter']]}}})
        elif event_type == "ec2-instance-state":
            payload['source'] = ['aws.ec2']
            payload['detail-type'] = [
                "EC2 Instance State-change Notification"]
            # Technically could let empty be all events, but likely misconfig
            payload['detail'] = {"state": self.data.get('events', [])}
        elif event_type == "asg-instance-state":
            payload['source'] = ['aws.autoscaling']
            events = []
            # Map shorthand names to full asg detail-types where known.
            for e in self.data.get('events', []):
                events.append(self.ASG_EVENT_MAPPING.get(e, e))
            payload['detail-type'] = events
        elif event_type == 'periodic':
            # Schedule-driven rules have no event pattern.
            pass
        else:
            raise ValueError(
                "Unknown lambda event source type: %s" % event_type)
        if not payload:
            return None
        return json.dumps(payload)
    def add(self, func):
        """Create/update the CWE rule for *func* and target it at the lambda.

        Idempotent: the rule is rewritten only when its configuration
        drifted (per delta), the invoke permission tolerates an existing
        grant, and the target is added only when missing.
        Returns True when a target was added, None otherwise.
        """
        params = dict(
            Name=func.name, Description=func.description, State='ENABLED')
        pattern = self.render_event_pattern()
        if pattern:
            params['EventPattern'] = pattern
        schedule = self.data.get('schedule')
        if schedule:
            params['ScheduleExpression'] = schedule
        rule = self.get(func.name)
        # put_rule is an upsert; reuse the existing arn when unchanged.
        if rule and self.delta(rule, params):
            log.debug("Updating cwe rule for %s" % self)
            response = self.client.put_rule(**params)
        elif not rule:
            log.debug("Creating cwe rule for %s" % (self))
            response = self.client.put_rule(**params)
        else:
            response = {'RuleArn': rule['Arn']}
        # Allow the events service to invoke the function.
        try:
            self.session.client('lambda').add_permission(
                FunctionName=func.name,
                StatementId=func.name,
                SourceArn=response['RuleArn'],
                Action='lambda:InvokeFunction',
                Principal='events.amazonaws.com')
            log.debug('Added lambda invoke cwe rule permission')
        except ClientError as e:
            # Already-granted permission is fine; anything else is real.
            if e.response['Error']['Code'] != 'ResourceConflictException':
                raise
        # Add Targets
        found = False
        response = self.client.list_targets_by_rule(Rule=func.name)
        # CWE seems to be quite picky about function arns (no aliases/versions)
        func_arn = func.arn
        if func_arn.count(':') > 6:
            func_arn, version = func_arn.rsplit(':', 1)
        for t in response['Targets']:
            if func_arn == t['Arn']:
                found = True
        if found:
            return
        log.debug('Creating cwe rule target for %s on func:%s' % (
            self, func_arn))
        self.client.put_targets(
            Rule=func.name, Targets=[{"Id": func.name, "Arn": func_arn}])
        return True
    def update(self, func):
        """Reconcile the rule for *func*; add() already has upsert semantics."""
        self.add(func)
    def pause(self, func):
        """Disable the rule without deleting it (best effort)."""
        try:
            self.client.disable_rule(Name=func.name)
        except Exception:
            # Deliberate best-effort: a missing/disabled rule is not an error.
            pass
    def resume(self, func):
        """Re-enable a previously paused rule (best effort)."""
        try:
            self.client.enable_rule(Name=func.name)
        except Exception:
            # Deliberate best-effort: a missing/enabled rule is not an error.
            pass
    def remove(self, func):
        """Delete the CWE rule and its targets, if the rule exists."""
        if self.get(func.name):
            log.info("Removing cwe targets and rule %s", func.name)
            try:
                # Targets must be removed before the rule can be deleted.
                targets = self.client.list_targets_by_rule(
                    Rule=func.name)['Targets']
                self.client.remove_targets(
                    Rule=func.name,
                    Ids=[t['Id'] for t in targets])
            except ClientError as e:
                # Best effort: log and still attempt rule deletion.
                log.warning(
                    "Could not remove targets for rule %s error: %s",
                    func.name, e)
            self.client.delete_rule(Name=func.name)
class BucketLambdaNotification(object):
    """Subscribe a lambda to bucket notifications directly.

    The bucket's notification configuration is fetched, our lambda entry
    is upserted, and an invoke permission for s3.amazonaws.com is granted
    on the function.
    """

    def __init__(self, data, session_factory, bucket):
        self.data = data
        self.session_factory = session_factory
        self.session = session_factory()
        self.bucket = bucket

    def delta(self, src, tgt):
        """Return True if the two notification entries differ on any key."""
        for k in ['Id', 'LambdaFunctionArn', 'Events', 'Filters']:
            if src.get(k) != tgt.get(k):
                return True
        return False

    def _get_notifies(self, s3, func):
        """Return (full notification config, our existing entry or False)."""
        notifies = s3.get_bucket_notification_configuration(
            Bucket=self.bucket['Name'])
        found = False
        for f in notifies.get('LambdaFunctionConfigurations', []):
            if f['Id'] != func.name:
                continue
            found = f
        return notifies, found

    def add(self, func):
        """Create/update the bucket notification and the invoke permission.

        Returns True when the configuration was written; None when it was
        already up to date.
        """
        s3 = self.session.client('s3')
        notifies, found = self._get_notifies(s3, func)
        notifies.pop('ResponseMetadata', None)
        func_arn = func.arn
        # S3 wants an unqualified function arn (strip a trailing version).
        if func_arn.rsplit(':', 1)[-1].isdigit():
            func_arn = func_arn.rsplit(':', 1)[0]
        n_params = {
            'Id': func.name,
            'LambdaFunctionArn': func_arn,
            'Events': self.data.get('events', ['s3:ObjectCreated:*'])}
        if self.data.get('filters'):
            # BUG FIX: was ``self.filters`` — an attribute never set on this
            # class; the configured filter rules live in self.data.
            n_params['Filters'] = {
                'Key': {'FilterRules': self.data['filters']}}

        if found:
            if self.delta(found, n_params):
                # Stale entry: drop it so the fresh one can be appended.
                notifies['LambdaFunctionConfigurations'].remove(found)
            else:
                log.info("Bucket lambda notification present")
                return

        lambda_client = self.session.client('lambda')
        params = dict(
            FunctionName=func.name,
            StatementId=self.bucket['Name'],
            Action='lambda:InvokeFunction',
            Principal='s3.amazonaws.com')
        if self.data.get('account_s3'):
            params['SourceAccount'] = self.data['account_s3']
            params['SourceArn'] = 'arn:aws:s3:::*'
        else:
            # BUG FIX: format string was 'arn:aws:s3:::%' (incomplete
            # format), which raises ValueError at runtime.
            params['SourceArn'] = 'arn:aws:s3:::%s' % self.bucket['Name']

        try:
            lambda_client.add_permission(**params)
        except ClientError as e:
            # Permission already present is fine (idempotent add).
            if e.response['Error']['Code'] != 'ResourceConflictException':
                raise

        notifies.setdefault('LambdaFunctionConfigurations', []).append(n_params)
        s3.put_bucket_notification_configuration(
            Bucket=self.bucket['Name'], NotificationConfiguration=notifies)
        return True

    def remove(self, func):
        """Remove our notification entry and the lambda invoke permission."""
        s3 = self.session.client('s3')
        notifies, found = self._get_notifies(s3, func)
        if not found:
            return

        lambda_client = self.session.client('lambda')
        try:
            # NOTE(review): func is indexed as a mapping here while add()
            # uses attribute access (func.name) — removal callers appear to
            # pass a lambda resource dict; confirm against call sites.
            response = lambda_client.remove_permission(
                FunctionName=func['FunctionName'],
                StatementId=self.bucket['Name'])
            log.debug("Removed lambda permission result: %s" % response)
        except ClientError as e:
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise

        notifies['LambdaFunctionConfigurations'].remove(found)
        s3.put_bucket_notification_configuration(
            Bucket=self.bucket['Name'],
            NotificationConfiguration=notifies)
class CloudWatchLogSubscription(object):
    """ Subscribe a lambda to a log group[s]
    """

    # Wait period after granting invoke permission (IAM propagation).
    iam_delay = 1.5

    def __init__(self, session_factory, log_groups, filter_pattern):
        self.log_groups = log_groups
        self.filter_pattern = filter_pattern
        self.session_factory = session_factory
        self.session = session_factory()
        self.client = self.session.client('logs')

    def add(self, func):
        """Grant the logs service invoke permission on *func* and create a
        subscription filter for each configured log group."""
        lambda_client = self.session.client('lambda')
        for group in self.log_groups:
            log.info(
                "Creating subscription filter for %s" % group['logGroupName'])
            # arn:aws:logs:<region>:... — region is the fourth field.
            region = group['arn'].split(':', 4)[3]
            try:
                lambda_client.add_permission(
                    FunctionName=func.name,
                    # Statement id derived from the group path, '/' -> '-'.
                    StatementId=group['logGroupName'][1:].replace('/', '-'),
                    SourceArn=group['arn'],
                    Action='lambda:InvokeFunction',
                    Principal='logs.%s.amazonaws.com' % region)
                # BUG FIX: message was garbled ("ipo nvoke").
                log.debug("Added lambda invoke log group permission")
                # iam eventual consistency and propagation
                time.sleep(self.iam_delay)
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceConflictException':
                    raise
            # Consistent put semantics / ie no op if extant
            self.client.put_subscription_filter(
                logGroupName=group['logGroupName'],
                filterName=func.name,
                filterPattern=self.filter_pattern,
                destinationArn=func.alias or func.arn)

    def remove(self, func):
        """Remove the invoke permission and the subscription filter for each
        configured log group; already-missing resources are tolerated."""
        lambda_client = self.session.client('lambda')
        for group in self.log_groups:
            try:
                response = lambda_client.remove_permission(
                    FunctionName=func.name,
                    StatementId=group['logGroupName'][1:].replace('/', '-'))
                log.debug("Removed lambda permission result: %s" % response)
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceNotFoundException':
                    raise
            try:
                response = self.client.delete_subscription_filter(
                    logGroupName=group['logGroupName'], filterName=func.name)
                log.debug("Removed subscription filter from: %s",
                          group['logGroupName'])
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceNotFoundException':
                    raise
class SQSSubscription(object):
    """ Subscribe a lambda to one or more SQS queues.
    """

    def __init__(self, session_factory, queue_arns, batch_size=10):
        self.queue_arns = queue_arns
        self.session_factory = session_factory
        self.batch_size = batch_size

    def add(self, func):
        """Create or enable an event source mapping for each queue.

        Returns True when any mapping was created or updated, False when
        everything was already correctly configured.
        """
        client = local_session(self.session_factory).client('lambda')
        event_mappings = {
            m['EventSourceArn']: m for m in client.list_event_source_mappings(
                FunctionName=func.name).get('EventSourceMappings', ())}

        modified = False
        for queue_arn in self.queue_arns:
            mapping = None
            if queue_arn in event_mappings:
                mapping = event_mappings[queue_arn]
                # BUG FIX: the original condition was
                # ``State == 'Enabled' or BatchSize != self.batch_size``,
                # which skipped enabled mappings even when their batch size
                # was stale. Skip only when nothing needs to change.
                if (mapping['State'] == 'Enabled' and
                        mapping['BatchSize'] == self.batch_size):
                    continue
                modified = True
            else:
                modified = True
            if not modified:
                return modified
            if mapping is not None:
                log.info(
                    "Updating subscription %s on %s", func.name, queue_arn)
                client.update_event_source_mapping(
                    UUID=mapping['UUID'],
                    Enabled=True,
                    BatchSize=self.batch_size)
            else:
                log.info("Subscribing %s to %s", func.name, queue_arn)
                client.create_event_source_mapping(
                    FunctionName=func.name,
                    EventSourceArn=queue_arn,
                    BatchSize=self.batch_size)
        return modified

    def remove(self, func):
        """Delete any event source mappings for the configured queues."""
        client = local_session(self.session_factory).client('lambda')
        event_mappings = {
            m['EventSourceArn']: m for m in client.list_event_source_mappings(
                FunctionName=func.name).get('EventSourceMappings', ())}
        for queue_arn in self.queue_arns:
            if queue_arn not in event_mappings:
                continue
            client.delete_event_source_mapping(
                UUID=event_mappings[queue_arn]['UUID'])
class SNSSubscription(object):
    """ Subscribe a lambda to one or more SNS topics.
    """

    # Wait period after granting invoke permission (IAM propagation).
    iam_delay = 1.5

    def __init__(self, session_factory, topic_arns):
        self.topic_arns = topic_arns
        self.session_factory = session_factory

    @staticmethod
    def _parse_arn(arn):
        """Split an SNS arn into (region, topic_name, statement_id)."""
        # arn:aws:sns:<region>:<account>:<topic-name>
        fields = arn.split(':')
        topic_name = fields[5]
        return fields[3], topic_name, 'sns-topic-' + topic_name

    def add(self, func):
        """Grant SNS invoke permission and subscribe *func* to each topic."""
        session = local_session(self.session_factory)
        lambda_client = session.client('lambda')
        for arn in self.topic_arns:
            region, topic_name, statement_id = self._parse_arn(arn)
            log.info("Subscribing %s to %s" % (func.name, topic_name))
            # Add permission to lambda for sns invocation.
            try:
                lambda_client.add_permission(
                    FunctionName=func.name,
                    StatementId=statement_id,
                    SourceArn=arn,
                    Action='lambda:InvokeFunction',
                    Principal='sns.amazonaws.com')
                log.debug("Added permission for sns to invoke lambda")
                # iam eventual consistency and propagation
                time.sleep(self.iam_delay)
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceConflictException':
                    raise
            # Subscribe the lambda to the topic, idempotent
            sns_client = session.client('sns')
            sns_client.subscribe(
                TopicArn=arn, Protocol='lambda', Endpoint=func.arn)

    def remove(self, func):
        """Remove the invoke permission and unsubscribe *func* per topic."""
        session = local_session(self.session_factory)
        lambda_client = session.client('lambda')
        sns_client = session.client('sns')
        for topic_arn in self.topic_arns:
            region, topic_name, statement_id = self._parse_arn(topic_arn)
            try:
                response = lambda_client.remove_permission(
                    FunctionName=func.name,
                    StatementId=statement_id)
                log.debug("Removed lambda permission result: %s" % response)
            except ClientError as e:
                if e.response['Error']['Code'] != 'ResourceNotFoundException':
                    raise
            paginator = sns_client.get_paginator('list_subscriptions_by_topic')
            unsubscribed = False
            for page in paginator.paginate(TopicArn=topic_arn):
                for subscription in page['Subscriptions']:
                    if subscription['Endpoint'] != func.arn:
                        continue
                    try:
                        response = sns_client.unsubscribe(
                            SubscriptionArn=subscription['SubscriptionArn'])
                        log.debug("Unsubscribed %s from %s" %
                                  (func.name, topic_name))
                    except ClientError as e:
                        if (e.response['Error']['Code'] !=
                                'ResourceNotFoundException'):
                            raise
                    # Only one subscription per function: stop paginating.
                    unsubscribed = True
                    break
                if unsubscribed:
                    break
class BucketSNSNotification(SNSSubscription):
    """ Subscribe a lambda to bucket notifications via SNS. """

    def __init__(self, session_factory, bucket, topic=None):
        # NB: We are overwriting __init__ vs. extending.
        self.session_factory = session_factory
        self.session = session_factory()
        # With no explicit topic, find or create one wired to the bucket.
        self.topic_arns = self.get_topic(bucket) if topic is None else [topic]
        self.client = self.session.client('sns')

    def get_topic(self, bucket):
        """Return topic arns receiving s3:ObjectCreated:* for *bucket*.

        When no suitable topic is registered, create one named after the
        bucket, grant s3.amazonaws.com publish rights on it, and add it to
        the bucket's notification configuration.

        NOTE(review): assumes bucket['Notification'] was pre-populated by
        the caller — confirm against call sites.
        """
        session = local_session(self.session_factory)
        sns = session.client('sns')
        s3 = session.client('s3')
        notifies = bucket['Notification']
        if 'TopicConfigurations' not in notifies:
            notifies['TopicConfigurations'] = []
        all_topics = notifies['TopicConfigurations']
        # Reuse any topic already subscribed to object-created events.
        topic_arns = [t['TopicArn'] for t in all_topics
                      if 's3:ObjectCreated:*' in t['Events']]
        if not topic_arns:
            # No suitable existing topic. Create one.
            topic_arn = sns.create_topic(Name=bucket['Name'])['TopicArn']
            # Allow S3 to publish bucket events to the new topic.
            policy = {
                'Statement': [{
                    'Action': 'SNS:Publish',
                    'Effect': 'Allow',
                    'Resource': topic_arn,
                    'Principal': {'Service': 's3.amazonaws.com'}}]}
            sns.set_topic_attributes(
                TopicArn=topic_arn,
                AttributeName='Policy',
                AttributeValue=json.dumps(policy))
            notifies['TopicConfigurations'].append({
                'TopicArn': topic_arn,
                'Events': ['s3:ObjectCreated:*']})
            s3.put_bucket_notification_configuration(Bucket=bucket['Name'],
                NotificationConfiguration=notifies)
            topic_arns = [topic_arn]
        return topic_arns
class ConfigRule(object):
    """Use a lambda as a custom config rule.
    """

    def __init__(self, data, session_factory):
        self.data = data
        self.session_factory = session_factory
        self.session = session_factory()
        self.client = self.session.client('config')

    def __repr__(self):
        return "<ConfigRule>"

    def get_rule_params(self, func):
        """Build the put_config_rule parameter dict for *func*."""
        # config does not support versions/aliases on lambda funcs
        func_arn = func.arn
        if func_arn.count(':') > 6:
            func_arn, version = func_arn.rsplit(':', 1)
        params = dict(
            ConfigRuleName=func.name,
            Description=func.description,
            Source={
                'Owner': 'CUSTOM_LAMBDA',
                'SourceIdentifier': func_arn,
                'SourceDetails': [{
                    'EventSource': 'aws.config',
                    'MessageType': 'ConfigurationItemChangeNotification'}]
            }
        )
        if isinstance(func, PolicyLambda):
            # Scope the rule to the policy's resource config type.
            manager = func.policy.load_resource_manager()
            if hasattr(manager.get_model(), 'config_type'):
                config_type = manager.get_model().config_type
            else:
                raise Exception("You may have attempted to deploy a config "
                    "based lambda function with an unsupported config type. "
                    "The most recent AWS config types are here: http://docs.aws"
                    ".amazon.com/config/latest/developerguide/resource"
                    "-config-reference.html.")
            params['Scope'] = {
                'ComplianceResourceTypes': [config_type]}
        else:
            # BUG FIX: the original indexed params['Scope'][...] before the
            # 'Scope' key existed, raising KeyError for non-policy lambdas.
            params['Scope'] = {
                'ComplianceResourceTypes': self.data.get(
                    'resource-types', ())}
        return params

    def get(self, rule_name):
        """Fetch the deployed rule, or a falsy value when absent."""
        rules = resource_exists(
            self.client.describe_config_rules,
            ConfigRuleNames=[rule_name],
            NotFound="NoSuchConfigRuleException")
        if not rules:
            return rules
        return rules['ConfigRules'][0]

    @staticmethod
    def delta(rule, params):
        """Return True when the deployed rule differs from desired params."""
        # doesn't seem like we have anything mutable at the moment,
        # since we restrict params, maybe reusing the same policy name
        # with a different resource type.
        if rule['Scope'] != params['Scope']:
            return True
        if rule['Source'] != params['Source']:
            return True
        # BUG FIX: the original compared rule's description against itself,
        # so description changes were never detected.
        if rule.get('Description', '') != params.get('Description', ''):
            return True
        return False

    def add(self, func):
        """Create or update the config rule backing *func*."""
        rule = self.get(func.name)
        params = self.get_rule_params(func)

        if rule and self.delta(rule, params):
            log.debug("Updating config rule for %s" % self)
            rule.update(params)
            return self.client.put_config_rule(ConfigRule=rule)
        elif rule:
            log.debug("Config rule up to date")
            return
        # Allow the config service to invoke the function.
        try:
            self.session.client('lambda').add_permission(
                FunctionName=func.name,
                StatementId=func.name,
                SourceAccount=func.arn.split(':')[4],
                Action='lambda:InvokeFunction',
                Principal='config.amazonaws.com')
        except ClientError as e:
            if e.response['Error']['Code'] != 'ResourceConflictException':
                raise

        log.debug("Adding config rule for %s" % func.name)
        return self.client.put_config_rule(ConfigRule=params)

    def remove(self, func):
        """Delete the config rule backing *func*, tolerating absence."""
        rule = self.get(func.name)
        if not rule:
            return
        log.info("Removing config rule for %s", func.name)
        try:
            self.client.delete_config_rule(
                ConfigRuleName=func.name)
        except ClientError as e:
            if e.response['Error']['Code'] != 'ResourceNotFoundException':
                raise
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha_ip_db
import mock
import netaddr
from neutron.common import config # noqa
from neutron import context
from neutron import manager
from oslo_config import cfg
from gbpservice.neutron.services.servicechain.plugins.ncp import (
plugin as ncp_plugin)
from gbpservice.neutron.services.servicechain.plugins.ncp import model
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_apic_mapping as test_apic)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_rmd)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as base)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_tscp_resource_mapping as test_tscp_rmd)
class ApicMappingStitchingPlumberGBPTestCase(
        test_apic.ApicMappingTestCase):
    """APIC mapping test base configured with the stitching plumber."""

    def setUp(self, plumber='stitching_plumber'):
        cfg.CONF.set_override(
            'extension_drivers', ['proxy_group'], group='group_policy')
        cfg.CONF.set_override('node_plumber', plumber,
                              group='node_composition_plugin')
        super(ApicMappingStitchingPlumberGBPTestCase, self).setUp(
            sc_plugin=base.SC_PLUGIN_KLASS)

        # Stub the node driver's plumbing info with the shared test mapping.
        # BUG FIX: this stanza was duplicated verbatim in the original;
        # the redundant second assignment has been removed.
        def get_plumbing_info(context):
            return test_tscp_rmd.info_mapping.get(
                context.current_profile['service_type'])

        self.node_driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
        self.node_driver.get_plumbing_info = get_plumbing_info
        self.mgr = self.driver.apic_manager

    @property
    def sc_plugin(self):
        # Resolve the service chain plugin lazily from the neutron manager.
        plugins = manager.NeutronManager.get_service_plugins()
        servicechain_plugin = plugins.get('SERVICECHAIN')
        return servicechain_plugin
class TestPolicyRuleSet(ApicMappingStitchingPlumberGBPTestCase,
                        test_apic.TestPolicyRuleSet):
    """Re-run the APIC policy-rule-set tests under the stitching plumber."""
    pass
class TestPolicyRule(ApicMappingStitchingPlumberGBPTestCase,
                     test_apic.TestPolicyRule):
    """Re-run the APIC policy-rule tests under the stitching plumber."""
    pass
class TestExternalSegment(ApicMappingStitchingPlumberGBPTestCase,
                          test_apic.TestExternalSegment):
    """Re-run the APIC external-segment tests under the stitching plumber."""
    pass
class TestExternalPolicy(ApicMappingStitchingPlumberGBPTestCase,
                         test_apic.TestExternalPolicy):
    """Re-run the APIC external-policy tests under the stitching plumber."""
    pass
class TestApicChains(ApicMappingStitchingPlumberGBPTestCase,
base.NodeCompositionPluginTestMixin):
    def _assert_proper_chain_instance(self, sc_instance, provider_ptg_id,
                                      policy_rule_set_id, scs_id_list,
                                      classifier_id=None):
        """Check an SC instance's provider/spec/tenant wiring.

        NOTE(review): ``policy_rule_set_id`` is accepted but never checked
        here; existing callers actually pass a consumer PTG id in that slot.
        """
        self.assertEqual(sc_instance['provider_ptg_id'], provider_ptg_id)
        # Consumer side is 'N/A' for these provider-owned chain instances.
        self.assertEqual(sc_instance['consumer_ptg_id'], 'N/A')
        self.assertEqual(scs_id_list, sc_instance['servicechain_specs'])
        provider = self.show_policy_target_group(
            provider_ptg_id)['policy_target_group']
        # The instance must be owned by the provider's tenant.
        self.assertEqual(sc_instance['tenant_id'], provider['tenant_id'])
        if classifier_id:
            self.assertEqual(sc_instance['classifier_id'], classifier_id)
    def test_classifier_update_to_chain(self):
        """Classifier updates notify the chain without recreating it."""
        scs_id = self._create_servicechain_spec(node_types=['FIREWALL'])
        _, classifier_id, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])
        policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
        provider_ptg_id, consumer_ptg_id = self._create_provider_consumer_ptgs(
            policy_rule_set_id)
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        self._assert_proper_chain_instance(sc_instance, provider_ptg_id,
                                           consumer_ptg_id, [scs_id])
        with mock.patch.object(
                ncp_plugin.NodeCompositionPlugin,
                'notify_chain_parameters_updated') as notify_chain_update:
            # Update classifier and verify instance is updated
            self.update_policy_classifier(classifier_id, port_range=80)
            notify_chain_update.assert_called_once_with(
                mock.ANY, sc_instance['id'])
            sc_instances = self._list_service_chains()
            self.assertEqual(len(sc_instances['servicechain_instances']), 1)
            sc_instance_updated = sc_instances['servicechain_instances'][0]
            # Same instance: the update must not tear down/recreate it.
            self.assertEqual(sc_instance, sc_instance_updated)
        self._verify_ptg_delete_cleanup_chain(provider_ptg_id)
    def test_redirect_multiple_ptgs_single_prs(self):
        """A single redirect PRS over many PTGs yields one chain per provider."""
        scs_id = self._create_servicechain_spec()
        _, _, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])
        policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
        #Create 2 provider and 2 consumer PTGs
        provider_ptg1 = self.create_policy_target_group(
            name="p_ptg1",
            provided_policy_rule_sets={policy_rule_set_id: None})
        provider_ptg1_id = provider_ptg1['policy_target_group']['id']
        self.create_policy_target_group(
            name="c_ptg1",
            consumed_policy_rule_sets={policy_rule_set_id: None})
        provider_ptg2 = self.create_policy_target_group(
            name="p_ptg2",
            provided_policy_rule_sets={policy_rule_set_id: None})
        provider_ptg2_id = provider_ptg2['policy_target_group']['id']
        self.create_policy_target_group(
            name="c_ptg2",
            consumed_policy_rule_sets={policy_rule_set_id: None})
        sc_instances = self._list_service_chains()
        # We should have 2 service chain instances (one per provider)
        self.assertEqual(len(sc_instances['servicechain_instances']), 2)
        sc_instances = sc_instances['servicechain_instances']
        sc_instances_provider_ptg_ids = set()
        sc_instances_consumer_ptg_ids = set()
        for sc_instance in sc_instances:
            sc_instances_provider_ptg_ids.add(sc_instance['provider_ptg_id'])
            sc_instances_consumer_ptg_ids.add(sc_instance['consumer_ptg_id'])
        expected_provider_ptg_ids = {provider_ptg1_id, provider_ptg2_id}
        self.assertEqual(expected_provider_ptg_ids,
                         sc_instances_provider_ptg_ids)
        # Deleting one provider should end up deleting the one service chain
        # Instance associated to it
        self.delete_policy_target_group(
            provider_ptg1_id, expected_res_status=204)
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        self.assertNotEqual(sc_instance['provider_ptg_id'], provider_ptg1_id)
        self.delete_policy_target_group(
            provider_ptg2_id, expected_res_status=204)
        sc_instances = self._list_service_chains()
        # No more service chain instances when all the providers are deleted
        self.assertEqual(len(sc_instances['servicechain_instances']), 0)
    def test_redirect_to_chain(self):
        """A redirect PRS creates a chain; provider deletion cleans it up."""
        scs_id = self._create_servicechain_spec()
        _, _, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])
        policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
        provider_ptg_id, consumer_ptg_id = self._create_provider_consumer_ptgs(
            policy_rule_set_id)
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        # Verify that PTG delete cleans up the chain instances
        self.delete_policy_target_group(
            provider_ptg_id, expected_res_status=204)
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 0)
    def test_rule_update_updates_chain(self):
        """Classifier/action updates on the rule recreate the chain instance."""
        scs_id = self._create_servicechain_spec()
        _, _, policy_rule_id = self._create_tcp_redirect_rule("20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])
        policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
        provider_ptg_id, consumer_ptg_id = self._create_provider_consumer_ptgs(
            policy_rule_set_id)
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        # Update policy rule with new classifier and verify instance is
        # recreated
        classifier = self.create_policy_classifier(
            protocol='TCP', port_range="80",
            direction='bi')['policy_classifier']
        self.update_policy_rule(policy_rule_id,
                                policy_classifier_id=classifier['id'])
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance_new = sc_instances['servicechain_instances'][0]
        self.assertNotEqual(sc_instance, sc_instance_new)
        # Swap the redirect action's spec and verify another recreation.
        scs_id2 = self._create_servicechain_spec()
        action = self.create_policy_action(
            action_type='redirect', action_value=scs_id2)['policy_action']
        self.update_policy_rule(policy_rule_id, policy_actions=[action['id']])
        # Verify SC instance changed
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance_new = sc_instances['servicechain_instances'][0]
        self.assertNotEqual(sc_instance, sc_instance_new)
        self.delete_policy_target_group(
            provider_ptg_id, expected_res_status=204)
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 0)
    def test_update_ptg_with_redirect_prs(self):
        """Attaching/detaching a redirect PRS via PTG update drives the chain."""
        scs_id = self._create_servicechain_spec()
        _, _, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])
        policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
        # PTGs start without the PRS, so no chain yet.
        provider_ptg, consumer_ptg = self._create_provider_consumer_ptgs()
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 0)
        # We should have one service chain instance created when PTGs are
        # updated with provided and consumed prs
        self.update_policy_target_group(
            provider_ptg,
            provided_policy_rule_sets={policy_rule_set_id: ''},
            consumed_policy_rule_sets={},
            expected_res_status=200)
        self.update_policy_target_group(
            consumer_ptg,
            provided_policy_rule_sets={},
            consumed_policy_rule_sets={policy_rule_set_id: ''},
            expected_res_status=200)
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        # Verify that PTG update removing prs cleans up the chain instances
        self.update_policy_target_group(
            provider_ptg, provided_policy_rule_sets={},
            consumed_policy_rule_sets={}, expected_res_status=200)
        sc_instances = self._list_service_chains()
        self.assertEqual(len(sc_instances['servicechain_instances']), 0)
    def test_chain_on_apic_create(self):
        """Chain forms only when the provider attaches; APIC calls verified."""
        scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT'])
        _, _, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])['policy_rule_set']
        # Create PTGs on same L2P
        l2p = self.create_l2_policy()['l2_policy']
        provider = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        consumer = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        mgr = self.driver.apic_manager
        mgr.reset_mock()
        # Provide the redirect contract
        self.update_policy_target_group(
            consumer['id'],
            consumed_policy_rule_sets={policy_rule_set['id']: ''})
        # Consumer alone must not provision anything on APIC.
        self.assertFalse(mgr.ensure_bd_created_on_apic.called)
        self.assertFalse(mgr.ensure_epg_created.called)
        # Now form the chain
        self.update_policy_target_group(
            provider['id'],
            provided_policy_rule_sets={policy_rule_set['id']: ''})
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        expected = [
            # Provider EPG provided PRS
            mock.call(provider['tenant_id'], provider['id'],
                      policy_rule_set['id'], provider=True,
                      contract_owner=policy_rule_set['tenant_id'],
                      transaction=mock.ANY),
            # Consumer EPG consumed PRS
            mock.call(consumer['tenant_id'], consumer['id'],
                      policy_rule_set['id'], provider=False,
                      contract_owner=policy_rule_set['tenant_id'],
                      transaction=mock.ANY)]
        self._verify_chain_set(provider, l2p, policy_rule_set,
                               sc_instance, 1, pre_set_contract_calls=expected)
        # New consumer doesn't trigger anything
        new_consumer = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        mgr.reset_mock()
        self.update_policy_target_group(
            new_consumer['id'],
            consumed_policy_rule_sets={policy_rule_set['id']: ''})
        self.assertFalse(mgr.ensure_bd_created_on_apic.called)
        self.assertFalse(mgr.ensure_epg_created.called)
    def test_chain_on_apic_create_shared(self):
        """Shared resources: contract ownership moves to the 'common' tenant."""
        scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT'], shared=True)
        policy_rule_id = self._create_simple_policy_rule(
            action_type='redirect', shared=True, action_value=scs_id)['id']
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id],
            shared=True)['policy_rule_set']
        # Create PTGs on same L2P
        l2p = self.create_l2_policy(shared=True,
                                    tenant_id='admin')['l2_policy']
        provider = self.create_policy_target_group(
            l2_policy_id=l2p['id'], shared=True)['policy_target_group']
        consumer = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        mgr = self.driver.apic_manager
        mgr.reset_mock()
        # Provide the redirect contract
        self.update_policy_target_group(
            consumer['id'],
            consumed_policy_rule_sets={policy_rule_set['id']: ''})
        # Consumer alone must not provision anything on APIC.
        self.assertFalse(mgr.ensure_bd_created_on_apic.called)
        self.assertFalse(mgr.ensure_epg_created.called)
        # Now form the chain
        self.update_policy_target_group(
            provider['id'],
            provided_policy_rule_sets={policy_rule_set['id']: ''})
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        expected = [
            # Provider EPG provided PRS
            mock.call('common', provider['id'],
                      policy_rule_set['id'], provider=True,
                      contract_owner='common',
                      transaction=mock.ANY),
            # Consumer EPG consumed PRS
            mock.call(consumer['tenant_id'], consumer['id'],
                      policy_rule_set['id'], provider=False,
                      contract_owner='common',
                      transaction=mock.ANY)]
        self._verify_chain_set(provider, l2p, policy_rule_set,
                               sc_instance, 1, pre_set_contract_calls=expected)
        # New consumer doesn't trigger anything
        new_consumer = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        mgr.reset_mock()
        self.update_policy_target_group(
            new_consumer['id'],
            consumed_policy_rule_sets={policy_rule_set['id']: ''})
        self.assertFalse(mgr.ensure_bd_created_on_apic.called)
        self.assertFalse(mgr.ensure_epg_created.called)
        # Detaching the provider dissolves the chain.
        mgr.reset_mock()
        self.update_policy_target_group(
            provider['id'], provided_policy_rule_sets={})
        # Provider EPG contract unset
        expected = mock.call('common', provider['id'],
                             policy_rule_set['id'], provider=True,
                             contract_owner='common',
                             transaction=mock.ANY)
        self._verify_chain_unset(
            provider, l2p, policy_rule_set,
            sc_instance, 1, pre_unset_contract_calls=[expected])
    def test_chain_on_apic_delete(self):
        """Detaching the provider PRS dissolves a two-node chain on APIC."""
        scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT', 'FIREWALL_TRANSPARENT'])
        _, _, policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", scs_id)
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])['policy_rule_set']
        # Create PTGs on same L2P
        l2p = self.create_l2_policy()['l2_policy']
        provider = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        consumer = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        # Provide the redirect contract
        self.update_policy_target_group(
            provider['id'],
            provided_policy_rule_sets={policy_rule_set['id']: ''})
        # Now form the chain
        self.update_policy_target_group(
            consumer['id'],
            consumed_policy_rule_sets={policy_rule_set['id']: ''})
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        sc_instance = sc_instances['servicechain_instances'][0]
        # Dissolve the chain by disassociation
        mgr = self.driver.apic_manager
        mgr.reset_mock()
        self.update_policy_target_group(
            provider['id'], provided_policy_rule_sets={})
        # Provider EPG contract unset
        expected = mock.call(provider['tenant_id'], provider['id'],
                             policy_rule_set['id'], provider=True,
                             contract_owner=policy_rule_set['tenant_id'],
                             transaction=mock.ANY)
        self._verify_chain_unset(
            provider, l2p, policy_rule_set,
            sc_instance, 2, pre_unset_contract_calls=[expected])
    def test_new_action_rejected_by_ptg(self):
        """A PTG may provide at most one redirect PRS.

        Providing a second redirect PRS on the same PTG must fail with
        PTGAlreadyProvidingRedirectPRS, while adding a redirect action to a
        PRS that is only consumed remains allowed.
        """
        ptg1 = self.create_policy_target_group()['policy_target_group']
        ptg2 = self.create_policy_target_group()['policy_target_group']
        ptg3 = self.create_policy_target_group()['policy_target_group']
        simple_rule = self._create_simple_policy_rule()
        simple_rule_2 = self._create_simple_policy_rule()
        prs = self.create_policy_rule_set()['policy_rule_set']
        prs2 = self.create_policy_rule_set(
            policy_rules=[simple_rule['id']])['policy_rule_set']
        prs3 = self.create_policy_rule_set(
            policy_rules=[simple_rule_2['id']])['policy_rule_set']
        self.update_policy_target_group(
            ptg1['id'], provided_policy_rule_sets={prs['id']: ''})
        self.update_policy_target_group(
            ptg2['id'], provided_policy_rule_sets={prs['id']: ''})
        # PTG 3 also consumes a contract
        self.update_policy_target_group(
            ptg3['id'], provided_policy_rule_sets={prs['id']: ''})
        self.update_policy_target_group(
            ptg3['id'], consumed_policy_rule_sets={prs2['id']: ''})
        # Adding a normal rule to PRS works normally
        self.update_policy_rule_set(prs['id'],
                                    policy_rules=[simple_rule['id']])
        redirect = self._create_simple_policy_rule(action_type='redirect')
        self.update_policy_rule_set(
            prs['id'], policy_rules=[simple_rule['id'], redirect['id']],
            expected_res_status=200)
        redirect = self._create_simple_policy_rule(action_type='redirect')
        self.update_policy_rule_set(
            prs3['id'], policy_rules=[simple_rule['id'], redirect['id']],
            expected_res_status=200)
        # ptg2 would now provide two redirect PRSs (prs and prs3): rejected
        res = self.update_policy_target_group(
            ptg2['id'], provided_policy_rule_sets={prs['id']: '',
                                                   prs3['id']: ''},
            expected_res_status=400)
        self.assertEqual('PTGAlreadyProvidingRedirectPRS',
                         res['NeutronError']['type'])
        action = self.create_policy_action(
            action_type='redirect')['policy_action']
        # Drop the redirect rules again so the PRSs are plain
        self.update_policy_rule_set(
            prs['id'], policy_rules=[simple_rule['id']],
            expected_res_status=200)
        self.update_policy_rule_set(
            prs3['id'], policy_rules=[simple_rule['id']],
            expected_res_status=200)
        # Adding redirect action to the consumed PRS is fine
        self.update_policy_rule(
            simple_rule['id'], policy_actions=[action['id']],
            expected_res_status=200)
    def test_ptg_only_participate_one_prs_when_redirect(self):
        """Turning a shared rule into a redirect is rejected for multi-PRS PTGs.

        A PTG providing a redirect PRS alongside other PRSs can be created,
        but later adding a redirect action to a rule of one of those other
        PRSs must fail with PTGAlreadyProvidingRedirectPRS.
        """
        redirect_rule = self._create_simple_policy_rule(action_type='redirect')
        simple_rule = self._create_simple_policy_rule()
        prs_r = self.create_policy_rule_set(
            policy_rules=[redirect_rule['id']])['policy_rule_set']
        prs = self.create_policy_rule_set(
            policy_rules=[simple_rule['id']])['policy_rule_set']
        # Creating a PTG that provides a redirect PRS plus another PRS works
        self.create_policy_target_group(
            provided_policy_rule_sets={prs_r['id']: '', prs['id']: ''},
            consumed_policy_rule_sets={prs['id']: ''},
            expected_res_status=201)
        action = self.create_policy_action(
            action_type='redirect')['policy_action']
        # ...but making the second provided PRS a redirect one is rejected
        res = self.update_policy_rule(
            simple_rule['id'], policy_actions=[action['id']],
            expected_res_status=400)
        self.assertEqual('PTGAlreadyProvidingRedirectPRS',
                         res['NeutronError']['type'])
    def test_three_tier_sc(self):
        """Build a three-tier (internet -> web -> app -> db) chain topology.

        Each tier pair has its own redirect PRS; after each provider/consumer
        association a new servicechain instance must appear and the expected
        contract calls must be issued on the APIC manager mock.
        """
        app_db_scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT', 'IDS'], shared=True)
        web_app_scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT', 'LOADBALANCER'])
        internet_web_scs_id = self._create_servicechain_spec(
            node_types=['LOADBALANCER'])
        _, _, app_db_policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", app_db_scs_id)
        _, _, web_app_policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", web_app_scs_id)
        _, _, internet_web_policy_rule_id = self._create_tcp_redirect_rule(
            "20:90", internet_web_scs_id)
        app_db_policy_rule_set = self.create_policy_rule_set(
            policy_rules=[app_db_policy_rule_id])['policy_rule_set']
        web_app_policy_rule_set = self.create_policy_rule_set(
            policy_rules=[web_app_policy_rule_id])['policy_rule_set']
        internet_web_policy_rule_set = self.create_policy_rule_set(
            policy_rules=[internet_web_policy_rule_id])['policy_rule_set']
        # Create DB and APP PTGs on same L2P
        l2p = self.create_l2_policy()['l2_policy']
        db = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        app = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        # Create WEB on a different L2P
        web_l2p = self.create_l2_policy()['l2_policy']
        web = self.create_policy_target_group(
            l2_policy_id=web_l2p['id'])['policy_target_group']
        mgr = self.driver.apic_manager
        mgr.reset_mock()
        self.update_policy_target_group(
            app['id'],
            consumed_policy_rule_sets={app_db_policy_rule_set['id']: ''})
        self.update_policy_target_group(
            db['id'],
            provided_policy_rule_sets={app_db_policy_rule_set['id']: ''})
        sc_instances = self._list_service_chains()
        # We should have one service chain instance created now
        self.assertEqual(len(sc_instances['servicechain_instances']), 1)
        sc_instance = sc_instances['servicechain_instances'][0]
        seen = {sc_instance['id']}
        expected = [
            # Provider EPG provided PRS
            mock.call(db['tenant_id'], db['id'],
                      app_db_policy_rule_set['id'], provider=True,
                      contract_owner=app_db_policy_rule_set['tenant_id'],
                      transaction=mock.ANY),
            # Consumer EPG consumed PRS
            mock.call(app['tenant_id'], app['id'],
                      app_db_policy_rule_set['id'], provider=False,
                      contract_owner=app_db_policy_rule_set['tenant_id'],
                      transaction=mock.ANY)]
        self._verify_chain_set(db, l2p, app_db_policy_rule_set,
                               sc_instance, 2, pre_set_contract_calls=expected)
        mgr.reset_mock()
        self.update_policy_target_group(
            app['id'],
            consumed_policy_rule_sets={app_db_policy_rule_set['id']: ''},
            provided_policy_rule_sets={web_app_policy_rule_set['id']: ''})
        self.update_policy_target_group(
            web['id'],
            consumed_policy_rule_sets={web_app_policy_rule_set['id']: ''})
        expected = [
            # Provider EPG provided PRS
            mock.call(app['tenant_id'], app['id'],
                      web_app_policy_rule_set['id'], provider=True,
                      contract_owner=web_app_policy_rule_set['tenant_id'],
                      transaction=mock.ANY),
            # Consumer EPG consumed PRS
            mock.call(web['tenant_id'], web['id'],
                      web_app_policy_rule_set['id'], provider=False,
                      contract_owner=web_app_policy_rule_set['tenant_id'],
                      transaction=mock.ANY)]
        sc_instances = self._list_service_chains()
        # A second service chain instance should exist now
        self.assertEqual(len(sc_instances['servicechain_instances']), 2)
        sc_instance = [x for x in sc_instances['servicechain_instances']
                       if x['id'] not in seen][0]
        seen.add(sc_instance['id'])
        self._verify_chain_set(app, l2p, web_app_policy_rule_set,
                               sc_instance, 1, pre_set_contract_calls=expected)
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            external_routes=[], expected_res_status=201)['external_segment']
        ep = self.create_external_policy(
            external_segments=[es['id']],
            expected_res_status=201)['external_policy']
        mgr.reset_mock()
        self.update_policy_target_group(
            web['id'],
            consumed_policy_rule_sets={web_app_policy_rule_set['id']: ''},
            provided_policy_rule_sets={internet_web_policy_rule_set['id']: ''})
        self.update_external_policy(
            ep['id'],
            consumed_policy_rule_sets={internet_web_policy_rule_set['id']: ''})
        expected = [
            # Provider EPG provided PRS
            mock.call(web['tenant_id'], web['id'],
                      internet_web_policy_rule_set['id'], provider=True,
                      contract_owner=internet_web_policy_rule_set['tenant_id'],
                      transaction=mock.ANY)]
        sc_instances = self._list_service_chains()
        # A third service chain instance should exist now
        self.assertEqual(len(sc_instances['servicechain_instances']), 3)
        sc_instance = [x for x in sc_instances['servicechain_instances']
                       if x['id'] not in seen][0]
        seen.add(sc_instance['id'])
        # NOTE(review): 'app' is passed here although this chain is provided
        # by 'web'; _verify_chain_set is a no-op in this class so behavior is
        # unaffected, but confirm whether 'web' was intended.
        self._verify_chain_set(app, web_l2p, internet_web_policy_rule_set,
                               sc_instance, 0, pre_set_contract_calls=expected)
    def test_rule_removed_by_shared(self):
        """Removing the redirect rule from a shared PRS must succeed.

        The shared PRS is provided by a PTG in a different (non-admin)
        tenant; emptying the PRS's rules dissolves the chain without error.
        """
        scs_id = self._create_servicechain_spec(
            node_types=['FIREWALL_TRANSPARENT'], shared=True)
        policy_rule_id = self._create_simple_policy_rule(
            action_type='redirect', shared=True, action_value=scs_id)['id']
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id],
            shared=True)['policy_rule_set']
        # Create PTGs on same L2P
        l2p = self.create_l2_policy(tenant_id='noadmin')['l2_policy']
        provider = self.create_policy_target_group(
            l2_policy_id=l2p['id'], tenant_id='noadmin')['policy_target_group']
        mgr = self.driver.apic_manager
        # form the chain
        self.update_policy_target_group(
            provider['id'], tenant_id=provider['tenant_id'],
            provided_policy_rule_sets={policy_rule_set['id']: ''})
        mgr.reset_mock()
        # Removing the (shared) redirect rule must return 200
        self.update_policy_rule_set(policy_rule_set['id'], policy_rules=[],
                                    expected_res_status=200)
def _verify_chain_set(self, *args, **kwargs):
pass
def _verify_chain_unset(self, *args, **kwargs):
pass
def _list_service_chains(self):
sc_instance_list_req = self.new_list_request(
'servicechain/servicechain_instances')
res = sc_instance_list_req.get_response(self.ext_api)
return self.deserialize(self.fmt, res)
    def test_ha_chain_same_subnet(self):
        """Parallel HA chains on identical pools share service addresses.

        Two HA-firewall chains whose providers live on different L3Ps with
        the same CIDR pools are created; the service PTs of each chain are
        clustered, and the test verifies that per-position service ports get
        the same IP in both chains and that HA IP ownership can be recorded
        for the same address in both subnets.
        """
        session = context.get_admin_context().session
        # Create 2 L3Ps with same pools
        l3p1 = self.create_l3_policy()['l3_policy']
        l3p2 = self.create_l3_policy()['l3_policy']
        # Attach proper L2Ps
        l2p1 = self.create_l2_policy(l3_policy_id=l3p1['id'])['l2_policy']
        l2p2 = self.create_l2_policy(l3_policy_id=l3p2['id'])['l2_policy']
        # Set providers PTGs
        ptg_prov_1 = self.create_policy_target_group(
            l2_policy_id=l2p1['id'])['policy_target_group']
        ptg_prov_2 = self.create_policy_target_group(
            l2_policy_id=l2p2['id'])['policy_target_group']
        # At this point, they have the same subnet CIDR associated, same will
        # be for Proxy Groups
        ha_spec_id = self._create_servicechain_spec(node_types=['FIREWALL_HA'])
        # Create proper PRS
        policy_rule_id = self._create_simple_policy_rule(
            action_type='redirect', action_value=ha_spec_id)['id']
        policy_rule_set = self.create_policy_rule_set(
            name="c1", policy_rules=[policy_rule_id])['policy_rule_set']
        # Form first and second chain
        ptg_prov_1 = self.update_policy_target_group(
            ptg_prov_1['id'], provided_policy_rule_sets={
                policy_rule_set['id']: ''})['policy_target_group']
        ptg_prov_2 = self.update_policy_target_group(
            ptg_prov_2['id'], provided_policy_rule_sets={
                policy_rule_set['id']: ''})['policy_target_group']
        # One proxy group exists for both, each with 2 service PTs. Put them in
        # HA
        scis = self._list_service_chains()['servicechain_instances']
        self.assertEqual(2, len(scis))
        # Chain 1 targets
        targets_1 = model.get_service_targets(
            session, servicechain_instance_id=scis[0]['id'])
        # Chain 2 targets
        targets_2 = model.get_service_targets(
            session, servicechain_instance_id=scis[1]['id'])
        # Helper: group a chain's service PTs by PTG and cluster each group
        # behind its lowest-IP member.
        def _assert_service_targets_and_cluster_them(targets):
            result = {}
            for pt in targets:
                pt = self.show_policy_target(
                    pt.policy_target_id,
                    is_admin_context=True)['policy_target']
                result.setdefault(
                    pt['policy_target_group_id'], []).append(pt)
            # There are 2 PTGs, and 3 PTs each
            self.assertEqual(2, len(result))
            for key in result:
                # Sort by IP address
                result[key].sort(key=lambda x: self._get_object(
                    'ports', x['port_id'],
                    self.api)['port']['fixed_ips'][0]['ip_address'])
                value = result[key]
                self.assertEqual(3, len(value))
                # Set first PT as master of the other twos
                self.update_policy_target(value[1]['id'],
                                          cluster_id=value[0]['id'],
                                          is_admin_context=True)
                self.update_policy_target(value[2]['id'],
                                          cluster_id=value[0]['id'],
                                          is_admin_context=True)
            return result
        # Group chain 1 targets by PTG:
        chain_targets_1 = _assert_service_targets_and_cluster_them(targets_1)
        # Group chain 2 targets by PTG:
        chain_targets_2 = _assert_service_targets_and_cluster_them(targets_2)
        # Verify IPs overlap on provider side
        main_ip = None
        for x in range(3):
            port_1 = self._get_object(
                'ports', chain_targets_1[ptg_prov_1['id']][x]['port_id'],
                self.api)['port']
            port_2 = self._get_object(
                'ports', chain_targets_2[ptg_prov_2['id']][x]['port_id'],
                self.api)['port']
            self.assertEqual(port_1['fixed_ips'][0]['ip_address'],
                             port_2['fixed_ips'][0]['ip_address'])
            if x == 0:
                main_ip = port_1['fixed_ips'][0]['ip_address']
        # Update address ownership on second port
        self.driver.update_ip_owner(
            {'port': chain_targets_1[ptg_prov_1['id']][1]['port_id'],
             'ip_address_v4': main_ip})
        # Same address owned by another port in a different subnet
        self.driver.update_ip_owner(
            {'port': chain_targets_2[ptg_prov_2['id']][1]['port_id'],
             'ip_address_v4': main_ip})
        # There are 2 ownership entries for the same address
        entries = self.driver.ha_ip_handler.session.query(
            ha_ip_db.HAIPAddressToPortAssocation).all()
        self.assertEqual(2, len(entries))
        self.assertEqual(main_ip, entries[0].ha_ip_address)
        self.assertEqual(main_ip, entries[1].ha_ip_address)
class TestProxyGroup(ApicMappingStitchingPlumberGBPTestCase):
    """Proxy PTG (L2/L3 proxy) behavior of the APIC mapping driver.

    Covers proxy placement on the proxied group's L2P, subnet handling for
    L2 vs L3 proxies, shadow-BD lifecycle on APIC, the implicit "any"
    contract between proxied group and proxy, RPC details for proxy
    gateways, end-of-chain notifications and PRS synchronization.
    """

    def _get_pts_addresses(self, pts):
        """Return every fixed-IP address of the ports behind *pts*."""
        addresses = []
        for pt in pts:
            port = self._get_object('ports', pt['port_id'], self.api)['port']
            addresses.extend([x['ip_address'] for x in port['fixed_ips']])
        return addresses

    def _proxy_tenant(self, ptg, admin_proxy):
        """Tenant to create the proxy in: 'admin' when admin_proxy is set."""
        return 'admin' if admin_proxy else ptg['tenant_id']

    def _test_proxy_group_same_l2p(self, admin_proxy=False):
        ptg1 = self.create_policy_target_group()['policy_target_group']
        l2p = self.create_l2_policy()['l2_policy']
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg1['id'],
            l2_policy_id=l2p['id'],
            tenant_id=self._proxy_tenant(ptg1, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        # The used L2P will be ignored, and the proxy will be put on the
        # proxied group's L2P
        self.assertEqual(ptg1['l2_policy_id'], proxy['l2_policy_id'])

    def test_proxy_group_same_l2p(self):
        self._test_proxy_group_same_l2p()

    def test_proxy_group_same_l2p_admin(self):
        self._test_proxy_group_same_l2p(True)

    def _test_l2_proxy_group_subnets(self, admin_proxy=False):
        ptg = self.create_policy_target_group()['policy_target_group']
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            proxy_type='l2', tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        ptg = self.show_policy_target_group(ptg['id'])['policy_target_group']
        # An L2 proxy shares the proxied group's (single) subnet
        self.assertEqual(ptg['subnets'], proxy['subnets'])
        self.assertEqual(1, len(proxy['subnets']))

    def test_l2_proxy_group_subnets(self):
        self._test_l2_proxy_group_subnets()

    def test_l2_proxy_group_subnets_admin(self):
        self._test_l2_proxy_group_subnets(True)

    def _test_l3_proxy_group_subnets(self, admin_proxy=False):
        ptg1 = self.create_policy_target_group()['policy_target_group']
        original_subnet = ptg1['subnets'][0]
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg1['id'],
            proxy_type='l3', tenant_id=self._proxy_tenant(ptg1, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        ptg1 = self.show_policy_target_group(ptg1['id'])['policy_target_group']
        # An L3 proxy gets the original subnet plus a new "jump" subnet
        self.assertEqual(set(ptg1['subnets']), set(proxy['subnets']))
        self.assertEqual(2, len(proxy['subnets']))
        proxy['subnets'].remove(original_subnet)
        new_subnet = proxy['subnets'][0]
        # Verify subnet from proxy pool
        l2p = self.show_l2_policy(ptg1['l2_policy_id'])['l2_policy']
        l3p = self.show_l3_policy(l2p['l3_policy_id'])['l3_policy']
        subnet = self._get_object('subnets', new_subnet, self.api)['subnet']
        self.assertTrue(netaddr.IPNetwork(subnet['cidr']) in
                        netaddr.IPNetwork(l3p['proxy_ip_pool']))
        # Attach external segment to create a router interface
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            external_routes=[{'destination': '0.0.0.0/0',
                              'nexthop': '192.168.0.254'},
                             {'destination': '128.0.0.0/16',
                              'nexthop': None}])['external_segment']
        self.update_l3_policy(l2p['l3_policy_id'],
                              external_segments={es['id']: []},
                              expected_res_status=200)
        # the proxy subnets should have no router
        ports = self._list(
            'ports',
            query_params='device_owner=network:router_interface')['ports']
        self.assertEqual(1, len(ports))
        # this router port is only connected to the original PTG
        self.assertEqual(original_subnet,
                         ports[0]['fixed_ips'][0]['subnet_id'])
        # Verify port address comes from that subnet
        pt = self.create_policy_target(
            policy_target_group_id=proxy['id'],
            tenant_id=proxy['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        port = self._get_object('ports', pt['port_id'], self.api)['port']
        self.assertTrue(
            netaddr.IPNetwork(port['fixed_ips'][0]['ip_address']) in
            netaddr.IPNetwork(subnet['cidr']),
            "IP address %s is not part of subnet %s" % (
                port['fixed_ips'][0]['ip_address'], subnet['cidr']))

    def test_l3_proxy_group_subnets(self):
        self._test_l3_proxy_group_subnets()

    def test_l3_proxy_group_subnets_admin(self):
        self._test_l3_proxy_group_subnets(True)

    def test_proxy_shadow_created(self):
        """Creating a proxy makes a shadow BD and moves the proxied EPG."""
        l2p = self.create_l2_policy()['l2_policy']
        ptg = self.create_policy_target_group(
            l2_policy_id=l2p['id'])['policy_target_group']
        self.mgr.reset_mock()
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'])['policy_target_group']
        l2p = self.show_l2_policy(ptg['l2_policy_id'])['l2_policy']
        l3p = self.show_l3_policy(l2p['l3_policy_id'])['l3_policy']
        # Shadow BD created
        self.mgr.ensure_bd_created_on_apic.assert_called_once_with(
            ptg['tenant_id'], 'Shd-' + ptg['id'], ctx_owner=l3p['tenant_id'],
            ctx_name=l3p['id'], allow_broadcast=False, unicast_route=False,
            transaction=mock.ANY, enforce_subnet_check=False)
        # Proxied PTG moved
        expected_calls = [
            # Proxy created on L2P
            mock.call(
                ptg['tenant_id'], proxy['id'], bd_owner=l2p['tenant_id'],
                bd_name=l2p['id']),
            # Proxied moved on shadow BD
            mock.call(
                ptg['tenant_id'], ptg['id'], bd_owner=ptg['tenant_id'],
                bd_name='Shd-' + ptg['id'], transaction=mock.ANY)]
        self._check_call_list(expected_calls,
                              self.mgr.ensure_epg_created.call_args_list)

    def _test_proxy_any_contract(self, admin_proxy=False):
        ptg = self.create_policy_target_group()['policy_target_group']
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        self.mgr.create_contract('any-' + ptg['id'], owner='COMMON',
                                 transaction=mock.ANY)
        # Proxied group provides, proxy consumes, the implicit "any" contract
        expected_calls = [
            mock.call(
                ptg['tenant_id'], ptg['id'], 'any-' + ptg['id'], provider=True,
                contract_owner='common'),
            mock.call(
                ptg['tenant_id'], proxy['id'], 'any-' + ptg['id'],
                provider=False, contract_owner='common')]
        self._check_call_list(expected_calls,
                              self.mgr.set_contract_for_epg.call_args_list,
                              check_all=False)

    def test_proxy_any_contract(self):
        self._test_proxy_any_contract()

    def test_proxy_any_contract_admin(self):
        self._test_proxy_any_contract(True)

    def _test_proxy_shadow_deleted(self, admin_proxy=False):
        # NOTE(review): no test_* wrapper invokes this helper in this class;
        # confirm whether the wrappers were dropped intentionally.
        ptg1 = self.create_policy_target_group()['policy_target_group']
        original_subnet = ptg1['subnets'][0]
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg1['id'],
            tenant_id=self._proxy_tenant(ptg1, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        self.mgr.reset_mock()
        self.delete_policy_target_group(proxy['id'], expected_res_status=204)
        proxy['subnets'].remove(original_subnet)
        # Proxied PTG moved back
        self.mgr.ensure_epg_created.assert_called_once_with(
            ptg1['tenant_id'], ptg1['id'], bd_owner=ptg1['tenant_id'],
            bd_name=ptg1['l2_policy_id'])
        # Shadow BD deleted
        self.mgr.delete_bd_on_apic.assert_called_once_with(
            ptg1['tenant_id'], 'Shd-' + ptg1['id'])
        # Verify Jump subnet deleted
        self.assertEqual(1, len(proxy['subnets']))
        self._get_object('subnets', proxy['subnets'][0], self.api,
                         expected_res_status=404)

    def _test_get_gbp_details(self, admin_proxy=False):
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        pt1 = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        self._bind_port_to_host(pt1['port_id'], 'h1')
        pt2 = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        self._bind_port_to_host(pt2['port_id'], 'h2')
        # Create proxy group
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        proxy_gw = self.create_policy_target(
            policy_target_group_id=proxy['id'],
            proxy_gateway=True,
            tenant_id=proxy['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        self._bind_port_to_host(proxy_gw['port_id'], 'h2')
        # Create a PT in the same cluster
        proxy_gw_failover = self.create_policy_target(
            policy_target_group_id=proxy['id'],
            cluster_id=proxy_gw['id'],
            tenant_id=proxy['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        self._bind_port_to_host(proxy_gw_failover['port_id'], 'h2')
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        es = self.create_external_segment(
            name='supported', cidr='192.168.0.0/24',
            external_routes=[], expected_res_status=201)['external_segment']
        self.create_external_policy(external_segments=[es['id']],
                                    expected_res_status=201)
        l2p = self.show_l2_policy(ptg['l2_policy_id'])['l2_policy']
        self.update_l3_policy(l2p['l3_policy_id'],
                              external_segments={es['id']: []},
                              expected_res_status=200)
        self._bind_port_to_host(pt2['port_id'], 'h2')
        master_port = self._get_object('ports', proxy_gw['port_id'],
                                       self.api)['port']

        def echo(name):
            return name
        self.mgr.apic.fvTenant.name = echo
        mapping = self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % proxy_gw['port_id'], host='h2')
        # Verify extra addresses
        ips = self._get_pts_addresses([pt1, pt2])
        self.assertEqual(set(ips), set(mapping['extra_ips']))
        self.assertEqual(ptg['tenant_id'], mapping['ptg_tenant'])
        self.assertEqual(1, len(mapping['ip_mapping']))
        # No SNAT subnet
        self.assertEqual(0, len(mapping['host_snat_ips']))
        group_default_gw = self.create_policy_target(
            policy_target_group_id=ptg['id'],
            group_default_gateway=True,
            tenant_id=ptg['tenant_id'])['policy_target']
        self._bind_port_to_host(pt2['port_id'], 'h2')
        mapping = self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % group_default_gw['port_id'], host='h2')
        self.assertTrue(mapping['promiscuous_mode'])
        # No extra IPs for the failover since it doesn't own the master IP
        mapping = self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % proxy_gw_failover['port_id'], host='h2')
        self.assertEqual(0, len(mapping['extra_ips'] or []))
        self.assertEqual(
            [{'mac_address': master_port['mac_address'],
              'ip_address': master_port['fixed_ips'][0]['ip_address']}],
            mapping['allowed_address_pairs'])
        # Set the port ownership and verify that extra_ips is correctly set
        ips = self._get_pts_addresses([pt1, pt2, group_default_gw])
        for x in master_port['fixed_ips']:
            self.driver.ha_ip_handler.set_port_id_for_ha_ipaddress(
                proxy_gw_failover['port_id'], x['ip_address'])
        mapping = self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % proxy_gw_failover['port_id'], host='h2')
        self.assertEqual(
            set(ips), set(mapping['extra_details'][master_port['mac_address']][
                'extra_ips']))
        self.assertEqual(
            [{'mac_address': master_port['mac_address'],
              'ip_address': master_port['fixed_ips'][0]['ip_address'],
              'active': True}],
            mapping['allowed_address_pairs'])

    def test_get_gbp_details(self):
        self._test_get_gbp_details()

    def test_get_gbp_details_admin(self):
        self._test_get_gbp_details(True)

    def test_cluster_promiscuous_mode(self):
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        # Create proxy group
        self.create_policy_target_group(proxied_group_id=ptg['id'])
        group_gw = self.create_policy_target(
            policy_target_group_id=ptg['id'],
            group_default_gateway=True)['policy_target']
        # Create a PT in the same cluster
        group_gw_failover = self.create_policy_target(
            policy_target_group_id=ptg['id'],
            cluster_id=group_gw['id'])['policy_target']
        mapping = self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % group_gw_failover['port_id'], host='h2')
        self.assertTrue(mapping['promiscuous_mode'])

    def _test_end_chain_notified(self, admin_proxy=False):
        self.driver.notifier = mock.Mock()
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        proxy1 = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        proxy2 = self.create_policy_target_group(
            proxied_group_id=proxy1['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        proxy_gw = self.create_policy_target(
            policy_target_group_id=proxy2['id'],
            proxy_gateway=True, tenant_id=proxy2['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        self._bind_port_to_host(proxy_gw['port_id'], 'h2')
        self.driver.notifier.reset_mock()
        # Create a PT on the starting PTG, and verify that proxy_gw is
        # notified
        self.create_policy_target(policy_target_group_id=ptg['id'])
        self.assertEqual(1, self.driver.notifier.port_update.call_count)
        self.assertEqual(
            proxy_gw['port_id'],
            self.driver.notifier.port_update.call_args[0][1]['id'])

    def _test_end_chain_notified_cluster_id(self, admin_proxy=False):
        # NOTE(review): no test_* wrapper invokes this helper in this class;
        # confirm whether the wrappers were dropped intentionally.
        self.driver.notifier = mock.Mock()
        ptg = self.create_policy_target_group(
            name="ptg1")['policy_target_group']
        proxy1 = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        proxy2 = self.create_policy_target_group(
            proxied_group_id=proxy1['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        proxy_gw_master = self.create_policy_target(
            policy_target_group_id=proxy2['id'],
            proxy_gateway=True, tenant_id=proxy2['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        # The following is not proxy gateway, but is part of a proxy_gw cluster
        proxy_gw = self.create_policy_target(
            policy_target_group_id=proxy2['id'],
            cluster_id=proxy_gw_master['id'], tenant_id=proxy2['tenant_id'],
            is_admin_context=admin_proxy)['policy_target']
        self._bind_port_to_host(proxy_gw['port_id'], 'h2')
        self.driver.notifier.reset_mock()
        # Create a PT on the starting PTG, and verify that proxy_gw is
        # notified
        self.create_policy_target(policy_target_group_id=ptg['id'])
        self.assertEqual(1, self.driver.notifier.port_update.call_count)
        self.assertEqual(
            proxy_gw['port_id'],
            self.driver.notifier.port_update.call_args[0][1]['id'])

    def test_end_chain_notified(self):
        self._test_end_chain_notified()

    def test_end_chain_notified_admin(self):
        self._test_end_chain_notified(True)

    def test_proxy_group_right_tenant(self):
        """Admin-created proxy still builds APIC objects in owner tenant."""
        l2p = self.create_l2_policy(tenant_id='non-admin')['l2_policy']
        ptg = self.create_policy_target_group(
            l2_policy_id=l2p['id'],
            tenant_id='non-admin')['policy_target_group']
        self.mgr.reset_mock()
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'], is_admin_context=True,
            tenant_id='admin-tenant')['policy_target_group']
        l3p = self.show_l3_policy(l2p['l3_policy_id'],
                                  tenant_id='non-admin')['l3_policy']
        # Shadow BD created in non-admin tenant
        self.mgr.ensure_bd_created_on_apic.assert_called_once_with(
            'non-admin', 'Shd-' + ptg['id'], ctx_owner=l3p['tenant_id'],
            ctx_name=l3p['id'], allow_broadcast=False, unicast_route=False,
            transaction=mock.ANY, enforce_subnet_check=False)
        # Proxied PTG moved
        expected_calls = [
            # Proxy created on L2P
            mock.call(
                'non-admin', proxy['id'], bd_owner=l2p['tenant_id'],
                bd_name=l2p['id']),
            # Proxied moved on shadow BD
            mock.call(
                'non-admin', ptg['id'], bd_owner=ptg['tenant_id'],
                bd_name='Shd-' + ptg['id'], transaction=mock.ANY)]
        self._check_call_list(expected_calls,
                              self.mgr.ensure_epg_created.call_args_list)

    def test_prs_sync_with_proxy(self, admin_proxy=False):
        """PRS associations stay in sync down the whole proxy chain."""
        rule = self._create_ssh_allow_rule()
        policy_rule_set_1 = self.create_policy_rule_set(
            name="c1", policy_rules=[rule['id']])['policy_rule_set']
        policy_rule_set_2 = self.create_policy_rule_set(
            name="c2", policy_rules=[rule['id']])['policy_rule_set']
        ptg = self.create_policy_target_group(
            provided_policy_rule_sets={policy_rule_set_1['id']: ''},
            consumed_policy_rule_sets={policy_rule_set_2['id']: ''})[
                'policy_target_group']
        # Sync on proxy creation
        proxy1 = self.create_policy_target_group(
            proxied_group_id=ptg['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        self.assertEqual([policy_rule_set_1['id']],
                         proxy1['provided_policy_rule_sets'])
        self.assertEqual([policy_rule_set_2['id']],
                         proxy1['consumed_policy_rule_sets'])
        proxy2 = self.create_policy_target_group(
            proxied_group_id=proxy1['id'],
            tenant_id=self._proxy_tenant(ptg, admin_proxy),
            is_admin_context=admin_proxy)['policy_target_group']
        self.assertEqual([policy_rule_set_1['id']],
                         proxy2['provided_policy_rule_sets'])
        self.assertEqual([policy_rule_set_2['id']],
                         proxy2['consumed_policy_rule_sets'])
        # Sync on original update
        self.update_policy_target_group(
            ptg['id'], provided_policy_rule_sets={})
        proxy1 = self.show_policy_target_group(
            proxy1['id'])['policy_target_group']
        self.assertEqual([],
                         proxy1['provided_policy_rule_sets'])
        self.assertEqual([policy_rule_set_2['id']],
                         proxy1['consumed_policy_rule_sets'])
        proxy2 = self.show_policy_target_group(
            proxy2['id'])['policy_target_group']
        # Fixed copy-paste bug: this previously re-asserted proxy1, so the
        # second-level proxy's provided-PRS sync was never actually checked.
        self.assertEqual([],
                         proxy2['provided_policy_rule_sets'])
        self.assertEqual([policy_rule_set_2['id']],
                         proxy2['consumed_policy_rule_sets'])

    def test_l3_proxy_subnets_delete(self):
        """Deleting an L3 proxy removes its jump subnet."""
        l2p = self.create_l2_policy(tenant_id='non-admin')['l2_policy']
        ptg = self.create_policy_target_group(
            l2_policy_id=l2p['id'],
            tenant_id='non-admin')['policy_target_group']
        main_subnet = set(ptg['subnets'])
        self.mgr.reset_mock()
        proxy = self.create_policy_target_group(
            proxied_group_id=ptg['id'], is_admin_context=True,
            tenant_id='admin-tenant')['policy_target_group']
        subnets = set(proxy['subnets'])
        added = subnets - main_subnet
        self.delete_policy_target_group(proxy['id'], is_admin_context=True,
                                        expected_res_status=204)
        # Subnet doesn't exist anymore
        self._get_object('subnets', list(added)[0], self.api,
                         expected_res_status=404)
class TestApicChainsAdminOwner(TestApicChains):
    """Re-run the APIC chain tests with chains owned by a fixed admin tenant.

    Patches the chain-mapping driver so every servicechain instance is
    created under test_rmd.CHAIN_TENANT_ID instead of the provider tenant.
    """
    def setUp(self, **kwargs):
        # Avoid real keystone access and pin the chain owner tenant id.
        mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
                   'chain_mapping.ChainMappingDriver.'
                   'chain_tenant_keystone_client').start()
        res = mock.patch('gbpservice.neutron.services.grouppolicy.drivers.'
                         'chain_mapping.ChainMappingDriver.'
                         'chain_tenant_id').start()
        res.return_value = test_rmd.CHAIN_TENANT_ID
        super(TestApicChainsAdminOwner, self).setUp(**kwargs)
    def _assert_proper_chain_instance(self, sc_instance, provider_ptg_id,
                                      consumer_ptg_id, scs_id_list,
                                      classifier_id=None):
        """Admin-owned chains report no consumer PTG and the fixed tenant."""
        self.assertEqual(sc_instance['provider_ptg_id'], provider_ptg_id)
        self.assertEqual(sc_instance['consumer_ptg_id'], 'N/A')
        self.assertEqual(sc_instance['tenant_id'], test_rmd.CHAIN_TENANT_ID)
        self.assertEqual(scs_id_list, sc_instance['servicechain_specs'])
        if classifier_id:
            self.assertEqual(sc_instance['classifier_id'], classifier_id)
| |
"""ACME AuthHandler."""
import itertools
import logging
import time
import zope.component
from acme import challenges
from acme import messages
from letsencrypt import achallenges
from letsencrypt import constants
from letsencrypt import errors
from letsencrypt import error_handler
from letsencrypt import interfaces
logger = logging.getLogger(__name__)
class AuthHandler(object):
    """ACME Authorization Handler for a client.

    :ivar dv_auth: Authenticator capable of solving
        :class:`~acme.challenges.DVChallenge` types
    :type dv_auth: :class:`letsencrypt.interfaces.IAuthenticator`

    :ivar cont_auth: Authenticator capable of solving
        :class:`~acme.challenges.ContinuityChallenge` types
    :type cont_auth: :class:`letsencrypt.interfaces.IAuthenticator`

    :ivar acme.client.Client acme: ACME client API.

    :ivar account: Client's Account
    :type account: :class:`letsencrypt.account.Account`

    :ivar dict authzr: ACME Authorization Resource dict where keys are domains
        and values are :class:`acme.messages.AuthorizationResource`
    :ivar list dv_c: DV challenges in the form of
        :class:`letsencrypt.achallenges.AnnotatedChallenge`
    :ivar list cont_c: Continuity challenges in the
        form of :class:`letsencrypt.achallenges.AnnotatedChallenge`
    """
    def __init__(self, dv_auth, cont_auth, acme, account):
        self.dv_auth = dv_auth
        self.cont_auth = cont_auth
        self.acme = acme
        self.account = account
        # domain (str) -> acme.messages.AuthorizationResource
        self.authzr = dict()
        # List must be used to keep responses straight (responses from the
        # authenticators are positionally aligned with these lists).
        self.dv_c = []
        self.cont_c = []
    def get_authorizations(self, domains, best_effort=False):
        """Retrieve all authorizations for challenges.

        :param list domains: Domains for authorization
        :param bool best_effort: Whether or not all authorizations are
            required (this is useful in renewal)

        :returns: list of all satisfied authorization resources
            (those whose body status is ``STATUS_VALID``)
        :rtype: list

        :raises .AuthorizationError: If unable to retrieve all
            authorizations
        """
        for domain in domains:
            self.authzr[domain] = self.acme.request_domain_challenges(
                domain, self.account.regr.new_authzr_uri)
        self._choose_challenges(domains)
        # While there are still challenges remaining...
        while self.dv_c or self.cont_c:
            cont_resp, dv_resp = self._solve_challenges()
            logger.info("Waiting for verification...")
            # Send all Responses - this modifies dv_c and cont_c
            self._respond(cont_resp, dv_resp, best_effort)
        # Just make sure all decisions are complete.
        self.verify_authzr_complete()
        # Only return valid authorizations
        return [authzr for authzr in self.authzr.values()
                if authzr.body.status == messages.STATUS_VALID]
    def _choose_challenges(self, domains):
        """Retrieve necessary challenges to satisfy server.

        Populates ``self.dv_c`` and ``self.cont_c`` with annotated
        challenges selected by :func:`gen_challenge_path`.
        """
        logger.info("Performing the following challenges:")
        for dom in domains:
            path = gen_challenge_path(
                self.authzr[dom].body.challenges,
                self._get_chall_pref(dom),
                self.authzr[dom].body.combinations)
            dom_cont_c, dom_dv_c = self._challenge_factory(
                dom, path)
            self.dv_c.extend(dom_dv_c)
            self.cont_c.extend(dom_cont_c)
    def _solve_challenges(self):
        """Get Responses for challenges from authenticators."""
        cont_resp = []
        dv_resp = []
        # ErrorHandler registers _cleanup_challenges to run if challenge
        # setup fails (see the re-raise below).
        with error_handler.ErrorHandler(self._cleanup_challenges):
            try:
                if self.cont_c:
                    cont_resp = self.cont_auth.perform(self.cont_c)
                if self.dv_c:
                    dv_resp = self.dv_auth.perform(self.dv_c)
            except errors.AuthorizationError:
                logger.critical("Failure in setting up challenges.")
                logger.info("Attempting to clean up outstanding challenges...")
                raise
        # Responses must remain positionally aligned with the challenges.
        assert len(cont_resp) == len(self.cont_c)
        assert len(dv_resp) == len(self.dv_c)
        return cont_resp, dv_resp
    def _respond(self, cont_resp, dv_resp, best_effort):
        """Send/Receive confirmation of all challenges.

        .. note:: This method also cleans up the auth_handler state.
        """
        # TODO: chall_update is a dirty hack to get around acme-spec #105
        chall_update = dict()
        active_achalls = []
        active_achalls.extend(
            self._send_responses(self.dv_c, dv_resp, chall_update))
        active_achalls.extend(
            self._send_responses(self.cont_c, cont_resp, chall_update))
        # Check for updated status...
        try:
            self._poll_challenges(chall_update, best_effort)
        finally:
            # This removes challenges from self.dv_c and self.cont_c
            self._cleanup_challenges(active_achalls)
    def _send_responses(self, achalls, resps, chall_update):
        """Send responses and make sure errors are handled.

        :param dict chall_update: parameter that is updated to hold
            domain -> list of outstanding solved annotated challenges
        """
        active_achalls = []
        for achall, resp in itertools.izip(achalls, resps):
            # This line needs to be outside of the if block below to
            # ensure failed challenges are cleaned up correctly
            active_achalls.append(achall)
            # Don't send challenges for None and False authenticator responses
            if resp is not None and resp:
                self.acme.answer_challenge(achall.challb, resp)
                # TODO: answer_challenge returns challr, with URI,
                # that can be used in _find_updated_challr
                # comparisons...
                if achall.domain in chall_update:
                    chall_update[achall.domain].append(achall)
                else:
                    chall_update[achall.domain] = [achall]
        return active_achalls
    def _poll_challenges(
            self, chall_update, best_effort, min_sleep=3, max_rounds=15):
        """Wait for all challenge results to be determined.

        Polls the server up to ``max_rounds`` times, sleeping ``min_sleep``
        seconds per round, until every domain in ``chall_update`` is fully
        decided.  Raises :class:`~letsencrypt.errors.FailedChallenges`
        unless ``best_effort`` is set.
        """
        dom_to_check = set(chall_update.keys())
        comp_domains = set()
        rounds = 0
        while dom_to_check and rounds < max_rounds:
            # TODO: Use retry-after...
            time.sleep(min_sleep)
            all_failed_achalls = set()
            for domain in dom_to_check:
                comp_achalls, failed_achalls = self._handle_check(
                    domain, chall_update[domain])
                if len(comp_achalls) == len(chall_update[domain]):
                    comp_domains.add(domain)
                elif not failed_achalls:
                    # Partial progress: drop completed challenges so only
                    # the undecided ones are re-checked next round.
                    for achall, _ in comp_achalls:
                        chall_update[domain].remove(achall)
                # We failed some challenges... damage control
                else:
                    # Right now... just assume a loss and carry on...
                    if best_effort:
                        comp_domains.add(domain)
                    else:
                        all_failed_achalls.update(
                            updated for _, updated in failed_achalls)
            if all_failed_achalls:
                _report_failed_challs(all_failed_achalls)
                raise errors.FailedChallenges(all_failed_achalls)
            dom_to_check -= comp_domains
            comp_domains.clear()
            rounds += 1
    def _handle_check(self, domain, achalls):
        """Returns tuple of ('completed', 'failed').

        When the whole authorization is already valid this returns
        ``(achalls, [])``; otherwise both lists contain
        ``(original, updated)`` annotated-challenge pairs and undecided
        challenges appear in neither.
        """
        completed = []
        failed = []
        self.authzr[domain], _ = self.acme.poll(self.authzr[domain])
        if self.authzr[domain].body.status == messages.STATUS_VALID:
            return achalls, []
        # Note: if the whole authorization is invalid, the individual failed
        # challenges will be determined here...
        for achall in achalls:
            updated_achall = achall.update(challb=self._find_updated_challb(
                self.authzr[domain], achall))
            # Undecided challenges fall through: neither valid nor invalid.
            if updated_achall.status == messages.STATUS_VALID:
                completed.append((achall, updated_achall))
            elif updated_achall.status == messages.STATUS_INVALID:
                failed.append((achall, updated_achall))
        return completed, failed
    def _find_updated_challb(self, authzr, achall):  # pylint: disable=no-self-use
        """Find updated challenge body within Authorization Resource.

        .. warning:: This assumes only one instance of type of challenge in
            each challenge resource.

        :param .AuthorizationResource authzr: Authorization Resource
        :param .AnnotatedChallenge achall: Annotated challenge for which
            to get status
        """
        # Matching is by exact challenge class (see warning above).
        for authzr_challb in authzr.body.challenges:
            if type(authzr_challb.chall) is type(achall.challb.chall):  # noqa
                return authzr_challb
        raise errors.AuthorizationError(
            "Target challenge not found in authorization resource")
    def _get_chall_pref(self, domain):
        """Return list of challenge preferences.

        Continuity preferences come before DV preferences.

        :param str domain: domain for which you are requesting preferences
        """
        # Make sure to make a copy...
        chall_prefs = []
        chall_prefs.extend(self.cont_auth.get_chall_pref(domain))
        chall_prefs.extend(self.dv_auth.get_chall_pref(domain))
        return chall_prefs
    def _cleanup_challenges(self, achall_list=None):
        """Cleanup challenges.

        If achall_list is not provided, cleanup all achallenges.
        Cleaned challenges are removed from ``self.dv_c``/``self.cont_c``.
        """
        logger.info("Cleaning up challenges")
        if achall_list is None:
            dv_c = self.dv_c
            cont_c = self.cont_c
        else:
            dv_c = [achall for achall in achall_list
                    if isinstance(achall.chall, challenges.DVChallenge)]
            cont_c = [achall for achall in achall_list if isinstance(
                achall.chall, challenges.ContinuityChallenge)]
        if dv_c:
            self.dv_auth.cleanup(dv_c)
            for achall in dv_c:
                self.dv_c.remove(achall)
        if cont_c:
            self.cont_auth.cleanup(cont_c)
            for achall in cont_c:
                self.cont_c.remove(achall)
    def verify_authzr_complete(self):
        """Verifies that all authorizations have been decided.

        :raises .AuthorizationError: if any authorization resource is
            neither valid nor invalid (i.e. still pending)
        """
        for authzr in self.authzr.values():
            if (authzr.body.status != messages.STATUS_VALID and
                    authzr.body.status != messages.STATUS_INVALID):
                raise errors.AuthorizationError("Incomplete authorizations")
    def _challenge_factory(self, domain, path):
        """Construct Namedtuple Challenges

        :param str domain: domain of the enrollee

        :param list path: List of indices from `challenges`.

        :returns: cont_chall, list of ContinuityChallenge type
            :class:`letsencrypt.achallenges.AnnotatedChallenge`
            dv_chall, list of DVChallenge type
            :class:`letsencrypt.achallenges.AnnotatedChallenge`
        :rtype: tuple

        :raises .errors.Error: if challenge type is not recognized
        """
        dv_chall = []
        cont_chall = []
        for index in path:
            challb = self.authzr[domain].body.challenges[index]
            chall = challb.chall
            achall = challb_to_achall(challb, self.account.key, domain)
            if isinstance(chall, challenges.ContinuityChallenge):
                cont_chall.append(achall)
            elif isinstance(chall, challenges.DVChallenge):
                dv_chall.append(achall)
        return cont_chall, dv_chall
def challb_to_achall(challb, account_key, domain):
    """Converts a ChallengeBody object to an AnnotatedChallenge.

    :param .ChallengeBody challb: ChallengeBody
    :param .JWK account_key: Authorized Account Key
    :param str domain: Domain of the challb

    :returns: Appropriate AnnotatedChallenge
    :rtype: :class:`letsencrypt.achallenges.AnnotatedChallenge`

    :raises .errors.Error: if the challenge type is unsupported
    """
    chall = challb.chall
    logger.info("%s challenge for %s", chall.typ, domain)
    if isinstance(chall, challenges.KeyAuthorizationChallenge):
        return achallenges.KeyAuthorizationAnnotatedChallenge(
            challb=challb, domain=domain, account_key=account_key)
    elif isinstance(chall, challenges.DNS):
        return achallenges.DNS(challb=challb, domain=domain)
    elif isinstance(chall, challenges.RecoveryContact):
        return achallenges.RecoveryContact(
            challb=challb, domain=domain)
    elif isinstance(chall, challenges.ProofOfPossession):
        return achallenges.ProofOfPossession(
            challb=challb, domain=domain)
    else:
        # Interpolate eagerly: Exception constructors do not perform
        # logging-style lazy "%s" formatting, so the original two-argument
        # form produced an unformatted message tuple.
        raise errors.Error(
            "Received unsupported challenge of type: %s" % chall.typ)
def gen_challenge_path(challbs, preferences, combinations):
    """Generate a plan to get authority over the identity.

    .. todo:: This can be possibly be rewritten to use resolved_combinations.

    :param tuple challbs: Challenges offered by the server
        (:class:`acme.messages.Challenge`), all of which must be proven
        to claim the identifier.
    :param list preferences: Challenge classes this client prefers for the
        domain, most preferred first
        (:class:`acme.challenges.Challenge` subclasses).
    :param tuple combinations: Optional server hint: sets of challenge
        indices, any one of which is sufficient to prove possession.

    :returns: tuple of indices from ``challenges``.
    :rtype: tuple

    :raises letsencrypt.errors.AuthorizationError: If a
        path cannot be created that satisfies the CA given the preferences
        and combinations.
    """
    # With a server hint we can pick the cheapest advertised combination;
    # without one we must cover every offered challenge ourselves.
    if combinations:
        return _find_smart_path(challbs, preferences, combinations)
    return _find_dumb_path(challbs, preferences)
def _find_smart_path(challbs, preferences, combinations):
"""Find challenge path with server hints.
Can be called if combinations is included. Function uses a simple
ranking system to choose the combo with the lowest cost.
"""
chall_cost = {}
max_cost = 1
for i, chall_cls in enumerate(preferences):
chall_cost[chall_cls] = i
max_cost += i
# max_cost is now equal to sum(indices) + 1
best_combo = []
# Set above completing all of the available challenges
best_combo_cost = max_cost
combo_total = 0
for combo in combinations:
for challenge_index in combo:
combo_total += chall_cost.get(challbs[
challenge_index].chall.__class__, max_cost)
if combo_total < best_combo_cost:
best_combo = combo
best_combo_cost = combo_total
combo_total = 0
if not best_combo:
msg = ("Client does not support any combination of challenges that "
"will satisfy the CA.")
logger.fatal(msg)
raise errors.AuthorizationError(msg)
return best_combo
def _find_dumb_path(challbs, preferences):
    """Find challenge path without server hints.

    Used when the server offers no ``combinations`` hint: every offered
    challenge must be covered.  Builds the path in preference order while
    skipping challenges that are mutually exclusive with ones already
    chosen.
    """
    # Preferences must be unique classes, otherwise indices could repeat.
    assert len(preferences) == len(set(preferences))
    path = []
    chosen = set()
    for preferred_cls in preferences:
        for index, candidate in enumerate(challbs):
            matches = isinstance(candidate.chall, preferred_cls)
            if matches and is_preferred(candidate, chosen):
                path.append(index)
                chosen.add(candidate)
    return path
def mutually_exclusive(obj1, obj2, groups, different=False):
    """Are two objects mutually exclusive?

    Returns ``False`` when both objects belong to the same exclusivity
    group (optionally ignoring same-class pairs when ``different`` is
    set); ``True`` otherwise.
    """
    for group in groups:
        in_group_first = any(isinstance(obj1, member) for member in group)
        in_group_second = any(isinstance(obj2, member) for member in group)
        if not (in_group_first and in_group_second):
            continue
        if not different or not isinstance(obj1, obj2.__class__):
            return False
    return True
def is_preferred(offered_challb, satisfied,
                 exclusive_groups=constants.EXCLUSIVE_CHALLENGES):
    """Return whether or not the challenge is preferred in path.

    A candidate is preferred iff it is mutually compatible with every
    challenge already selected.
    """
    return all(
        mutually_exclusive(
            offered_challb.chall, already.chall, exclusive_groups,
            different=True)
        for already in satisfied)
# Namespace prefix the ACME server prepends to error types; it is stripped
# before looking up remediation advice in _ERROR_HELP.
_ACME_PREFIX = "urn:acme:error:"
# Advice shared by several error types below.
_ERROR_HELP_COMMON = (
    "To fix these errors, please make sure that your domain name was entered "
    "correctly and the DNS A record(s) for that domain contain(s) the "
    "right IP address.")
# error type (without the urn prefix) -> user-facing remediation advice,
# appended to failure reports by _generate_failed_chall_msg.
_ERROR_HELP = {
    "connection":
        _ERROR_HELP_COMMON + " Additionally, please check that your computer "
        "has a publicly routable IP address and that no firewalls are preventing "
        "the server from communicating with the client. If you're using the "
        "webroot plugin, you should also verify that you are serving files "
        "from the webroot path you provided.",
    "dnssec":
        _ERROR_HELP_COMMON + " Additionally, if you have DNSSEC enabled for "
        "your domain, please ensure that the signature is valid.",
    "malformed":
        "To fix these errors, please make sure that you did not provide any "
        "invalid information to the client, and try running Let's Encrypt "
        "again.",
    "serverInternal":
        "Unfortunately, an error on the ACME server prevented you from completing "
        "authorization. Please try again later.",
    "tls":
        _ERROR_HELP_COMMON + " Additionally, please check that you have an "
        "up-to-date TLS configuration that allows the server to communicate "
        "with the Let's Encrypt client.",
    "unauthorized": _ERROR_HELP_COMMON,
    "unknownHost": _ERROR_HELP_COMMON,
}
def _report_failed_challs(failed_achalls):
    """Notifies the user about failed challenges.

    Groups the failures by error type and emits one reporter message per
    group.

    :param set failed_achalls: A set of failed
        :class:`letsencrypt.achallenges.AnnotatedChallenge`.
    """
    by_error_type = dict()
    for achall in failed_achalls:
        # Challenges without an attached error are silently skipped.
        if achall.error:
            by_error_type.setdefault(achall.error.typ, []).append(achall)
    reporter = zope.component.getUtility(interfaces.IReporter)
    for group in by_error_type.itervalues():
        reporter.add_message(
            _generate_failed_chall_msg(group), reporter.MEDIUM_PRIORITY)
def _generate_failed_chall_msg(failed_achalls):
    """Creates a user friendly error message about failed challenges.

    :param list failed_achalls: A list of failed
        :class:`letsencrypt.achallenges.AnnotatedChallenge` with the same
        error type.

    :returns: A formatted error message for the client.
    :rtype: str
    """
    typ = failed_achalls[0].error.typ
    # Strip the ACME urn prefix so the type matches _ERROR_HELP keys.
    if typ.startswith(_ACME_PREFIX):
        typ = typ[len(_ACME_PREFIX):]
    parts = ["The following errors were reported by the server:"]
    parts.extend(
        "\n\nDomain: %s\nType: %s\nDetail: %s" % (
            achall.domain, typ, achall.error.detail)
        for achall in failed_achalls)
    if typ in _ERROR_HELP:
        parts.append("\n\n")
        parts.append(_ERROR_HELP[typ])
    return "".join(parts)
| |
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
class Download( GithubObject.GithubObject ):
    """Representation of a GitHub "download" resource.

    Each property follows the shared GithubObject pattern: reading it first
    calls ``_completeIfNotSet`` (which presumably fetches the full object
    from the API when the attribute was never received -- see the
    GithubObject base class), then returns the value via ``_NoneIfNotSet``.
    """
    @property
    def accesskeyid( self ):
        self._completeIfNotSet( self._accesskeyid )
        return self._NoneIfNotSet( self._accesskeyid )
    @property
    def acl( self ):
        self._completeIfNotSet( self._acl )
        return self._NoneIfNotSet( self._acl )
    @property
    def bucket( self ):
        self._completeIfNotSet( self._bucket )
        return self._NoneIfNotSet( self._bucket )
    @property
    def content_type( self ):
        self._completeIfNotSet( self._content_type )
        return self._NoneIfNotSet( self._content_type )
    @property
    def created_at( self ):
        self._completeIfNotSet( self._created_at )
        return self._NoneIfNotSet( self._created_at )
    @property
    def description( self ):
        self._completeIfNotSet( self._description )
        return self._NoneIfNotSet( self._description )
    @property
    def download_count( self ):
        self._completeIfNotSet( self._download_count )
        return self._NoneIfNotSet( self._download_count )
    @property
    def expirationdate( self ):
        self._completeIfNotSet( self._expirationdate )
        return self._NoneIfNotSet( self._expirationdate )
    @property
    def html_url( self ):
        self._completeIfNotSet( self._html_url )
        return self._NoneIfNotSet( self._html_url )
    @property
    def id( self ):
        self._completeIfNotSet( self._id )
        return self._NoneIfNotSet( self._id )
    @property
    def mime_type( self ):
        self._completeIfNotSet( self._mime_type )
        return self._NoneIfNotSet( self._mime_type )
    @property
    def name( self ):
        self._completeIfNotSet( self._name )
        return self._NoneIfNotSet( self._name )
    @property
    def path( self ):
        self._completeIfNotSet( self._path )
        return self._NoneIfNotSet( self._path )
    @property
    def policy( self ):
        self._completeIfNotSet( self._policy )
        return self._NoneIfNotSet( self._policy )
    @property
    def prefix( self ):
        self._completeIfNotSet( self._prefix )
        return self._NoneIfNotSet( self._prefix )
    @property
    def redirect( self ):
        self._completeIfNotSet( self._redirect )
        return self._NoneIfNotSet( self._redirect )
    @property
    def s3_url( self ):
        self._completeIfNotSet( self._s3_url )
        return self._NoneIfNotSet( self._s3_url )
    @property
    def signature( self ):
        self._completeIfNotSet( self._signature )
        return self._NoneIfNotSet( self._signature )
    @property
    def size( self ):
        self._completeIfNotSet( self._size )
        return self._NoneIfNotSet( self._size )
    @property
    def url( self ):
        self._completeIfNotSet( self._url )
        return self._NoneIfNotSet( self._url )
    def delete( self ):
        """Delete this download by issuing a DELETE request to its URL."""
        headers, data = self._requester.requestAndCheck(
            "DELETE",
            self.url,
            None,
            None
        )
    def _initAttributes( self ):
        # All attributes start as the NotSet sentinel, meaning "never
        # received from the API"; _useAttributes overwrites them.
        self._accesskeyid = GithubObject.NotSet
        self._acl = GithubObject.NotSet
        self._bucket = GithubObject.NotSet
        self._content_type = GithubObject.NotSet
        self._created_at = GithubObject.NotSet
        self._description = GithubObject.NotSet
        self._download_count = GithubObject.NotSet
        self._expirationdate = GithubObject.NotSet
        self._html_url = GithubObject.NotSet
        self._id = GithubObject.NotSet
        self._mime_type = GithubObject.NotSet
        self._name = GithubObject.NotSet
        self._path = GithubObject.NotSet
        self._policy = GithubObject.NotSet
        self._prefix = GithubObject.NotSet
        self._redirect = GithubObject.NotSet
        self._s3_url = GithubObject.NotSet
        self._signature = GithubObject.NotSet
        self._size = GithubObject.NotSet
        self._url = GithubObject.NotSet
    def _useAttributes( self, attributes ):
        # Copy known keys from the raw API dict onto this object; each key
        # is type-checked first.  Only datetime-valued keys are converted.
        if "accesskeyid" in attributes: # pragma no branch
            assert attributes[ "accesskeyid" ] is None or isinstance( attributes[ "accesskeyid" ], ( str, unicode ) ), attributes[ "accesskeyid" ]
            self._accesskeyid = attributes[ "accesskeyid" ]
        if "acl" in attributes: # pragma no branch
            assert attributes[ "acl" ] is None or isinstance( attributes[ "acl" ], ( str, unicode ) ), attributes[ "acl" ]
            self._acl = attributes[ "acl" ]
        if "bucket" in attributes: # pragma no branch
            assert attributes[ "bucket" ] is None or isinstance( attributes[ "bucket" ], ( str, unicode ) ), attributes[ "bucket" ]
            self._bucket = attributes[ "bucket" ]
        if "content_type" in attributes: # pragma no branch
            assert attributes[ "content_type" ] is None or isinstance( attributes[ "content_type" ], ( str, unicode ) ), attributes[ "content_type" ]
            self._content_type = attributes[ "content_type" ]
        if "created_at" in attributes: # pragma no branch
            assert attributes[ "created_at" ] is None or isinstance( attributes[ "created_at" ], ( str, unicode ) ), attributes[ "created_at" ]
            self._created_at = self._parseDatetime( attributes[ "created_at" ] )
        if "description" in attributes: # pragma no branch
            assert attributes[ "description" ] is None or isinstance( attributes[ "description" ], ( str, unicode ) ), attributes[ "description" ]
            self._description = attributes[ "description" ]
        if "download_count" in attributes: # pragma no branch
            assert attributes[ "download_count" ] is None or isinstance( attributes[ "download_count" ], int ), attributes[ "download_count" ]
            self._download_count = attributes[ "download_count" ]
        if "expirationdate" in attributes: # pragma no branch
            assert attributes[ "expirationdate" ] is None or isinstance( attributes[ "expirationdate" ], ( str, unicode ) ), attributes[ "expirationdate" ]
            self._expirationdate = self._parseDatetime( attributes[ "expirationdate" ] )
        if "html_url" in attributes: # pragma no branch
            assert attributes[ "html_url" ] is None or isinstance( attributes[ "html_url" ], ( str, unicode ) ), attributes[ "html_url" ]
            self._html_url = attributes[ "html_url" ]
        if "id" in attributes: # pragma no branch
            assert attributes[ "id" ] is None or isinstance( attributes[ "id" ], int ), attributes[ "id" ]
            self._id = attributes[ "id" ]
        if "mime_type" in attributes: # pragma no branch
            assert attributes[ "mime_type" ] is None or isinstance( attributes[ "mime_type" ], ( str, unicode ) ), attributes[ "mime_type" ]
            self._mime_type = attributes[ "mime_type" ]
        if "name" in attributes: # pragma no branch
            assert attributes[ "name" ] is None or isinstance( attributes[ "name" ], ( str, unicode ) ), attributes[ "name" ]
            self._name = attributes[ "name" ]
        if "path" in attributes: # pragma no branch
            assert attributes[ "path" ] is None or isinstance( attributes[ "path" ], ( str, unicode ) ), attributes[ "path" ]
            self._path = attributes[ "path" ]
        if "policy" in attributes: # pragma no branch
            assert attributes[ "policy" ] is None or isinstance( attributes[ "policy" ], ( str, unicode ) ), attributes[ "policy" ]
            self._policy = attributes[ "policy" ]
        if "prefix" in attributes: # pragma no branch
            assert attributes[ "prefix" ] is None or isinstance( attributes[ "prefix" ], ( str, unicode ) ), attributes[ "prefix" ]
            self._prefix = attributes[ "prefix" ]
        if "redirect" in attributes: # pragma no branch
            assert attributes[ "redirect" ] is None or isinstance( attributes[ "redirect" ], bool ), attributes[ "redirect" ]
            self._redirect = attributes[ "redirect" ]
        if "s3_url" in attributes: # pragma no branch
            assert attributes[ "s3_url" ] is None or isinstance( attributes[ "s3_url" ], ( str, unicode ) ), attributes[ "s3_url" ]
            self._s3_url = attributes[ "s3_url" ]
        if "signature" in attributes: # pragma no branch
            assert attributes[ "signature" ] is None or isinstance( attributes[ "signature" ], ( str, unicode ) ), attributes[ "signature" ]
            self._signature = attributes[ "signature" ]
        if "size" in attributes: # pragma no branch
            assert attributes[ "size" ] is None or isinstance( attributes[ "size" ], int ), attributes[ "size" ]
            self._size = attributes[ "size" ]
        if "url" in attributes: # pragma no branch
            assert attributes[ "url" ] is None or isinstance( attributes[ "url" ], ( str, unicode ) ), attributes[ "url" ]
            self._url = attributes[ "url" ]
| |
from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
# Tiny toy corpora used as fixtures by the tests below.  The two corpora
# share some tokens ("the", "copyright") but differ in their food words.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
# Concatenation of both corpora, junk-food documents first.
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Custom preprocessor: accent-fold *s* then uppercase it."""
    return strip_accents_unicode(s).upper()
def strip_eacute(s):
    """Replace every e-acute in *s* with a plain ASCII 'e'."""
    return 'e'.join(s.split('\xe9'))
def split_tokenize(s):
    """Custom tokenizer: split *s* on runs of arbitrary whitespace."""
    return s.split(None)
def lazy_analyze(s):
    """Custom analyzer: ignores its input and emits one constant feature."""
    return ['the_ultimate_feature']
def test_strip_accents():
    """Unicode accent folding on latin, arabic and mixed text."""
    # classical latin accentuated symbols
    accented = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    assert_equal(strip_accents_unicode(accented), 'aaaaaaceeee')
    accented = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    assert_equal(strip_accents_unicode(accented), 'iiiinooooouuuuy')
    # arabic: halef with a hamza below folds to the simple halef
    assert_equal(strip_accents_unicode('\u0625'), '\u0627')
    # mixture of accentuated and plain letters
    assert_equal(strip_accents_unicode("this is \xe0 test"), 'this is a test')
def test_to_ascii():
    """ASCII transliteration on latin, arabic and mixed text."""
    # classical latin accentuated symbols
    accented = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    assert_equal(strip_accents_ascii(accented), 'aaaaaaceeee')
    accented = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    assert_equal(strip_accents_ascii(accented), 'iiiinooooouuuuy')
    # halef has no direct ascii match, so it is dropped entirely
    assert_equal(strip_accents_ascii('\u0625'), '')
    # mixture of accentuated and plain letters
    assert_equal(strip_accents_ascii("this is \xe0 test"), 'this is a test')
def test_word_analyzer_unigrams():
    """Default word analyzer lowercases, strips accents and punctuation and
    drops single-character tokens; also checks file input, a custom
    preprocessor and a custom tokenizer."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        # file-like input is read and decoded by the analyzer itself
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer (plain whitespace split keeps punctuation)
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """Word analyzer with ngram_range=(1, 2) emits all unigrams followed
    by all bigrams."""
    analyze = CountVectorizer(analyzer="word", strip_accents='unicode',
                              ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(analyze(text), expected)
def test_unicode_decode_error():
    """Analyzers raise UnicodeDecodeError when given bytes that do not
    match the configured encoding."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    """Character n-gram analyzer: check the first/last n-grams of string
    and file inputs (whitespace is normalized to single spaces)."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    """char_wb analyzer: n-grams are padded with spaces at word
    boundaries and never cross words."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary (mapping or iterable) fixes the learned
    vocabulary_ and the transformed feature count."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # mappings keep their exact term -> index assignment
            assert_equal(vect.vocabulary_, vocab)
        else:
            # iterables only guarantee the term set, not the indices
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary survives a count -> tfidf Pipeline and fixes
    the output width."""
    wanted_terms = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=wanted_terms)),
        ('tfidf', TfidfTransformer())])
    transformed = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(wanted_terms))
    assert_equal(transformed.shape[1], len(wanted_terms))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """A vocabulary mapping two terms to the same index is rejected."""
    vocab = {"pizza": 0, "beer": 0}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
    # NOTE(review): if the constructor does not raise, this test passes
    # vacuously -- consider asserting that ValueError was actually raised
    # (e.g. via an else clause or assert_raises around fit).
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary whose indices are not 0..n-1 contiguous is rejected."""
    vocab = {"pizza": 1, "beer": 2}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
    # NOTE(review): as with the repeated-indices test above, this passes
    # vacuously if no ValueError is raised.
def test_countvectorizer_stop_words():
    """get_stop_words resolves 'english', rejects unknown names and
    accepts an explicit list."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
    """Fitting with an empty vocabulary -- explicit, or induced by stop
    words swallowing every token -- raises ValueError."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting must rebuild the vocabulary from the new corpus."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    # the two document halves yield different vocabulary sizes
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """Smoothed IDF keeps tf-idf non-negative and l2-normalized, even with all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
    """Unsmoothed IDF works on full counts but warns (div-by-zero) on all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # probe whether this numpy build emits a div-by-zero RuntimeWarning
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    # skip (rather than fail) on numpy builds that never warn on 1/0
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    """Sublinear tf (1 + log(tf)) must grow monotonically but slower than tf."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    # sublinear scaling stays strictly below the raw counts
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks of CountVectorizer, TfidfTransformer and TfidfVectorizer.

    Fix over the previous version: the final check assigned
    ``v3.set_params = '_invalid_analyzer_type_'``, which clobbered the
    bound ``set_params`` method instead of setting the ``analyzer``
    parameter, so the bad-analyzer error path was never exercised.
    """
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type (previously this line overwrote the
    # set_params method itself instead of calling it)
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    """Attribute writes on TfidfVectorizer must propagate to its inner TfidfTransformer."""
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    """Smoke-test HashingVectorizer value ranges and row normalization.

    Fix over the previous version: both row-normalization loops checked
    ``X[0]`` on every iteration (the loop variable ``i`` was unused), so
    only the first row was ever verified.  They now check ``X[i]``.
    """
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)
    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    """get_feature_names must mirror vocabulary_ in index order."""
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)
    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)
    # each feature name maps back to its own column index
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
    """max_features keeps the top-DF terms and records the rest in stop_words_."""
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )
    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])
    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    """max_features must always retain the highest-frequency term."""
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()
    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())
    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    """max_df (as a float ratio or absolute int) must move too-frequent terms into stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
    # integer max_df: absolute document count
    vect.max_df = 1
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    """min_df (as an absolute int or float ratio) must move rare terms into stop_words_."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)
    vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True must clip counts to 0/1; dtype parameter must be honored."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
    """HashingVectorizer with non_negative: raw counts vs binary occurrences."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)
    # check the ability to change the dtype
    # NOTE(review): np.float64 is already the default dtype above, so this
    # final check does not actually prove the dtype parameter is honored —
    # consider requesting a non-default dtype instead. TODO confirm.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
    """inverse_transform must recover each document's unique analyzed terms."""
    # raw documents
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            # compare as sorted unique term sets (order is not guaranteed)
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)
        # Test that inverse_transform also works with numpy arrays
        transformed_data = transformed_data.toarray()
        inversed_data2 = vectorizer.inverse_transform(transformed_data)
        for terms, terms2 in zip(inversed_data, inversed_data2):
            assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
    """GridSearchCV over a CountVectorizer+LinearSVC pipeline must reach 100% on the toy corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
    """GridSearchCV over a TfidfVectorizer+LinearSVC pipeline must reach 100% on the toy corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
    """3-fold CV of a TfidfVectorizer+LinearSVC pipeline must be perfect on the toy corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with cyrillic.
    # NOTE(review): the escape sequences below are the UTF-8 byte values of a
    # Russian sentence; this reads as intended under Python 2 byte strings —
    # on Python 3 it is a str of Latin-1 code points. TODO confirm the
    # suite's target Python version.
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")
    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert_equal(X_counted.shape, (1, 15))
    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    assert_equal(X_hashed.shape, (1, 2 ** 20))
    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)
    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # non regression smoke test for inheritance issues
    """fit_transform and transform must agree when the vocabulary is fixed."""
    vocabulary = ['pizza', 'celeri']
    vect = TfidfVectorizer(vocabulary=vocabulary)
    X_1 = vect.fit_transform(ALL_FOOD_DOCS)
    X_2 = vect.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(X_1.toarray(), X_2.toarray())
    assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
    """Pickle round-trips must preserve class, params, and transform output."""
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm='l1'),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]
    for orig in instances:
        s = pickle.dumps(orig)
        copy = pickle.loads(s)
        assert_equal(type(copy), orig.__class__)
        assert_equal(copy.get_params(), orig.get_params())
        assert_array_equal(
            copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
            orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
    # ensure that vocabulary of type set is coerced to a list to
    # preserve iteration ordering after deserialization
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    # many random subsets to make ordering flakiness show up
    for x in range(0, 100):
        vocab_set = set(choice(vocab_words, size=5, replace=False,
                               random_state=rng))
        cv = CountVectorizer(vocabulary=vocab_set)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
    """Dict vocabularies must survive pickling with feature order intact."""
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for x in range(0, 100):
        vocab_dict = dict()
        words = choice(vocab_words, size=5, replace=False, random_state=rng)
        for y in range(0, 5):
            vocab_dict[words[y]] = y
        cv = CountVectorizer(vocabulary=vocab_dict)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
    # Ensure that deleting the stop_words_ attribute doesn't affect transform
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )
    for vect in fitted_vectorizers:
        vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        # neither setting to None nor deleting the attribute may change output
        vect.stop_words_ = None
        stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        delattr(vect, 'stop_words_')
        stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
        assert_array_equal(stop_None_transform, vect_transform)
        assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
    """A pickled TfidfTransformer must keep its class and transform output."""
    X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    orig = TfidfTransformer().fit(X)
    s = pickle.dumps(orig)
    copy = pickle.loads(s)
    assert_equal(type(copy), orig.__class__)
    assert_array_equal(
        copy.fit_transform(X).toarray(),
        orig.fit_transform(X).toarray())
def test_non_unique_vocab():
    """A vocabulary list with duplicate terms must be rejected at fit time."""
    vocab = ['a', 'b', 'c', 'a', 'a']
    vect = CountVectorizer(vocabulary=vocab)
    assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."
    exception = ValueError
    def func():
        # one nan in the middle of otherwise valid documents
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])
    assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(v.binary)
    # with idf and norm disabled, binary mode yields raw 0/1 indicators
    X = v.fit_transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X.ravel(), [1, 1, 1, 0])
    X2 = v.transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    """The public idf_ property must expose the inner transformer's idf_ vector."""
    vect = TfidfVectorizer(use_idf=True)
    vect.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
    """clone() must copy a fixed vocabulary so both fits agree."""
    vect_vocab = TfidfVectorizer(vocabulary=["the"])
    vect_vocab_clone = clone(vect_vocab)
    vect_vocab.fit(ALL_FOOD_DOCS)
    vect_vocab_clone.fit(ALL_FOOD_DOCS)
    assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
    """Passing a bare string (instead of an iterable of docs) must raise a clear error."""
    message = ("Iterable over raw text documents expected, "
               "string object received.")
    for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
        assert_raise_message(
            ValueError, message, vec.fit_transform, "hello world!")
        assert_raise_message(
            ValueError, message, vec.fit, "hello world!")
        assert_raise_message(
            ValueError, message, vec.transform, "hello world!")
| |
"""
Several reinforcement learning algorithms.
If they begin with a 'b_' they were implemented to allow fitting of behavioral
accuracy data. If they begin with a 's_' they were designed to simulate
'online' learning (e.g. a computational agent learning an N-armed bandit
task).
"""
import math
def b_delta(rewards, states, alpha):
    """
    Rescorla-Wagner (delta) learning rule fit to a reward/state sequence.

    Values start at 0 and null states (0 or '0') are silently skipped.
    Returns two dicts keyed by state: the value timecourse and the
    reward-prediction-error (RPE) timecourse.
    """
    # One value trace (seeded with V = 0) and one RPE trace per state.
    state_names = set(states)
    V_dict = {name: [0.] for name in state_names}
    RPE_dict = {name: [] for name in state_names}
    for reward, state in zip(rewards, states):
        # Terminal/null states contribute nothing.
        if state == 0 or state == '0':
            continue
        value = V_dict[state][-1]
        # Delta rule: error is reward minus current expectation.
        prediction_error = reward - value
        # Store the updated value so the next visit picks it up.
        V_dict[state].append(value + alpha * prediction_error)
        RPE_dict[state].append(prediction_error)
    return V_dict, RPE_dict
def b_delta_similarity(rewards, states, similarity, alpha):
    """
    Delta learning rule with rewards scaled by a similarity weight.

    The effective reward on each trial is ``r * similarity``, i.e. the
    reward is diminished by the distance between a reward exemplar and
    its (possible) category representation.  V starts at 0 and null
    states (0 or '0') are silently skipped.

    Returns three dicts keyed by state: value, RPE and scaled-reward
    timecourses.
    """
    state_names = set(states)
    V_dict = {name: [0.] for name in state_names}
    RPE_dict = {name: [] for name in state_names}
    r_sim_dict = {name: [] for name in state_names}
    for reward, state, sim in zip(rewards, states, similarity):
        # Terminal/null states contribute nothing.
        if state == 0 or state == '0':
            continue
        value = V_dict[state][-1]
        # Scale the reward before computing the prediction error.
        scaled_reward = reward * sim
        prediction_error = scaled_reward - value
        # Store the updated value so the next visit picks it up.
        V_dict[state].append(value + alpha * prediction_error)
        RPE_dict[state].append(prediction_error)
        r_sim_dict[state].append(scaled_reward)
    return V_dict, RPE_dict, r_sim_dict
def b_td_0(rewards, states, alpha):
    """
    Implements Sutton and Barto's temporal difference algorithm, assuming
    gamma is 1. All V (values) initialized at zero.

    Arbitrary numbers of states are allowed; to simplify it was assumed
    that once started the markov process continues until the terminal state
    is achieved.

    Each trial is composed of one set of states which are contiguously
    packed separated only by null (0), that is to say terminal, states, the
    (empty) terminal state.

    Returns V_dict (dict of value timecourses per state) and RPE_dict
    (dict of RPE timecourses per state), in that order.

    Fix over the previous version: the Python-2-only ``print`` statements
    (``print 'x'``) were converted to ``print()`` calls so the module
    parses under both Python 2 and 3; behavior and output are unchanged.
    """
    ## Taken form,
    ## Sutton And Barto, Reinforcement Learning:
    ## An Introduction, MIT Press, 1998,
    ## TD is:
    ## Initialize V(s) and pi (policy)
    ## Repeat (for each trial)
    ##   Initialize s
    ##   Repeat (for each step/state)
    ##     a is the action given by pi for s
    ##     Take a; observe reward (r) and the next state (s')
    ##     V(s) <- V(s) + alpha * (r + gamma * V(s') - V(s))
    ##     s <- s'
    ##   until s is terminal
    gamma = 1
    ## Init V_dict, and RPE_list
    s_names = list(set(states))
    V_dict = {}
    RPE_dict = {}
    for s in s_names:
        V_dict[s] = [0.]
        RPE_dict[s] = []
        # RPE should always
        # be n-1 compared to V
    for step in range(len(states) - 1):
        r = rewards[step]
        s = states[step]
        s_plus = states[step + 1]
        ## Define values but then check and
        ## make sure we're not in or before
        ## a terminal state.
        V_s = V_dict[s][-1]
        V_s_plus = V_dict[s_plus][-1]
        if (s == 0) | (s == '0'):
            print('@NULL')
            continue
        # If we are terminal, terminate
        elif (s_plus == 0) | (s_plus == '0'):
            # if the next state is terminal,
            # V_s_plus must be zero;
            # enforce that.
            V_s_plus = 0
        print('step{0}, s{1}, s_plus{3}, r{2}'.format(step, s, r, s_plus))
        ## And, finally, do the TD calculations.
        RPE = r + (gamma * (V_s_plus - V_s))
        V_s_new = V_s + (alpha * RPE)
        print('\t\tRPE {0}, V_s {1}'.format(RPE, V_s))
        V_dict[s].append(V_s_new)
        RPE_dict[s].append(RPE)
    return V_dict, RPE_dict
def b_rc(actions, rewards, beta):
    """
    Implements Sutton and Barto's (1998) 'reinforcement comparison'
    algorithm.  Returns P(action) for each action at each timestep in a
    dict, along with the accompanying accumulative reference reward (i.e.
    the inline reward average) and reference prediction error (RPE).

    Beta is the step size (0-1).  Rewards in this model may be any real
    number (unlike most td implementations which are bound between 0-1).

    Fixes over the previous (marked UNTESTED) version:
    * ``for a,r in actions,rewards`` iterated over the 2-tuple of lists
      (unpacking the lists themselves), never over paired elements; it now
      uses ``zip(actions, rewards)``.
    * ``P_dict[a] = P_dict[a].append(...)`` (and the two sibling updates)
      rebound the lists to ``None`` because ``list.append`` returns None,
      crashing on the second visit to any action; the lists are now
      appended in place.
    """
    ## In simulation action selection policy is set by softmax:
    ## In this example there are three possible actions, extends to N
    ## P(a_t = a) = e^P_t(a) / sum( e^P_t(b) + e^P_r(c) + ...)
    ## Will display probability matching....
    P_dict = {}
    ref_reward_dict = {}
    RPE_dict = {}  # the reference prediction error (RPE)
    for a in set(actions):
        ## Init P_dict, and the rest.
        ## Start P_initial and ref_reward_dict as 0;
        ## RPE is empty, keeping them in sync.
        P_dict[a] = [0]
        ref_reward_dict[a] = [0]
        RPE_dict[a] = []
    for a, r in zip(actions, rewards):
        ## Do calcs, then update dicts
        ref_reward = ref_reward_dict[a][-1]
        RPE = r - ref_reward
        P_a_tminus = P_dict[a][-1]
        P_a_t = P_a_tminus + (beta * RPE)
        P_dict[a].append(P_a_t)
        # NOTE: pairwise average of previous reference and current reward
        # (not a true running mean) — preserved from the original.
        ref_reward_dict[a].append((ref_reward + r) / 2)
        RPE_dict[a].append(RPE)
    return P_dict, ref_reward_dict, RPE_dict
| |
from distutils import util, log
from distutils.errors import DistutilsSetupError
from distutils import sysconfig
from distutils.command.build_scripts import build_scripts as _build_scripts
import sys, os, re, glob
try:
from Cython.Distutils import build_ext
has_cython = True
except ImportError:
from distutils.command.build_ext import build_ext
has_cython = False
class build_clipper_ext(build_ext):
    def build_extensions(self):
        """Resolve clipper libraries, honour a $CC override, then delegate to distutils.

        Monkey-patches the compiler's ``find_library_file`` with our
        platform-aware ``_find_library_file`` before ``find_libraries``
        runs, so the library search below uses our lookup rules.
        """
        self.compiler.find_library_file = self._find_library_file
        self.find_libraries()
        compiler = os.getenv('CC')
        args = {}
        # unfortunately, distutils doesn't let us provide separate C and C++
        # compilers
        if compiler is not None:
            (ccshared,cflags) = sysconfig.get_config_vars('CCSHARED','CFLAGS')
            # rebuild the shared-object compile command around the user's CC
            args['compiler_so'] = compiler + ' ' + ccshared + ' ' + cflags
        self.compiler.set_executables(**args)
        build_ext.build_extensions(self)
    def _find_library_file(self, dirs, lib, debug=0):
        """Locate ``lib`` in ``dirs``, preferring dylib > shared > static.

        ``lib`` may be a bare name (e.g. 'clipper-core') or an already
        formed file name ('libfoo.so').  Versioned "development" names
        (e.g. 'libfoo.so.1', 'libfoo.1.dylib') are also probed via glob.
        On OSX, paths under /System and /usr (but not /usr/local) are
        resolved through the -isysroot SDK root when one is configured.
        Returns the first path found, or None.
        """
        # Ok if lib has already form of lib*.* then skip generating name
        if re.match("lib.*\.(?:dylib|so|a)", lib):
            shared_f = dylib_f = static_f = lib
        else:
            # Copied from distutils.unixcompiler
            shared_f = self.compiler.library_filename(lib, lib_type='shared')
            dylib_f = self.compiler.library_filename(lib, lib_type='dylib')
            static_f = self.compiler.library_filename(lib, lib_type='static')
        if sys.platform == 'darwin':
            # On OSX users can specify an alternate SDK using
            # '-isysroot', calculate the SDK root if it is specified
            # (and use it further on)
            cflags = sysconfig.get_config_var('CFLAGS')
            m = re.search(r'-isysroot\s+(\S+)', cflags)
            if m is None:
                sysroot = '/'
            else:
                sysroot = m.group(1)
        for dir in dirs:
            shared = os.path.join(dir, shared_f)
            dylib = os.path.join(dir, dylib_f)
            static = os.path.join(dir, static_f)
            if sys.platform == 'darwin' and (
                dir.startswith('/System/') or (
                dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
                # system dirs live inside the SDK when -isysroot is in use
                shared = os.path.join(sysroot, dir[1:], shared_f)
                dylib = os.path.join(sysroot, dir[1:], dylib_f)
                static = os.path.join(sysroot, dir[1:], static_f)
            # We can have development libraries names...
            shared_dev = glob.glob(shared+'.*')
            if len(shared_dev) > 0:
                shared_dev = shared_dev[0]
            else:
                shared_dev = None
            dylib_dev = glob.glob(dylib.replace('dylib', '*.dylib'))
            if len(dylib_dev) > 0:
                dylib_dev = dylib_dev[0]
            else:
                dylib_dev = None
            static_dev = None
            # We're second-guessing the linker here, with not much hard
            # data to go on: GCC seems to prefer the shared library, so I'm
            # assuming that *all* Unix C compilers do. And of course I'm
            # ignoring even GCC's "-static" option. So sue me.
            if os.path.exists(dylib):
                return dylib
            elif dylib_dev:
                return dylib_dev
            elif os.path.exists(shared):
                return shared
            elif shared_dev:
                return shared_dev
            elif os.path.exists(static):
                return static
        # Oops, didn't find it in *any* of 'dirs'
        return None
def find_libraries(self):
    """Locate the CLIPPER/CCP4/MMDB libraries and register them with the compiler.

    Builds a search-path list from LDFLAGS, LIBDIR, the (DY)LD_LIBRARY_PATH
    environment, several conventional prefixes and $CCP4/lib, then resolves
    each required library with _find_library_file. Raises DistutilsSetupError
    when a required library cannot be found. On darwin, if only a dev-named
    library (e.g. libclipper-core.2.dylib) was found, the link name is
    adjusted to include the version suffix.
    """
    #platform = util.get_platform()
    platform = sys.platform
    # NOTE: this aliases (and mutates) the compiler's own library_dirs list.
    lib_dirs = self.compiler.library_dirs
    ldflags = sysconfig.get_config_vars('LDFLAGS')
    for i in ldflags:
        for item in i.split():
            if item.startswith('-L'):
                lib_dirs.append(item[2:])
    lib_dirs.extend(sysconfig.get_config_vars('LIBDIR'))
    # Add current LD_LIBRARY_PATHs
    env_vars = ['LD_LIBRARY_PATH']
    if platform == 'darwin':
        env_vars = ['DYLD_LIBRARY_PATH', 'DYLD_FALLBACK_LIBRARY_PATH']
    for item in env_vars:
        val = os.getenv(item)
        if not val is None:
            dirs = val.split(':')
            lib_dirs.extend(dirs)
    lib_dirs.append(os.path.join(sys.exec_prefix, 'lib'))
    # NOTE(review): os.getenv('HOME') returns None when HOME is unset,
    # which would make os.path.join raise TypeError — TODO confirm/guard.
    lib_dirs.append(os.path.join(os.getenv('HOME'), 'lib'))
    # NOTE(review): '/usr/lib' is listed twice; presumably one entry was
    # meant to be a different path (e.g. '/usr/lib64') — verify.
    lib_dirs.extend(['/usr/lib', '/usr/lib'])
    lib_dirs.append('/usr/local/lib')
    # Determine where CCP4 libraries are
    ccp4_dir = os.getenv('CCP4')
    if not ccp4_dir is None:
        lib_dirs.append(os.path.join(ccp4_dir, 'lib'))
    # Check if we have everything
    clipper_core_lib_name = 'clipper-core'
    clipper_core_lib = self._find_library_file(lib_dirs, clipper_core_lib_name)
    if clipper_core_lib is None:
        raise DistutilsSetupError("Could not find CLIPPER libraries. Paths searched %s" % lib_dirs )
    elif platform == 'darwin':
        try:
            # If we have found only dev named library we have to change library name
            suffix = re.findall('%s(.*)\.dylib' % clipper_core_lib_name, clipper_core_lib)[0]
            clipper_core_lib_name += suffix
        except IndexError:
            pass
    clipper_ccp4_lib_name = 'clipper-ccp4'
    clipper_ccp4_lib = self._find_library_file(lib_dirs, clipper_ccp4_lib_name)
    if clipper_ccp4_lib is None:
        raise DistutilsSetupError("Please compile CLIPPER with CCP4 support")
    elif platform == 'darwin':
        try:
            # If we have found only dev named library we have to change library name
            suffix = re.findall('%s(.*)\.dylib' % clipper_ccp4_lib_name, clipper_ccp4_lib)[0]
            clipper_ccp4_lib_name += suffix
        except IndexError:
            pass
    clipper_mmdb_lib_name = 'clipper-mmdb'
    clipper_mmdb_lib = self._find_library_file(lib_dirs, clipper_mmdb_lib_name)
    if clipper_mmdb_lib is None:
        raise DistutilsSetupError("Please compile CLIPPER with MMDB support")
    elif platform == 'darwin':
        try:
            # If we have found only dev named library we have to change library name
            suffix = re.findall('%s(.*)\.dylib' % clipper_mmdb_lib_name, clipper_mmdb_lib)[0]
            clipper_mmdb_lib_name += suffix
        except IndexError:
            pass
    mmdb_lib_name = 'mmdb2'
    mmdb_lib = self._find_library_file(lib_dirs, mmdb_lib_name)
    if mmdb_lib is None:
        raise DistutilsSetupError("Please compile CLIPPER with MMDB support")
    elif platform == 'darwin':
        try:
            # If we have found only dev named library we have to change library name
            suffix = re.findall('%s(.*)\.dylib' % mmdb_lib_name, mmdb_lib)[0]
            mmdb_lib_name += suffix
        except IndexError:
            pass
    ccp4_lib_name = 'ccp4c'
    ccp4_lib = self._find_library_file(lib_dirs, ccp4_lib_name)
    if ccp4_lib is None:
        raise DistutilsSetupError("Could not find CCP4 libraries. Paths searched %s" % lib_dirs)
    elif platform == 'darwin':
        # If we have found only dev named library we have to change library name
        try:
            suffix = re.findall('%s(.*)\.dylib' % ccp4_lib_name, ccp4_lib)[0]
            ccp4_lib_name += suffix
        except IndexError:
            pass
    log.info("Linking against following CLIPPER libraries:\n\t%s\n\t%s\n\t%s\n\t%s"
             % (clipper_core_lib, clipper_ccp4_lib, clipper_mmdb_lib, mmdb_lib))
    log.info("Linking against following CCP4 library:\n\t%s"
             % ccp4_lib)
    # Register the directories of the found libraries (deduplicated).
    libs = [ os.path.dirname(l) for l in [clipper_core_lib,
            clipper_ccp4_lib, clipper_mmdb_lib, mmdb_lib, ccp4_lib] ]
    [self.compiler.library_dirs.append(l) for l in libs
     if os.path.exists(l) and l not in self.compiler.library_dirs]
    # Assume the headers live in <libdir>/../include next to each library dir.
    headers = [ os.path.join(os.path.dirname(l),'include') for l in libs]
    [self.compiler.include_dirs.append(h) for h in headers
     if os.path.exists(h) and h not in self.compiler.include_dirs]
    # Ok first check if user didn't provide multiple libraries in command line
    # Workaround to Python issue #1326113
    # assuming no space in names
    libraries = []
    for item in self.compiler.libraries:
        if ' ' in item:
            libraries.extend(item.split(' '))
        else:
            libraries.append(item)
    self.compiler.libraries = libraries
    self.compiler.libraries.extend([mmdb_lib_name, ccp4_lib_name, clipper_core_lib_name, clipper_mmdb_lib_name, clipper_ccp4_lib_name, 'clipper-contrib'])
    # NOTE: Python 2 print statements — this file predates Python 3.
    print 'INC', self.compiler.include_dirs
    print 'LIB', self.compiler.libraries
#TODO: Do we need that ?
#TODO: Do we need that ?
class build_scripts(_build_scripts):
    """build_scripts variant that can wrap each installed script in a shell
    wrapper.

    The wrapper template (``scripts/wrapper``) must contain a ``%%BODY%%``
    placeholder which is replaced by the original script's content. Wrapping
    is enabled with the ``--wrap`` command-line flag.
    """
    wrapper_script = 'scripts/wrapper'
    # Build new lists instead of append()-ing: appending would mutate the
    # shared class attributes of the base distutils command in place.
    user_options = _build_scripts.user_options + [
        ('wrap', None, 'wrap with bash script'),
    ]
    boolean_options = _build_scripts.boolean_options + ['wrap']

    def initialize_options(self):
        _build_scripts.initialize_options(self)
        self.wrap = False
        self.wrap_temp = None

    def finalize_options(self):
        _build_scripts.finalize_options(self)
        wrapper_fn = util.convert_path(self.wrapper_script)
        try:
            # Open the converted path; the original opened the unconverted
            # self.wrapper_script, contradicting the warning message below.
            with open(wrapper_fn, 'r') as wrapper:
                self.wrapper = wrapper.read()
        except IOError:
            self.warn("Skipping wrapping: No such file: %s" % wrapper_fn)
            self.wrap = False
        else:
            if '%%BODY%%' not in self.wrapper:
                self.warn("Skipping wrapping: Malformed wrapper script (no %%BODY%%)")
                self.wrap = False
        if self.wrap:
            self.set_undefined_options('build',
                                       ('build_temp', 'wrap_temp')
                                       )

    def wrap_scripts(self):
        """Write wrapped copies of self.scripts into wrap_temp and repoint
        self.scripts at the wrapped copies."""
        wrapped_scripts = []
        for script in self.scripts:
            try:
                with open(script, "r") as f:
                    data = f.read()
            except IOError:
                # In dry-run mode a missing script is tolerated and skipped.
                if not self.dry_run:
                    raise
                continue
            if not data:
                self.warn("%s is an empty file (skipping)" % script)
                continue
            wrapped_data = self.wrapper.replace('%%BODY%%', data)
            outfile = os.path.join(self.wrap_temp, os.path.basename(script))
            wrapped_scripts.append(outfile)
            self.mkpath(self.wrap_temp)
            with open(outfile, 'w') as outf:
                outf.write(wrapped_data)
        self.scripts = wrapped_scripts

    def copy_scripts(self):
        # Wrap first (if requested), then let distutils copy as usual.
        if self.wrap:
            self.wrap_scripts()
        _build_scripts.copy_scripts(self)
| |
"""Library of useful functions that apply to many modules."""
from os import getcwd, path
import logging.handlers
import sys
try:
import yaml
except ImportError as err:
sys.stderr.write("ERROR: {}. Try installing python-yaml.\n".format(err))
raise
# Global objects
_config = None
_loaded_config_file = None
_logger = None
default_config = "config.yaml"
# Types
class Enum(tuple):
    """Lightweight enumeration: a tuple of member names.

    Each member's value is its index in the tuple; attribute access on a
    member name (e.g. ``states.GO``) yields that index.
    """

    def __getattr__(self, name):
        # Unknown attributes are resolved as member lookups; a missing
        # member raises ValueError (tuple.index semantics), as before.
        return tuple.index(self, name)

    def fromString(self, name):
        """Return the integer value of member *name*."""
        return tuple.index(self, name)

    def toString(self, value):
        """Return the member name for integer *value*."""
        return self[value]
def get_config(config_file=None):
    """Load and return configuration options.

    Note that this config is only loaded once (it's a singleton).

    :param config_file: YAML file to load config from.
    :type config_file: string
    :returns: Dict description of configuration, or None when the file
        does not exist (an error is written to stderr in that case).
    """
    # Don't load config file if it is already loaded (and filename matches)
    global _config, _loaded_config_file
    # We can't simply throw default_config in as a default parameter. Doing so
    # would make it impossible to differentiate between an unspecified config
    # (which should use the default only when no config has been loaded) versus
    # a desire to actually load the default config explicitly.
    if config_file is None:
        if _loaded_config_file is None:
            config_file = default_config
        else:
            # Some config is already loaded; return the cached singleton.
            return _config
    if config_file == _loaded_config_file:
        return _config
    try:
        with open(config_file) as config_fd:
            # safe_load instead of bare yaml.load: yaml.load without an
            # explicit Loader is unsafe on untrusted files and requires a
            # Loader argument in PyYAML >= 6. Config files never need
            # arbitrary-Python-object tags.
            _config = yaml.safe_load(config_fd)
            _loaded_config_file = config_file
            return _config
    except FileNotFoundError:
        sys.stderr.write("{} not found".format(config_file))
def write_config(new_config):
    """Write an updated version of config to global _config.

    :param new_config: Updated version of config to write.
    :type new_config: dict
    """
    global _config
    # Rebind the module-level singleton; callers see the new dict on the
    # next get_config() call.
    _config = new_config
def load_strategy(strat_file=None):
    """Load the YAML description of the strategy for this round.

    :param strat_file: Name of strategy file to load; when None, the file
        named by the "strategy" key of the global config is used.
    :returns: Dict description of strategy for this round.
    """
    # Fall back to the strategy file named in the global configuration.
    if strat_file is None:
        strat_file = get_config()["strategy"]
    # Open and parse the strategy description.
    with open(strat_file) as strat_fd:
        return yaml.load(strat_fd)
def set_testing(state, config_file=None):
    """Set the testing flag in config to the given boolean value.

    The testing flag is used to either load point code to simulated hardware
    files or real file descriptions on the BBB.

    :param state: Value to set testing config flag to.
    :type state: boolean
    :param config_file: YAML file to write config to.
    :type config_file: string
    :raises: TypeError
    """
    # isinstance instead of type(...) != bool: idiomatic type check; still
    # rejects non-bool values (including 0/1 ints).
    if not isinstance(state, bool):
        raise TypeError("State must be a boolean, not updating.")
    # get_config(None) already falls back to the default/loaded config, so
    # no need to branch on config_file here.
    config = get_config(config_file)
    # Update config with new testing state
    config["testing"] = state
    # Write updated config
    write_config(config)
def get_logger():
    """Build and return a logger for formatted stream and file output.

    Note that if a logger has already been built, a new one will not
    be created. The previously created logger will be returned. This
    prevents running unnecessary setup twice, as well as premature logrolls.
    Two logger objects would not actually be created, as logger acts as
    a singleton.

    TODO (dfarrell07): Refactor to not use a global

    :returns: The constructed logging object.
    """
    # Don't run setup if logger has been built (would logroll early)
    global _logger
    if _logger is not None:
        return _logger
    # Get config so that path to log file can be read.
    config = get_config()
    # Get path from repo root to log file
    log_file = config["logging"]["log_file"]
    # Build logger; level DEBUG so handlers do the actual filtering.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Check if log exists and should therefore be rolled
    needRoll = False
    if path.isfile(log_file):
        needRoll = True
    # Build file output formatter
    file_formatter = logging.Formatter("%(asctime)s | %(levelname)s | "
                                       "%(filename)s | %(funcName)s | "
                                       "%(lineno)d | %(message)s")
    # Build stream output formatter (no timestamp for console readability)
    stream_formatter = logging.Formatter("%(filename)s | %(funcName)s | "
                                         "%(lineno)d | %(levelname)s | "
                                         "%(message)s")
    # Build file handler (for output log output to files); delay=True defers
    # opening the file until the first record is emitted.
    file_handler = logging.handlers.RotatingFileHandler(log_file,
                                                        mode="a",
                                                        backupCount=50,
                                                        delay=True)
    # Level names from config are resolved against the logging module;
    # unknown names fall back to DEBUG/INFO defaults.
    file_handler_level = getattr(logging,
                                 config["logging"]["file_handler_level"].upper(),
                                 logging.DEBUG)
    file_handler.setLevel(file_handler_level)
    file_handler.setFormatter(file_formatter)
    # Build stream handler (for output to stdout)
    stream_handler = logging.StreamHandler()
    stream_handler_level = getattr(logging,
                                   config["logging"]["stream_handler_level"].upper(),
                                   logging.INFO)
    stream_handler.setLevel(stream_handler_level)
    stream_handler.setFormatter(stream_formatter)
    # Add handlers to logger
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    # This is a stale log, so roll it; handlers[0] is the file handler added above.
    if needRoll:
        logger.handlers[0].doRollover()
    logger.debug("Logger built")
    _logger = logger
    return logger
def api_call(f):
    """Register *f* as callable through the API.

    Marks the function with a ``__api_call`` attribute so the API layer can
    discover registered methods; the function object itself is returned
    unchanged (usable as a decorator).

    :param f: Function to register.
    :returns: The same function, marked for API exposure.
    """
    setattr(f, '__api_call', True)
    return f
| |
from django.db import connection
from django.utils.translation import ugettext_noop as _
from api import status
from api.api_views import APIView
from api.exceptions import PermissionDenied, PreconditionRequired
from api.signals import dc_relationship_changed, group_relationship_changed
from api.task.response import SuccessTaskResponse, FailureTaskResponse
from api.task.log import delete_tasklog_cached
from api.utils.db import get_object
from api.utils.views import call_api_view
from api.dc.utils import get_dc, get_dcs, remove_dc_binding_virt_object
from api.dc.base.serializers import DcSerializer, SuperDcSerializer, ExtendedDcSerializer, DefaultDcSettingsSerializer
from api.dc.messages import LOG_DC_CREATE, LOG_DC_UPDATE
from api.accounts.user.utils import remove_user_dc_binding
from api.accounts.messages import LOG_GROUP_UPDATE
from api.messages import LOG_VIRT_OBJECT_UPDATE_MESSAGES
from vms.models import Dc, DefaultDc
from gui.models import User
class DcView(APIView):
    """API view implementing GET/POST/PUT/DELETE for datacenters (Dc).

    Staff users get the SuperDcSerializer; extended stats (ExtendedDcSerializer)
    are staff-only. Mutating operations emit dc/group relationship signals via
    ``connection.on_commit`` and keep the DC-bound flags of groups and users
    consistent.
    """
    serializer = DcSerializer
    order_by_default = order_by_fields = ('name',)
    order_by_field_map = {'created': 'id'}

    def __init__(self, request, name, data):
        super(DcView, self).__init__(request)
        self.data = data
        self.name = name

        if request.user.is_staff:
            self.serializer = SuperDcSerializer

        if self.extended:
            self.serializer = ExtendedDcSerializer
            extra = {'select': ExtendedDcSerializer.extra_select}
        else:
            extra = None

        if name:
            # Return dc from cache (this will also check user permissions for requested DC)
            # We do this because GET is available for anybody
            if request.method == 'GET':
                get_dc(request, name)
            # But we will use this fresh DC object
            self.dc = get_object(request, Dc, {'name': name}, sr=('owner',), extra=extra)
            # Update current datacenter to log tasks for this dc
            request.dc = self.dc
        else:
            # GET many is available for anybody
            if (request.user.is_staff or self.full) or self.extended:
                pr = ('roles',)
            else:
                pr = None
            self.dc = get_dcs(request, sr=('owner',), pr=pr, extra=extra, order_by=self.order_by)

    def is_extended(self, data):
        # Only SuperAdmin has access to extended stats!
        return super(DcView, self).is_extended(data) and self.request.user.is_staff

    def get(self, many=False):
        return self._get(self.dc, many=many)

    def _remove_user_dc_binding(self, task_id, owner=None, groups=None):
        """Clear the dc_bound flag for the owner and for group members bound
        to a different datacenter."""
        dc = self.dc

        if owner:
            remove_user_dc_binding(task_id, owner, dc=dc)

        if groups:
            for user in User.objects.distinct().filter(roles__in=groups, dc_bound__isnull=False).exclude(dc_bound=dc):
                remove_user_dc_binding(task_id, user, dc=dc)

    def post(self):
        dc = self.dc
        request = self.request

        if not DefaultDc().settings.VMS_DC_ENABLED:
            raise PermissionDenied

        dc.owner = request.user  # just a default
        dc.alias = dc.name  # just a default
        ser = self.serializer(request, dc, data=self.data)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, obj=dc)

        # Create default custom settings suitable for new DC (without global settings)
        default_custom_settings = DefaultDc().custom_settings.copy()

        for key in DefaultDcSettingsSerializer.get_global_settings():
            try:
                del default_custom_settings[key]
            except KeyError:
                pass

        # Copy custom settings from default DC and save new DC
        ser.object.custom_settings = default_custom_settings
        ser.save()
        res = SuccessTaskResponse(request, ser.data, status=status.HTTP_201_CREATED, obj=dc,
                                  detail_dict=ser.detail_dict(), msg=LOG_DC_CREATE)
        dcs = dc.settings
        task_id = res.data.get('task_id')
        connection.on_commit(lambda: dc_relationship_changed.send(task_id, dc_name=dc.name))  # Signal!

        # Changing DC groups affects the group.dc_bound flag
        if dc.roles.exists():
            # The groups that are added to newly created DC should not be DC-bound anymore
            for group in dc.roles.all():
                if group.dc_bound:
                    remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, group, user=request.user)
                # BUGFIX: bind group.name via a default argument. on_commit
                # callbacks run after the loop has finished, so a plain
                # closure over `group` would fire every callback with the
                # *last* group's name.
                connection.on_commit(lambda group_name=group.name: group_relationship_changed.send(
                    task_id, dc_name=dc.name, group_name=group_name))  # Signal!

        # Creating new DC can affect the dc_bound flag on users (owner + users from dc.groups)
        self._remove_user_dc_binding(task_id, owner=dc.owner, groups=dc.roles.all())

        # Create association with default server domain
        if dcs.DNS_ENABLED:
            from api.dc.domain.views import dc_domain
            call_api_view(request, None, dc_domain, dcs.VMS_VM_DOMAIN_DEFAULT, data={'dc': dc}, log_response=True)

        # Create association with default rescue CD
        if dcs.VMS_ISO_RESCUECD:
            from api.dc.iso.views import dc_iso
            call_api_view(request, None, dc_iso, dcs.VMS_ISO_RESCUECD, data={'dc': dc}, log_response=True)

        return res

    def put(self):
        dc = self.dc
        request = self.request
        ser = self.serializer(request, dc, data=self.data, partial=True)

        if not ser.is_valid():
            return FailureTaskResponse(request, ser.errors, obj=dc)

        ser.save()
        res = SuccessTaskResponse(request, ser.data, obj=dc, detail_dict=ser.detail_dict(), msg=LOG_DC_UPDATE)
        task_id = res.data.get('task_id')

        # Changing DC groups affects the group.dc_bound flag
        if ser.groups_changed:
            # The groups that are removed or added should not be DC-bound anymore
            for group in ser.groups_changed:
                if group.dc_bound:
                    remove_dc_binding_virt_object(task_id, LOG_GROUP_UPDATE, group, user=request.user)
                # BUGFIX: bind group.name via a default argument (see post()):
                # otherwise all deferred callbacks would use the last group.
                connection.on_commit(lambda group_name=group.name: group_relationship_changed.send(
                    task_id, dc_name=dc.name, group_name=group_name))  # Signal!

        # After changing the DC owner or changing DC groups we have to invalidate the list of admins for this DC
        if ser.owner_changed or ser.groups_changed:
            User.clear_dc_admin_ids(dc)
            # Remove user.dc_bound flag for new DC owner
            # Remove user.dc_bound flag for users in new dc.groups, which are DC-bound, but not to this datacenter
            self._remove_user_dc_binding(task_id, owner=dc.owner, groups=ser.groups_added)
            connection.on_commit(lambda: dc_relationship_changed.send(task_id, dc_name=dc.name))  # Signal!

        # When a user is removed as owner from non-default DC or groups are changed on a non-default DC
        # we have to update the current_dc on every affected user, because he could remain access to this DC
        # (this is because get_dc() uses current_dc as a shortcut)
        if not dc.is_default():
            if ser.owner_changed and not ser.owner_changed.is_staff:
                ser.owner_changed.reset_current_dc()

            if ser.removed_users:
                for user in ser.removed_users.select_related('default_dc').exclude(is_staff=True):
                    user.reset_current_dc()

        return res

    def delete(self):
        dc, request = self.dc, self.request

        if dc.is_default():
            raise PreconditionRequired(_('Default datacenter cannot be deleted'))

        if dc.dcnode_set.exists():
            raise PreconditionRequired(_('Datacenter has nodes'))  # also "checks" DC backups

        if dc.vm_set.exists():
            raise PreconditionRequired(_('Datacenter has VMs'))

        if dc.backup_set.exists():
            raise PreconditionRequired(_('Datacenter has backups'))  # should be checked by dcnode check above

        dc_id = dc.id
        ser = self.serializer(request, dc)
        dc_bound_objects = dc.get_bound_objects()
        # After deleting a DC the current_dc is automatically set to DefaultDc by the on_delete db field parameter
        ser.object.delete()
        # Remove cached tasklog for this DC (DB tasklog entries will be remove automatically)
        delete_tasklog_cached(dc_id)
        res = SuccessTaskResponse(request, None)  # no msg => won't be logged
        # Every DC-bound object looses their DC => becomes DC-unbound
        task_id = res.data.get('task_id')
        connection.on_commit(lambda: dc_relationship_changed.send(task_id, dc_name=ser.object.name))  # Signal!

        # Update bound virt objects to be DC-unbound after DC removal
        for model, objects in dc_bound_objects.items():
            msg = LOG_VIRT_OBJECT_UPDATE_MESSAGES.get(model, None)
            if objects and msg:
                for obj in objects:
                    if obj.dc_bound:
                        # NOTE(review): DefaultDc.id is the model *class*
                        # attribute here, not an instance primary key —
                        # presumably DefaultDc().id was intended; confirm.
                        # noinspection PyUnresolvedReferences
                        remove_dc_binding_virt_object(task_id, msg, obj, user=request.user, dc_id=DefaultDc.id)

        return res
| |
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import functools
import operator
from . import Image
#
# helpers
def _border(border):
if isinstance(border, tuple):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if isinstance(color, str):
from . import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise OSError("not supported for this image mode")
#
# actions
def autocontrast(image, cutoff=0, ignore=None):
    """
    Maximize (normalize) image contrast. This function calculates a
    histogram of the input image, removes **cutoff** percent of the
    lightest and darkest pixels from the histogram, and remaps the image
    so that the darkest pixel becomes black (0), and the lightest
    becomes white (255).

    :param image: The image to process.
    :param cutoff: How many percent to cut off from the histogram.
    :param ignore: The background pixel value (use None for no background).
    :return: An image.
    """
    histogram = image.histogram()
    lut = []
    # Each band of the histogram is 256 entries long; build one LUT segment
    # per band.
    for layer in range(0, len(histogram), 256):
        h = histogram[layer : layer + 256]
        if ignore is not None:
            # get rid of outliers
            try:
                h[ignore] = 0
            except TypeError:
                # assume sequence
                for ix in ignore:
                    h[ix] = 0
        if cutoff:
            # cut off pixels from both ends of the histogram
            # get number of pixels
            n = 0
            for ix in range(256):
                n = n + h[ix]
            # remove cutoff% pixels from the low end
            cut = n * cutoff // 100
            for lo in range(256):
                if cut > h[lo]:
                    cut = cut - h[lo]
                    h[lo] = 0
                else:
                    h[lo] -= cut
                    cut = 0
                if cut <= 0:
                    break
            # remove cutoff% samples from the hi end
            cut = n * cutoff // 100
            for hi in range(255, -1, -1):
                if cut > h[hi]:
                    cut = cut - h[hi]
                    h[hi] = 0
                else:
                    h[hi] -= cut
                    cut = 0
                if cut <= 0:
                    break
        # find lowest/highest samples after preprocessing
        for lo in range(256):
            if h[lo]:
                break
        for hi in range(255, -1, -1):
            if h[hi]:
                break
        if hi <= lo:
            # don't bother: histogram is flat or empty, use identity mapping
            lut.extend(list(range(256)))
        else:
            # Linear stretch: map [lo, hi] onto [0, 255], clamping.
            scale = 255.0 / (hi - lo)
            offset = -lo * scale
            for ix in range(256):
                ix = int(ix * scale + offset)
                if ix < 0:
                    ix = 0
                elif ix > 255:
                    ix = 255
                lut.append(ix)
    return _lut(image, lut)
def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127):
    """
    Colorize grayscale image.
    This function calculates a color wedge which maps all black pixels in
    the source image to the first color and all white pixels to the
    second color. If **mid** is specified, it uses three-color mapping.
    The **black** and **white** arguments should be RGB tuples or color names;
    optionally you can use three-color mapping by also specifying **mid**.
    Mapping positions for any of the colors can be specified
    (e.g. **blackpoint**), where these parameters are the integer
    value corresponding to where the corresponding color should be mapped.
    These parameters must have logical order, such that
    **blackpoint** <= **midpoint** <= **whitepoint** (if **mid** is specified).

    :param image: The image to colorize.
    :param black: The color to use for black input pixels.
    :param white: The color to use for white input pixels.
    :param mid: The color to use for midtone input pixels.
    :param blackpoint: an int value [0, 255] for the black mapping.
    :param whitepoint: an int value [0, 255] for the white mapping.
    :param midpoint: an int value [0, 255] for the midtone mapping.
    :return: An image.
    """
    # Initial asserts
    assert image.mode == "L"
    if mid is None:
        assert 0 <= blackpoint <= whitepoint <= 255
    else:
        assert 0 <= blackpoint <= midpoint <= whitepoint <= 255
    # Define colors from arguments (names are resolved to RGB tuples)
    black = _color(black, "RGB")
    white = _color(white, "RGB")
    if mid is not None:
        mid = _color(mid, "RGB")
    # Empty lists for the per-channel mapping (each ends up 256 entries long)
    red = []
    green = []
    blue = []
    # Create the low-end values: constant black color below blackpoint
    for i in range(0, blackpoint):
        red.append(black[0])
        green.append(black[1])
        blue.append(black[2])
    # Create the mapping (2-color): linear ramp black -> white
    if mid is None:
        range_map = range(0, whitepoint - blackpoint)
        for i in range_map:
            red.append(black[0] + i * (white[0] - black[0]) // len(range_map))
            green.append(black[1] + i * (white[1] - black[1]) // len(range_map))
            blue.append(black[2] + i * (white[2] - black[2]) // len(range_map))
    # Create the mapping (3-color): black -> mid, then mid -> white
    else:
        range_map1 = range(0, midpoint - blackpoint)
        range_map2 = range(0, whitepoint - midpoint)
        for i in range_map1:
            red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1))
            green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1))
            blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1))
        for i in range_map2:
            red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2))
            green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2))
            blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2))
    # Create the high-end values: constant white color above whitepoint
    for i in range(0, 256 - whitepoint):
        red.append(white[0])
        green.append(white[1])
        blue.append(white[2])
    # Return converted image: apply the three concatenated channel LUTs
    image = image.convert("RGB")
    return _lut(image, red + green + blue)
def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)):
    """
    Returns a sized and padded version of the image, expanded to fill the
    requested aspect ratio and size.

    :param image: The image to size and crop.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: What resampling method to use. Default is
                   :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
    :param color: The background color of the padded image.
    :param centering: Control the position of the original image within the
                      padded version.
                          (0.5, 0.5) will keep the image centered
                          (0, 0) will keep the image aligned to the top left
                          (1, 1) will keep the image aligned to the bottom
                          right
    :return: An image.
    """
    im_ratio = image.width / image.height
    dest_ratio = size[0] / size[1]

    if im_ratio == dest_ratio:
        # Aspect ratios already match: a plain resize, no padding needed.
        out = image.resize(size, resample=method)
    else:
        out = Image.new(image.mode, size, color)
        if im_ratio > dest_ratio:
            # Source is wider: fit to width, pad top/bottom.
            new_height = int(image.height / image.width * size[0])
            if new_height != size[1]:
                image = image.resize((size[0], new_height), resample=method)
            # centering[1] (clamped to [0, 1]) distributes the vertical padding.
            y = int((size[1] - new_height) * max(0, min(centering[1], 1)))
            out.paste(image, (0, y))
        else:
            # Source is taller: fit to height, pad left/right.
            new_width = int(image.width / image.height * size[1])
            if new_width != size[0]:
                image = image.resize((new_width, size[1]), resample=method)
            # centering[0] (clamped to [0, 1]) distributes the horizontal padding.
            x = int((size[0] - new_width) * max(0, min(centering[0], 1)))
            out.paste(image, (x, 0))
    return out
def crop(image, border=0):
    """
    Remove border from image. The same amount of pixels are removed
    from all four sides. This function works on all image modes.

    .. seealso:: :py:meth:`~PIL.Image.Image.crop`

    :param image: The image to crop.
    :param border: The number of pixels to remove.
    :return: An image.
    """
    left, top, right, bottom = _border(border)
    width, height = image.size
    # Shrink the crop box inward by the requested border on each side.
    return image.crop((left, top, width - right, height - bottom))
def scale(image, factor, resample=Image.BICUBIC):
    """
    Returns a rescaled image by a specific factor given in parameter.
    A factor greater than 1 expands the image, between 0 and 1 contracts the
    image.

    :param image: The image to rescale.
    :param factor: The expansion factor, as a float.
    :param resample: What resampling method to use. Default is
                     :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if factor <= 0:
        raise ValueError("the factor must be greater than 0")
    if factor == 1:
        # Nothing to do; hand back an independent copy.
        return image.copy()
    new_size = (round(factor * image.width), round(factor * image.height))
    return image.resize(new_size, resample)
def deform(image, deformer, resample=Image.BILINEAR):
    """
    Deform the image.

    :param image: The image to deform.
    :param deformer: A deformer object. Any object that implements a
                     **getmesh** method can be used.
    :param resample: An optional resampling filter. Same values possible as
                     in the PIL.Image.transform function.
    :return: An image.
    """
    mesh = deformer.getmesh(image)
    return image.transform(image.size, Image.MESH, mesh, resample)
def equalize(image, mask=None):
    """
    Equalize the image histogram. This function applies a non-linear
    mapping to the input image, in order to create a uniform
    distribution of grayscale values in the output image.

    :param image: The image to equalize.
    :param mask: An optional mask. If given, only the pixels selected by
                 the mask are included in the analysis.
    :return: An image.
    """
    if image.mode == "P":
        image = image.convert("RGB")
    h = image.histogram(mask)
    lut = []
    # Process each 256-entry band of the histogram separately.
    for b in range(0, len(h), 256):
        # Only the non-zero histogram entries participate in the step size.
        histo = [_f for _f in h[b : b + 256] if _f]
        if len(histo) <= 1:
            # Degenerate band (at most one value present): identity mapping.
            lut.extend(list(range(256)))
        else:
            # Total count minus the last bin, spread over 255 output levels.
            step = (functools.reduce(operator.add, histo) - histo[-1]) // 255
            if not step:
                lut.extend(list(range(256)))
            else:
                # Cumulative-histogram mapping, starting at half a step for
                # rounding.
                n = step // 2
                for i in range(256):
                    lut.append(n // step)
                    n = n + h[i + b]
    return _lut(image, lut)
def expand(image, border=0, fill=0):
    """
    Add border to the image

    :param image: The image to expand.
    :param border: Border width, in pixels.
    :param fill: Pixel fill value (a color value). Default is 0 (black).
    :return: An image.
    """
    left, top, right, bottom = _border(border)
    new_size = (left + image.size[0] + right, top + image.size[1] + bottom)
    # Start from a solid background in the fill color, then paste the
    # original image inside the border.
    out = Image.new(image.mode, new_size, _color(fill, image.mode))
    out.paste(image, (left, top))
    return out
def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)):
    """
    Returns a sized and cropped version of the image, cropped to the
    requested aspect ratio and size.
    This function was contributed by Kevin Cazabon.

    :param image: The image to size and crop.
    :param size: The requested output size in pixels, given as a
                 (width, height) tuple.
    :param method: What resampling method to use. Default is
                   :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.
    :param bleed: Remove a border around the outside of the image from all
                  four edges. The value is a decimal percentage (use 0.01 for
                  one percent). The default value is 0 (no border).
                  Cannot be greater than or equal to 0.5.
    :param centering: Control the cropping position. Use (0.5, 0.5) for
                      center cropping (e.g. if cropping the width, take 50% off
                      of the left side, and therefore 50% off the right side).
                      (0.0, 0.0) will crop from the top left corner (i.e. if
                      cropping the width, take all of the crop off of the right
                      side, and if cropping the height, take all of it off the
                      bottom). (1.0, 0.0) will crop from the bottom left
                      corner, etc. (i.e. if cropping the width, take all of the
                      crop off the left side, and if cropping the height take
                      none from the top, and therefore all off the bottom).
    :return: An image.
    """
    # by Kevin Cazabon, Feb 17/2000
    # kevin@cazabon.com
    # http://www.cazabon.com

    # ensure centering is mutable; out-of-range components fall back to 0.5
    centering = list(centering)

    if not 0.0 <= centering[0] <= 1.0:
        centering[0] = 0.5
    if not 0.0 <= centering[1] <= 1.0:
        centering[1] = 0.5

    # invalid bleed values fall back to no bleed
    if not 0.0 <= bleed < 0.5:
        bleed = 0.0

    # calculate the area to use for resizing and cropping, subtracting
    # the 'bleed' around the edges

    # number of pixels to trim off on Top and Bottom, Left and Right
    bleed_pixels = (bleed * image.size[0], bleed * image.size[1])

    live_size = (
        image.size[0] - bleed_pixels[0] * 2,
        image.size[1] - bleed_pixels[1] * 2,
    )

    # calculate the aspect ratio of the live_size
    live_size_ratio = live_size[0] / live_size[1]

    # calculate the aspect ratio of the output image
    output_ratio = size[0] / size[1]

    # figure out if the sides or top/bottom will be cropped off
    if live_size_ratio == output_ratio:
        # live_size is already the needed ratio
        crop_width = live_size[0]
        crop_height = live_size[1]
    elif live_size_ratio >= output_ratio:
        # live_size is wider than what's needed, crop the sides
        crop_width = output_ratio * live_size[1]
        crop_height = live_size[1]
    else:
        # live_size is taller than what's needed, crop the top and bottom
        crop_width = live_size[0]
        crop_height = live_size[0] / output_ratio

    # make the crop; centering distributes the leftover live area
    crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0]
    crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1]

    crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)

    # resize the image and return it (resize crops via the box argument)
    return image.resize(size, method, box=crop)
def flip(image):
    """
    Flip the image vertically (top to bottom).

    :param image: The image to flip.
    :return: An image.
    """
    # Vertical flip is a transpose with the FLIP_TOP_BOTTOM method.
    return image.transpose(Image.FLIP_TOP_BOTTOM)
def grayscale(image):
    """
    Convert the image to grayscale.

    :param image: The image to convert.
    :return: An image in mode "L".
    """
    # Mode "L" is Pillow's single-band grayscale mode.
    return image.convert("L")
def invert(image):
    """
    Invert (negate) the image.

    :param image: The image to invert.
    :return: An image.
    """
    # Map every level i to its complement 255 - i.
    lut = [255 - i for i in range(256)]
    return _lut(image, lut)
def mirror(image):
    """Return a copy of *image* flipped horizontally (left to right).

    :param image: The image to mirror.
    :return: A new, horizontally mirrored image.
    """
    mirrored = image.transpose(Image.FLIP_LEFT_RIGHT)
    return mirrored
def posterize(image, bits):
    """Reduce the number of significant bits for each color channel.

    :param image: The image to posterize.
    :param bits: The number of high-order bits to keep per channel (1-8).
    :return: An image.
    """
    # Zero out the (8 - bits) low-order bits of every channel value.
    keep_mask = ~(2 ** (8 - bits) - 1)
    lut = [value & keep_mask for value in range(256)]
    return _lut(image, lut)
def solarize(image, threshold=128):
    """Invert all pixel values at or above *threshold*.

    :param image: The image to solarize.
    :param threshold: Greyscale level; values >= threshold are inverted.
    :return: An image.
    """
    # Values below the threshold pass through unchanged; the rest are negated.
    lut = [value if value < threshold else 255 - value
           for value in range(256)]
    return _lut(image, lut)
def exif_transpose(image):
    """Return *image* transposed according to its EXIF Orientation tag.

    If the tag (0x0112) is present and maps to a known transposition, a
    transposed copy is returned with the orientation tag removed from the
    copy's EXIF bytes; otherwise a plain copy of the image is returned.

    :param image: The image to transpose.
    :return: An image.
    """
    # EXIF orientation values 2-8 map onto transpose operations; 1 (normal)
    # and missing/unknown values fall through to a plain copy.
    orientation_methods = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }
    exif = image.getexif()
    method = orientation_methods.get(exif.get(0x0112))
    if method is None:
        return image.copy()
    transposed_image = image.transpose(method)
    del exif[0x0112]
    transposed_image.info["exif"] = exif.tobytes()
    return transposed_image
| |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Top-level presubmit script for V8.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
# Paths exempt from the source checks below: tests, third-party code and
# tooling are not held to the same rules as production sources.
_EXCLUDED_PATHS = (
    r"^test[\\\/].*",
    r"^testing[\\\/].*",
    r"^third_party[\\\/].*",
    r"^tools[\\\/].*",
)
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.+-unittest\.cc',
    # Has a method VisitForTest().
    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
    # Test extension.
    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
)
# Advisory warning used when production code appears to call test-only
# helpers; it never blocks the commit queue.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.')
def _V8PresubmitChecks(input_api, output_api):
  """Runs the V8 presubmit checks.

  Loads the checker classes from tools/presubmit.py (C++ lint, source
  formatting, external-reference registration, authorized authors) and
  converts each failure into a PresubmitError.
  """
  import sys
  # tools/ is not on sys.path when this file is eval-ed by the presubmit
  # framework, so add it before importing the checkers.
  sys.path.append(input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools'))
  from presubmit import CppLintProcessor
  from presubmit import SourceProcessor
  from presubmit import CheckExternalReferenceRegistration
  from presubmit import CheckAuthorizedAuthor
  results = []
  if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError("C++ lint check failed"))
  if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError(
        "Copyright header, trailing whitespaces and two empty lines " \
        "between declarations check failed"))
  if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
    results.append(output_api.PresubmitError(
        "External references registration check failed"))
  results.extend(CheckAuthorizedAuthor(input_api, output_api))
  return results
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.

  :return: a list of presubmit errors/notifications (possibly empty).
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path
  # Gather, per affected C++ file, the lines added/changed in this CL.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])
  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
  # Split violations: DISALLOW rules are hard errors, the rest are warnings.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)
  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
  """Attempts to prevent inclusion of inline headers into normal header
  files. This tries to establish a layering where inline headers can be
  included by other inline headers or compilation units only."""
  header_file_pattern = r'(?!.+-inl\.h).+\.h'
  inl_include_re = input_api.re.compile(r'#include ".+-inl.h"')
  include_warning = (
    'You might be including an inline header (e.g. foo-inl.h) within a\n'
    'normal header (e.g. bar.h) file. Can you avoid introducing the\n'
    '#include? The commit queue will not block on this warning.')

  def FilterFile(affected_file):
    # Restrict the scan to non-inline headers outside the excluded paths.
    return input_api.FilterSourceFile(
        affected_file,
        white_list=(header_file_pattern, ),
        black_list=_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)

  problems = [
      '%s:%d\n %s' % (f.LocalPath(), line_number, line.strip())
      for f in input_api.AffectedSourceFiles(FilterFile)
      for line_number, line in f.ChangedContents()
      if inl_include_re.search(line)
  ]
  if not problems:
    return []
  return [output_api.PresubmitPromptOrNotify(include_warning, problems)]
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # Only .cc files are scanned; for-testing declarations in headers are too
  # hard to tell apart from calls without a real C++ parser.
  source_file_pattern = r'.+\.cc'
  name_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
  call_re = input_api.re.compile(r'(%s)\s*\(' % name_pattern)
  comment_re = input_api.re.compile(r'//.*(%s)' % name_pattern)
  definition_re = input_api.re.compile(
      r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (name_pattern, name_pattern))

  def FilterFile(affected_file):
    return input_api.FilterSourceFile(
        affected_file,
        white_list=(source_file_pattern, ),
        black_list=(_EXCLUDED_PATHS + _TEST_CODE_EXCLUDED_PATHS +
                    input_api.DEFAULT_BLACK_LIST))

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    for line_number, line in f.ChangedContents():
      # A hit only counts when the name is called, is not merely mentioned
      # in a comment, and is not itself a definition.
      if (call_re.search(line) and
          not comment_re.search(line) and
          not definition_re.search(line)):
        problems.append('%s:%d\n %s' % (local_path, line_number, line.strip()))
  if problems:
    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
  return []
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  # Each entry is the (list-valued) result of one check, run in order.
  partial_results = [
      input_api.canned_checks.CheckOwners(
          input_api, output_api, source_file_filter=None),
      input_api.canned_checks.CheckPatchFormatted(input_api, output_api),
      _V8PresubmitChecks(input_api, output_api),
      _CheckUnwantedDependencies(input_api, output_api),
      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api),
      _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api),
  ]
  results = []
  for partial in partial_results:
    results.extend(partial)
  return results
def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if include/v8-version.h has been updated."""
src_version = 'include/v8-version.h'
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api, warn):
"""Checks usage of LOG= flag in the commit message."""
results = []
if (input_api.change.BUG and input_api.change.BUG != 'none' and
not 'LOG' in input_api.change.tags):
text = ('An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.')
if warn:
results.append(output_api.PresubmitPromptWarning(text))
else:
results.append(output_api.PresubmitError(text))
return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit checks run when the change is uploaded for review."""
  # LOG= problems are only warnings at upload time.
  return (_CommonChecks(input_api, output_api) +
          _CheckChangeLogFlag(input_api, output_api, True))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit checks run at commit (landing) time."""
  results = _CommonChecks(input_api, output_api)
  # Missing LOG= is a hard error at commit time, and a description is required.
  results += _CheckChangeLogFlag(input_api, output_api, False)
  results += input_api.canned_checks.CheckChangeHasDescription(
      input_api, output_api)
  # Consult the tree status unless this change is a sanctioned version bump.
  if not _SkipTreeCheck(input_api, output_api):
    results += input_api.canned_checks.CheckTreeIsOpen(
        input_api, output_api,
        json_url='http://v8-status.appspot.com/current?format=json')
  return results
def GetPreferredTryMasters(project, change):
  """Map every change to the default try builders on tryserver.v8."""
  # All builders run the same default test suite; *project* and *change*
  # are required by the presubmit API but do not influence the result.
  builders = (
      'v8_linux_rel',
      'v8_linux_dbg',
      'v8_linux_nodcheck_rel',
      'v8_linux_gcc_compile_rel',
      'v8_linux64_rel',
      'v8_linux64_asan_rel',
      'v8_linux64_avx2_rel',
      'v8_win_rel',
      'v8_win_compile_dbg',
      'v8_win_nosnap_shared_compile_rel',
      'v8_win64_rel',
      'v8_mac_rel',
      'v8_linux_arm_rel',
      'v8_linux_arm64_rel',
      'v8_linux_mipsel_compile_rel',
      'v8_linux_mips64el_compile_rel',
      'v8_android_arm_compile_rel',
      'v8_linux_chromium_gn_rel',
  )
  return {
      'tryserver.v8': {name: set(['defaulttests']) for name in builders},
  }
| |
from copy import deepcopy
import json
from datetime import datetime
from flask_login import current_user
from werkzeug.datastructures import MultiDict
from portality import models, lock
from portality.core import app
# from portality.formcontext import formcontext
from portality.forms.application_forms import JournalFormFactory
from portality.tasks.redis_huey import main_queue
from portality.decorators import write_required
from portality.background import AdminBackgroundTask, BackgroundApi, BackgroundException, BackgroundSummary
def journal_manage(selection_query, dry_run=True, editor_group='', note='', **kwargs):
    """Bulk-edit the journals matched by *selection_query*.

    :param selection_query: Elasticsearch query selecting the journals to edit.
    :param dry_run: when True, only check admin privileges and report how many
        journals would be affected; no background job is created.
    :param editor_group: if non-empty, reassign the journals to this editor group.
    :param note: if non-empty, append this note to every selected journal.
    :param kwargs: additional replacement metadata fields to apply.
    :return: a BackgroundSummary describing the (prospective) job.
    """
    ids = JournalBulkEditBackgroundTask.resolve_selection_query(selection_query)
    if dry_run:
        JournalBulkEditBackgroundTask.check_admin_privilege(current_user.id)
        return BackgroundSummary(None, affected={"journals" : len(ids)})
    # kwargs collected via ** is always a dict (never None), so no None-guard
    # is needed; fold the editor-group change into the replacement metadata.
    if editor_group != "":
        kwargs["editor_group"] = editor_group
    job = JournalBulkEditBackgroundTask.prepare(
        current_user.id,
        selection_query=selection_query,
        note=note,
        ids=ids,
        replacement_metadata=kwargs
    )
    JournalBulkEditBackgroundTask.submit(job)
    job_id = job.id if job is not None else None
    return BackgroundSummary(job_id, affected={"journals" : len(ids)})
class JournalBulkEditBackgroundTask(AdminBackgroundTask):
    """Background task which applies a common set of changes (editor group,
    note and/or replacement metadata) to every journal matched by a
    selection query, holding a lock on each record for the duration."""
    __action__ = "journal_bulk_edit"
    @classmethod
    def _job_parameter_check(cls, params):
        # we definitely need "ids" defined
        # we need at least one of "editor_group" or "note" defined as well
        ids = cls.get_param(params, "ids")
        ids_valid = ids is not None and len(ids) > 0
        note = cls.get_param(params, "note")
        note_valid = note is not None
        # replacement_metadata is stored as a JSON string; "{}" when absent
        metadata = cls.get_param(params, "replacement_metadata", "{}")
        metadata = json.loads(metadata)
        metadata_valid = len(metadata.keys()) > 0
        return ids_valid and (note_valid or metadata_valid)
    def run(self):
        """
        Execute the task as specified by the background_job: validate the
        replacement metadata once, then apply the metadata and/or note to
        each journal in turn, auditing every change and any failure.
        :return:
        """
        job = self.background_job
        params = job.params
        if not self._job_parameter_check(params):
            raise BackgroundException("{}.run run without sufficient parameters".format(self.__class__.__name__))
        # get the parameters for the job
        ids = self.get_param(params, 'ids')
        note = self.get_param(params, 'note')
        metadata = json.loads(self.get_param(params, 'replacement_metadata', "{}"))
        # if there is metadata, validate it
        if len(metadata.keys()) > 0:
            formdata = MultiDict(metadata)
            formulaic_context = JournalFormFactory.context("bulk_edit")
            fc = formulaic_context.processor(formdata=formdata)
            if not fc.validate():
                raise BackgroundException("Unable to validate replacement metadata: " + json.dumps(metadata))
        for journal_id in ids:
            updated = False
            j = models.Journal.pull(journal_id)
            if j is None:
                job.add_audit_message("Journal with id {} does not exist, skipping".format(journal_id))
                continue
            formulaic_context = JournalFormFactory.context("admin")
            fc = formulaic_context.processor(source=j)
            # turn on the "all fields optional" flag, so that bulk tasks don't cause errors that the user interface
            # would allow you to bypass
            fc.form.make_all_fields_optional.data = True
            if "editor_group" in metadata:
                # the group is changing, so any previously assigned editor is unset
                fc.form.editor.data = None
            elif j.editor_group is not None:
                # FIXME: this is a bit of a stop-gap, pending a more substantial referential-integrity-like solution
                # if the editor group is not being changed, validate that the editor is actually in the editor group,
                # and if not, unset them
                eg = models.EditorGroup.pull_by_key("name", j.editor_group)
                if eg is not None:
                    all_eds = eg.associates + [eg.editor]
                    if j.editor not in all_eds:
                        fc.form.editor.data = None
                else:
                    # if we didn't find the editor group, this is broken anyway, so reset the editor data anyway
                    fc.form.editor.data = None
            # apply each replacement metadata field to the form
            for k, v in metadata.items():
                if k != "change_doaj_seal":
                    job.add_audit_message("Setting {f} to {x} for journal {y}".format(f=k, x=v, y=journal_id))
                    fc.form[k].data = v
                else:
                    # the seal flag is only ever switched on, never cleared, here
                    if v:
                        fc.form.doaj_seal.data = v
                updated = True
            if note:
                job.add_audit_message("Adding note to for journal {y}".format(y=journal_id))
                fc.form.notes.append_entry(
                    {'note_date': datetime.now().strftime(app.config['DEFAULT_DATE_FORMAT']), 'note': note}
                )
                updated = True
            if updated:
                if fc.validate():
                    try:
                        fc.finalise()
                    except Exception as e:
                        job.add_audit_message("Form context exception while bulk editing journal {} :\n{}".format(journal_id, str(e)))
                else:
                    # collect the submitted values of the failing fields for the audit trail
                    data_submitted = {}
                    for affected_field_name in fc.form.errors.keys():
                        affected_field = getattr(fc.form, affected_field_name,
                                                 ' Field {} does not exist on form. '.format(affected_field_name))
                        if isinstance(affected_field, str): # ideally this should never happen, an error should not be reported on a field that is not present on the form
                            data_submitted[affected_field_name] = affected_field
                            continue
                        data_submitted[affected_field_name] = affected_field.data
                    job.add_audit_message(
                        "Data validation failed while bulk editing journal {} :\n"
                        "{}\n\n"
                        "The data from the fields with the errors is:\n{}".format(
                            journal_id, json.dumps(fc.form.errors), json.dumps(data_submitted)
                        )
                    )
    def cleanup(self):
        """
        Cleanup after a successful OR failed run of the task: release the
        batch lock taken out by prepare() on every selected journal.
        :return:
        """
        job = self.background_job
        params = job.params
        ids = self.get_param(params, 'ids')
        username = job.user
        lock.batch_unlock("journal", ids, username)
    @classmethod
    def resolve_selection_query(cls, selection_query):
        # Run the selection query (ids only, no _source) and return the
        # matching journal ids.
        q = deepcopy(selection_query)
        q["_source"] = False
        iterator = models.Journal.iterate(q=q, page_size=5000, wrap=False)
        return [j['_id'] for j in iterator]
    @classmethod
    def prepare(cls, username, **kwargs):
        """
        Take an arbitrary set of keyword arguments and return an instance of a BackgroundJob,
        or fail with a suitable exception.  Expects 'selection_query', 'ids'
        and optionally 'note' and 'replacement_metadata' in kwargs; also
        acquires a batch lock on all the selected journals.
        :param kwargs: arbitrary keyword arguments pertaining to this task type
        :return: a BackgroundJob instance representing this task
        """
        super(JournalBulkEditBackgroundTask, cls).prepare(username, **kwargs)
        # first prepare a job record
        job = models.BackgroundJob()
        job.user = username
        job.action = cls.__action__
        refs = {}
        cls.set_reference(refs, "selection_query", json.dumps(kwargs['selection_query']))
        job.reference = refs
        params = {}
        # get the named parameters we know may be there
        cls.set_param(params, 'ids', kwargs['ids'])
        if "note" in kwargs and kwargs["note"] is not None and kwargs["note"] != "":
            cls.set_param(params, 'note', kwargs.get('note', ''))
        # get the metadata overwrites
        if "replacement_metadata" in kwargs:
            # drop empty values so only real overwrites are recorded
            metadata = {}
            for k, v in kwargs["replacement_metadata"].items():
                if v is not None and v != "":
                    metadata[k] = v
            if len(metadata.keys()) > 0:
                cls.set_param(params, 'replacement_metadata', json.dumps(metadata))
        if not cls._job_parameter_check(params):
            raise BackgroundException("{}.prepare run without sufficient parameters".format(cls.__name__))
        job.params = params
        # now ensure that we have the locks for all the journals
        # will raise an exception if this fails
        lock.batch_lock("journal", kwargs['ids'], username, timeout=app.config.get("BACKGROUND_TASK_LOCK_TIMEOUT", 3600))
        return job
    @classmethod
    def submit(cls, background_job):
        """
        Submit the specified BackgroundJob to the background queue
        :param background_job: the BackgroundJob instance
        :return:
        """
        background_job.save(blocking=True)
        journal_bulk_edit.schedule(args=(background_job.id,), delay=10)
@main_queue.task()
@write_required(script=True)
def journal_bulk_edit(job_id):
    """Huey entry point on the main queue: load the BackgroundJob by id and
    execute the journal bulk-edit task for it."""
    job = models.BackgroundJob.pull(job_id)
    task = JournalBulkEditBackgroundTask(job)
    BackgroundApi.execute(task)
| |
#! /usr/bin/env python
import string
import sys
from xml.dom import Node
try:
    from xml.ns import XMLNS
except ImportError:
    # PyXML is not installed: provide the two namespace URIs this module needs.
    class XMLNS(object):
        BASE = "http://www.w3.org/2000/xmlns/"
        XML = "http://www.w3.org/XML/1998/namespace"
try:
    from io import StringIO
except ImportError:
    # Python 2 fallback; either way StringIO is bound to the class itself.
    from cStringIO import StringIO
'''XML Canonicalization
Patches Applied to xml.dom.ext.c14n:
http://sourceforge.net/projects/pyxml/
[ 1444526 ] c14n.py: http://www.w3.org/TR/xml-exc-c14n/ fix
-- includes [ 829905 ] c14n.py fix for bug #825115,
Date Submitted: 2003-10-24 23:43
-- include dependent namespace declarations declared in ancestor nodes
(checking attributes and tags),
-- handle InclusiveNamespaces PrefixList parameter
This module generates canonical XML of a document or element.
http://www.w3.org/TR/2001/REC-xml-c14n-20010315
and includes a prototype of exclusive canonicalization
http://www.w3.org/Signature/Drafts/xml-exc-c14n
Requires PyXML 0.7.0 or later.
Known issues if using Ft.Lib.pDomlette:
1. Unicode
2. does not white space normalize attributes of type NMTOKEN and ID?
3. seems to be include "\n" after importing external entities?
Note, this version processes a DOM tree, and consequently it processes
namespace nodes as attributes, not from a node's namespace axis. This
permits simple document and element canonicalization without
XPath. When XPath is used, the XPath result node list is passed and used to
determine if the node is in the XPath result list, but little else.
Authors:
"Joseph M. Reagle Jr." <reagle@w3.org>
"Rich Salz" <rsalz@zolera.com>
$Date$ by $Author$
'''
_copyright = '''Copyright 2001, Zolera Systems Inc. All Rights Reserved.
Copyright 2001, MIT. All Rights Reserved.
Distributed under the terms of:
Python 2.0 License or later.
http://www.python.org/2.0.1/license.html
or
W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software-19980720
'''
def _attrs(E):
return (E.attributes and list(E.attributes.values())) or []
def _children(E):
return E.childNodes or []
def _IN_XML_NS(n):
return n.name.startswith("xmlns")
def _inclusive(n):
return n.unsuppressedPrefixes is None
# Does a document/PI has lesser/greater document order than the
# first element?
# Used by _do_pi/_do_comment to decide whether to emit a leading/trailing #xA.
_LesserElement, _Element, _GreaterElement = list(range(3))
# Python 3 removed the cmp() builtin; recreate it for the sort predicates.
if sys.version_info[0] > 2:
    def cmp(a, b):
        return (a > b) - (a < b)
def _sorter(n1, n2):
'''_sorter(n1,n2) -> int
Sorting predicate for non-NS attributes.
'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i:
return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1, n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."
'''
if n1[0] == 'xmlns':
return -1
if n2[0] == 'xmlns':
return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node
'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n == "" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix:
return 1
# For exclusive need to look at attributes
if unsuppressedPrefixes is not None:
for attr in _attrs(node):
if n == attr.prefix:
return 1
return 0
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes):
    '''http://www.w3.org/TR/xml-exc-c14n/
    InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that
    are handled in the manner described by the Canonical XML Recommendation
    '''
    # Declarations actually needed by the element itself and by its
    # prefixed, non-namespace attributes.
    if node.prefix:
        usedPrefixes = ['xmlns:%s' % node.prefix]
    else:
        usedPrefixes = ['xmlns']
    for a in _attrs(node):
        if a.nodeName.startswith('xmlns') or not a.prefix:
            continue
        usedPrefixes.append('xmlns:%s' % a.prefix)
    inclusive = []
    unused_namespace_dict = {}
    for attr in context:
        n = attr.nodeName
        # An ancestor declaration is carried over when it is listed in the
        # PrefixList (by full name or bare prefix) or actually used here.
        wanted = (
            n in unsuppressedPrefixes
            or (n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes)
            or (n.startswith('xmlns') and n[5:] in unsuppressedPrefixes)
            or n in usedPrefixes
        )
        if wanted:
            inclusive.append(attr)
        elif n.startswith('xmlns:'):
            # Remember unrendered prefixed declarations for later mapping.
            unused_namespace_dict[n] = attr.value
    return inclusive, unused_namespace_dict
# _in_subset = lambda subset, node: not subset or node in subset
def _in_subset(subset, node):
return subset is None or node in subset # rich's tweak
class _implementation(object):
    '''Implementation class for C14N.
    This accompanies a node during it's
    processing and includes the parameters and processing state.
    '''
    # Handler for each node type; populated during module instantiation.
    handlers = {}

    def __init__(self, node, write, **kw):
        '''Create and run the implementation.

        :param node: DOM document or element node to canonicalize.
        :param write: callable receiving successive output text fragments.
        Keyword args: subset, comments, unsuppressedPrefixes, nsdict.
        '''
        self.write = write
        self.subset = kw.get('subset')
        self.comments = kw.get('comments', 0)
        self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
        nsdict = kw.get('nsdict', {'xml': XMLNS.XML, 'xmlns': XMLNS.BASE})
        # Processing state: (ns declared in parent, ns rendered by ancestors,
        # inherited xml: attributes, inherited-but-unrendered ns).
        self.state = (nsdict, {'xml': ''}, {}, {})  # 0422
        if node.nodeType == Node.DOCUMENT_NODE:
            self._do_document(node)
        elif node.nodeType == Node.ELEMENT_NODE:
            self.documentOrder = _Element  # At document element
            if not _inclusive(self):
                # Exclusive C14N: work out which ancestor declarations must
                # be carried into the subtree.
                inherited, unused = _inclusiveNamespacePrefixes(
                    node, self._inherit_context(node), self.unsuppressedPrefixes)
                self._do_element(node, inherited, unused=unused)
            else:
                inherited = self._inherit_context(node)
                self._do_element(node, inherited)
        elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
            pass
        else:
            raise TypeError(str(node))

    def _inherit_context(self, node):
        '''_inherit_context(self, node) -> list
        Scan ancestors of attribute and namespace context. Used only
        for single element node canonicalization, not for subset
        canonicalization.
        '''
        # Collect the initial list of xml:foo attributes.
        xmlattrs = list(filter(_IN_XML_NS, _attrs(node)))
        # Walk up and get all xml:XXX attributes we inherit.
        inherited, parent = [], node.parentNode
        while parent and parent.nodeType == Node.ELEMENT_NODE:
            for a in filter(_IN_XML_NS, _attrs(parent)):
                n = a.localName
                if n not in xmlattrs:
                    xmlattrs.append(n)
                    inherited.append(a)
            parent = parent.parentNode
        return inherited

    def _do_document(self, node):
        '''_do_document(self, node) -> None
        Process a document node. documentOrder holds whether the document
        element has been encountered such that PIs/comments can be written
        as specified.
        '''
        self.documentOrder = _LesserElement
        for child in node.childNodes:
            if child.nodeType == Node.ELEMENT_NODE:
                self.documentOrder = _Element  # At document element
                self._do_element(child)
                self.documentOrder = _GreaterElement  # After document element
            elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
                self._do_pi(child)
            elif child.nodeType == Node.COMMENT_NODE:
                self._do_comment(child)
            elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
                pass
            else:
                raise TypeError(str(child))
    handlers[Node.DOCUMENT_NODE] = _do_document

    def _do_text(self, node):
        '''_do_text(self, node) -> None
        Process a text or CDATA node. Render various special characters
        as their C14N entity representations.
        '''
        if not _in_subset(self.subset, node):
            return
        # Use the str method (the Py2-only string.replace() helper is gone in
        # Python 3).  '&' must be escaped first so the entities introduced by
        # the later replacements are not double-escaped.
        s = node.data.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        s = s.replace("\015", "&#xD;")
        if s:
            self.write(s)
    handlers[Node.TEXT_NODE] = _do_text
    handlers[Node.CDATA_SECTION_NODE] = _do_text

    def _do_pi(self, node):
        '''_do_pi(self, node) -> None
        Process a PI node. Render a leading or trailing #xA if the
        document order of the PI is greater or lesser (respectively)
        than the document element.
        '''
        if not _in_subset(self.subset, node):
            return
        W = self.write
        if self.documentOrder == _GreaterElement:
            W('\n')
        W('<?')
        W(node.nodeName)
        s = node.data
        if s:
            W(' ')
            W(s)
        W('?>')
        if self.documentOrder == _LesserElement:
            W('\n')
    handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi

    def _do_comment(self, node):
        '''_do_comment(self, node) -> None
        Process a comment node. Render a leading or trailing #xA if the
        document order of the comment is greater or lesser (respectively)
        than the document element.
        '''
        if not _in_subset(self.subset, node):
            return
        if self.comments:
            W = self.write
            if self.documentOrder == _GreaterElement:
                W('\n')
            W('<!--')
            W(node.data)
            W('-->')
            if self.documentOrder == _LesserElement:
                W('\n')
    handlers[Node.COMMENT_NODE] = _do_comment

    def _do_attr(self, n, value):
        ''''_do_attr(self, node) -> None
        Process an attribute.  Per C14N, escapes &, <, " and the
        whitespace characters TAB, LF and CR as character references.
        '''
        W = self.write
        W(' ')
        W(n)
        W('="')
        s = value.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace('"', '&quot;')
        s = s.replace('\011', '&#x9;')
        s = s.replace('\012', '&#xA;')
        s = s.replace('\015', '&#xD;')
        W(s)
        W('"')

    def _do_element(self, node, initial_other_attrs=[], unused=None):
        '''_do_element(self, node, initial_other_attrs = [], unused = {}) -> None
        Process an element (and its children).
        '''
        # Get state (from the stack) make local copies.
        #   ns_parent -- NS declarations in parent
        #   ns_rendered -- NS nodes rendered by ancestors
        #   ns_local -- NS declarations relevant to this element
        #   xml_attrs -- Attributes in XML namespace from parent
        #   xml_attrs_local -- Local attributes in XML namespace.
        #   ns_unused_inherited -- not rendered namespaces, used for exclusive
        ns_parent, ns_rendered, xml_attrs = \
            self.state[0], self.state[1].copy(), self.state[2].copy()  # 0422
        ns_unused_inherited = unused
        if unused is None:
            ns_unused_inherited = self.state[3].copy()
        ns_local = ns_parent.copy()
        inclusive = _inclusive(self)
        xml_attrs_local = {}
        # Divide attributes into NS, XML, and others.
        other_attrs = []
        in_subset = _in_subset(self.subset, node)
        for a in initial_other_attrs + _attrs(node):
            if a.namespaceURI == XMLNS.BASE:
                n = a.nodeName
                if n == "xmlns:":
                    n = "xmlns"  # DOM bug workaround
                ns_local[n] = a.nodeValue
            elif a.namespaceURI == XMLNS.XML:
                # 020925 Test to see if attribute node in subset
                if inclusive or (in_subset and _in_subset(self.subset, a)):
                    xml_attrs_local[a.nodeName] = a  # 0426
            else:
                if _in_subset(self.subset, a):  # 020925 Test to see if attribute node in subset
                    other_attrs.append(a)
        # add local xml:foo attributes to ancestor's xml:foo attributes
        xml_attrs.update(xml_attrs_local)
        # Render the node
        W, name = self.write, None
        if in_subset:
            name = node.nodeName
            if not inclusive:
                if node.prefix is not None:
                    prefix = 'xmlns:%s' % node.prefix
                else:
                    prefix = 'xmlns'
                if prefix not in ns_rendered and prefix not in ns_local:
                    if prefix not in ns_unused_inherited:
                        raise RuntimeError('For exclusive c14n, unable to map prefix "%s" in %s' % (
                            prefix, node))
                    ns_local[prefix] = ns_unused_inherited[prefix]
                    del ns_unused_inherited[prefix]
            W('<')
            W(name)
            # Create list of NS attributes to render.
            ns_to_render = []
            for n, v in list(ns_local.items()):
                # If default namespace is XMLNS.BASE or empty,
                # and if an ancestor was the same
                if n == "xmlns" and v in [XMLNS.BASE, ''] \
                        and ns_rendered.get('xmlns') in [XMLNS.BASE, '', None]:
                    continue
                # "omit namespace node with local name xml, which defines
                # the xml prefix, if its string value is
                # http://www.w3.org/XML/1998/namespace."
                if n in ["xmlns:xml", "xml"] \
                        and v in ['http://www.w3.org/XML/1998/namespace']:
                    continue
                # If not previously rendered
                # and it's inclusive or utilized
                if (n, v) not in list(ns_rendered.items()):
                    if inclusive or _utilized(n, node, other_attrs, self.unsuppressedPrefixes):
                        ns_to_render.append((n, v))
                    elif not inclusive:
                        ns_unused_inherited[n] = v
            # Sort and render the ns, marking what was rendered.  Python 3's
            # list.sort() takes no comparison function, so use a key that
            # reproduces _sorter_ns: the default declaration ("xmlns") sorts
            # first, the rest lexicographically by declaration name.
            ns_to_render.sort(key=lambda pair: (pair[0] != 'xmlns', pair[0]))
            for n, v in ns_to_render:
                self._do_attr(n, v)
                ns_rendered[n] = v  # 0417
            # If exclusive or the parent is in the subset, add the local xml attributes
            # Else, add all local and ancestor xml attributes
            # Sort and render the attributes.
            if not inclusive or _in_subset(self.subset, node.parentNode):  # 0426
                other_attrs.extend(list(xml_attrs_local.values()))
            else:
                other_attrs.extend(list(xml_attrs.values()))
            # Key form of _sorter (namespace URI, then local name); None
            # sorts as the empty string so Python 3 never compares None
            # against str.
            other_attrs.sort(key=lambda attr: (attr.namespaceURI or '',
                                               attr.localName or ''))
            for a in other_attrs:
                self._do_attr(a.nodeName, a.value)
            W('>')
        # Push state, recurse, pop state.
        state, self.state = self.state, (ns_local,
                                         ns_rendered, xml_attrs, ns_unused_inherited)
        for c in _children(node):
            _implementation.handlers[c.nodeType](self, c)
        self.state = state
        if name:
            W('</%s>' % name)
    handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
    '''Canonicalize(node, output=None, **kw) -> UTF-8

    Serialize a DOM document/element node and all of its descendants in
    canonical (C14N) form.  When *output* is supplied, the text is streamed
    through output.write and None is returned; otherwise the canonical text
    is accumulated in a string buffer and returned.

    Keyword parameters:
        nsdict: a dictionary of prefix:uri namespace entries
                assumed to exist in the surrounding context
        comments: keep comments if non-zero (default is 0)
        subset: Canonical XML subsetting resulting from XPath
                (default is [])
        unsuppressedPrefixes: do exclusive C14N, and this specifies the
                prefixes that should be inherited.
    '''
    if not output:
        # No sink given: collect the serialization and hand it back.
        sink = StringIO.StringIO()
        _implementation(node, sink.write, **kw)
        return sink.getvalue()
    _implementation(node, output.write, **kw)
| |
"""
Tests for field placement (size/position) rendering
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import unittest
import re
from annalist.views.fields.render_placement import (
Placement, LayoutOptions, get_placement_classes,
get_field_placement_renderer,
option_body, view_body
)
from .field_rendering_support import FieldRendererTestSupport
class FieldPlacementRenderingTest(FieldRendererTestSupport):
    """Tests for the field-placement value renderer and the placement-string
    to CSS-class translation performed by get_placement_classes."""

    def setUp(self):
        # Test context carries a stored placement value of "small:0,12;medium:4,4",
        # which should appear as the selected option in the edit rendering.
        self.placement_context = self._make_test_context("small:0,12;medium:4,4")
        return

    def tearDown(self):
        return

    def test_get_placement_classes(self):
        """
        This was a DocTest, but Py3 compatibility....
        """
        # Each case checks that a placement string expands to the expected
        # Placement tuple: per-size widths/offsets/visibility plus the
        # Zurb-Foundation column class strings for field, label and value.
        self.assertEqual(
            get_placement_classes("medium:0,12"),
            Placement(
                width=LayoutOptions(s=12, m=12, l=12),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-12 columns',
                label='small-12 medium-2 columns',
                value='small-12 medium-10 columns'
                )
            )
        self.assertEqual(
            get_placement_classes("large:0,12"),
            Placement(
                width=LayoutOptions(s=12, m=12, l=12),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-12 columns',
                label='small-12 medium-2 columns',
                value='small-12 medium-10 columns'
                )
            )
        self.assertEqual(
            get_placement_classes("small:0,12;medium:0,4"),
            Placement(
                width=LayoutOptions(s=12, m=4, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-12 medium-4 columns',
                label='small-12 medium-6 columns',
                value='small-12 medium-6 columns'
                )
            )
        # Whitespace after the ';' separator is tolerated.
        self.assertEqual(
            get_placement_classes("small:0,12; medium:0,4"),
            Placement(
                width=LayoutOptions(s=12, m=4, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-12 medium-4 columns',
                label='small-12 medium-6 columns',
                value='small-12 medium-6 columns'
                )
            )
        self.assertEqual(
            get_placement_classes("small:0,12;medium:0,6;large:0,4"),
            Placement(
                width=LayoutOptions(s=12, m=6, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-12 medium-6 large-4 columns',
                label='small-12 medium-4 large-6 columns',
                value='small-12 medium-8 large-6 columns'
                )
            )
        self.assertEqual(
            get_placement_classes("small:0,6;medium:0,4"),
            Placement(
                width=LayoutOptions(s=6, m=4, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-6 medium-4 columns',
                label='small-12 medium-6 columns',
                value='small-12 medium-6 columns'
                )
            )
        # "right" qualifier is appended to the field classes.
        self.assertEqual(
            get_placement_classes("small:0,6;medium:0,4,right"),
            Placement(
                width=LayoutOptions(s=6, m=4, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-6 medium-4 columns right',
                label='small-12 medium-6 columns',
                value='small-12 medium-6 columns'
                )
            )
        self.assertEqual(
            get_placement_classes("small:0,6"),
            Placement(
                width=LayoutOptions(s=6, m=6, l=6),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=True, m=True, l=True),
                field='small-6 columns',
                label='small-12 medium-4 columns',
                value='small-12 medium-8 columns'
                )
            )
        # "hide" makes the field invisible at that size (show-for-medium-up).
        self.assertEqual(
            get_placement_classes("small:0,12,hide;medium:0,4"),
            Placement(
                width=LayoutOptions(s=12, m=4, l=4),
                offset=LayoutOptions(s=0, m=0, l=0),
                display=LayoutOptions(s=False, m=True, l=True),
                field='small-12 medium-4 columns show-for-medium-up',
                label='small-12 medium-6 columns',
                value='small-12 medium-6 columns'
                )
            )
        return

    def test_RenderFieldPlacementValue(self):
        # View rendering shows the currently selected placement as ASCII art.
        expect_rendered_view=view_body(
            '''<span class="placement-text">'''+
            '''....####.... (4/4)'''+
            '''</span>'''
            )
        # Edit rendering is a <select> of all offered placements, with the
        # context value ("small:0,12;medium:4,4") marked as selected.
        expect_rendered_edit=option_body(
            '''<select class="placement-text" name="repeat_prefix_test_field">\n'''+
            ''' <option value="">(test placeholder)</option>\n'''+
            ''' <option value="small:0,12">'''+
            '''############ (0/12)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:0,6">'''+
            '''######...... (0/6)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:3,6">'''+
            '''...######... (3/6)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:6,6">'''+
            '''......###### (6/6)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:0,4">'''+
            '''####........ (0/4)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:4,4" selected="selected">'''+
            '''....####.... (4/4)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:8,4">'''+
            '''........#### (8/4)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:0,3">'''+
            '''###......... (0/3)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:3,3">'''+
            '''...###...... (3/3)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:6,3">'''+
            '''......###... (6/3)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:9,3">'''+
            '''.........### (9/3)'''+
            '''</option>\n'''+
            # Old right-aligned placement strings
            ''' <option value="small:0,12;medium:6,6right">'''+
            '''......###### (6/6R)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:8,4right">'''+
            '''........#### (8/4R)'''+
            '''</option>\n'''+
            ''' <option value="small:0,12;medium:9,3right">'''+
            '''.........### (9/3R)'''+
            '''</option>\n'''+
            # Column placement strings affecting all display sizes
            ''' <option value="small:0,9">'''+
            '''#########... (0/9col)'''+
            '''</option>\n'''+
            ''' <option value="small:3,9">'''+
            '''...######### (3/9col)'''+
            '''</option>\n'''+
            ''' <option value="small:0,8">'''+
            '''########.... (0/8col)'''+
            '''</option>\n'''+
            ''' <option value="small:4,8">'''+
            '''....######## (4/8col)'''+
            '''</option>\n'''+
            ''' <option value="small:0,6">'''+
            '''######...... (0/6col)'''+
            '''</option>\n'''+
            ''' <option value="small:3,6">'''+
            '''...######... (3/6col)'''+
            '''</option>\n'''+
            ''' <option value="small:6,6">'''+
            '''......###### (6/6col)'''+
            '''</option>\n'''+
            ''' <option value="small:0,4">'''+
            '''####........ (0/4col)'''+
            '''</option>\n'''+
            ''' <option value="small:4,4">'''+
            '''....####.... (4/4col)'''+
            '''</option>\n'''+
            ''' <option value="small:8,4">'''+
            '''........#### (8/4col)'''+
            '''</option>\n'''+
            ''' <option value="small:0,3">'''+
            '''###......... (0/3col)'''+
            '''</option>\n'''+
            ''' <option value="small:3,3">'''+
            '''...###...... (3/3col)'''+
            '''</option>\n'''+
            ''' <option value="small:6,3">'''+
            '''......###... (6/3col)'''+
            '''</option>\n'''+
            ''' <option value="small:9,3">'''+
            '''.........### (9/3col)'''+
            '''</option>\n'''+
            '''</select>'''
            )
        self._check_value_renderer_results(
            get_field_placement_renderer(),
            context=self.placement_context,
            expect_rendered_view=expect_rendered_view,
            expect_rendered_edit=expect_rendered_edit
            )
        return
# End.
if __name__ == "__main__":
    # Run all tests in this module directly (historical alternatives kept
    # for reference):
    # import django
    # django.setup()  # Needed for template loader
    # runner = unittest.TextTestRunner(verbosity=2)
    # tests = unittest.TestSuite()
    # tests = getSuite(select=sel)
    # if tests: runner.run(tests)
    unittest.main()
| |
import os
import sys
import re
import simplejson as json
from cuddlefish.bunch import Bunch
# Name of the per-package manifest file.
MANIFEST_NAME = 'package.json'
# Package that provides the module loader when none is configured.
DEFAULT_LOADER = 'jetpack-core'
# Module used as a package's entry point when 'main' is not declared.
DEFAULT_PROGRAM_MODULE = 'main'
# package.json properties copied verbatim into generated metadata.
METADATA_PROPS = ['name', 'description', 'keywords', 'author',
                  'contributors', 'license', 'url']
# Legal hostname for a resource: URI — lowercase alphanumerics, '_' and '-'.
RESOURCE_HOSTNAME_RE = re.compile(r'^[a-z0-9_\-]+$')
class Error(Exception):
    """Base class for all packaging errors raised by this module."""
    pass
class MalformedPackageError(Error):
    """A package directory is missing its manifest or is otherwise unusable."""
    pass
class MalformedJsonFileError(Error):
    """A JSON file could not be parsed."""
    pass
class DuplicatePackageError(Error):
    """Two distinct directories claim the same package name."""
    pass
class PackageNotFoundError(Error):
    """A targeted or required package could not be located."""
    def __init__(self, missing_package, reason):
        self.missing_package = missing_package  # name of the missing package
        self.reason = reason  # human-readable explanation of why it was needed
    def __str__(self):
        return "%s (%s)" % (self.missing_package, self.reason)
class BadChromeMarkerError(Error):
    """Package code contained an invalid chrome marker (details on stderr)."""
    pass
def validate_resource_hostname(name):
    """
    Raise ValueError unless *name* is a legal hostname for a resource: URI
    (lowercase alphanumerics, '_' and '-' only).

    For more information, see:

      https://bugzilla.mozilla.org/show_bug.cgi?id=566812#c13

    Examples:

      >>> validate_resource_hostname('blarg')

      >>> validate_resource_hostname('BLARG')
      Traceback (most recent call last):
      ...
      ValueError: invalid resource hostname: BLARG

      >>> validate_resource_hostname('foo@bar')
      Traceback (most recent call last):
      ...
      ValueError: invalid resource hostname: foo@bar
    """
    if RESOURCE_HOSTNAME_RE.match(name) is None:
        raise ValueError('invalid resource hostname: %s' % name)
def find_packages_with_module(pkg_cfg, name):
    """Return the names of packages whose 'lib' directories contain
    a top-level module file <name>.js."""
    # TODO: Make this support more than just top-level modules.
    target = "%s.js" % name
    found = []
    for cfg in pkg_cfg.packages.itervalues():
        if 'lib' not in cfg:
            continue
        has_module = any(
            os.path.exists(os.path.join(dirname, target))
            for dirname in resolve_dirs(cfg, cfg.lib))
        if has_module:
            found.append(cfg.name)
    return found
def resolve_dirs(pkg_cfg, dirnames):
    """Lazily resolve each of *dirnames* against the package's root_dir."""
    return (resolve_dir(pkg_cfg, name) for name in dirnames)
def resolve_dir(pkg_cfg, dirname):
    """Join a package-relative directory name onto the package root."""
    root = pkg_cfg.root_dir
    return os.path.join(root, dirname)
def get_metadata(pkg_cfg, deps):
    """Collect the standard metadata properties (METADATA_PROPS) for each
    package named in *deps*, keyed by package name."""
    metadata = Bunch()
    for pkg_name in deps:
        cfg = pkg_cfg.packages[pkg_name]
        entry = Bunch()
        for prop in METADATA_PROPS:
            value = cfg.get(prop)
            if value:
                entry[prop] = value
        metadata[pkg_name] = entry
    return metadata
def is_dir(path):
    """Return True if *path* exists and is a directory.

    os.path.isdir() already returns False for nonexistent paths, so the
    previous separate os.path.exists() check was redundant.
    """
    return os.path.isdir(path)
def apply_default_dir(base_json, base_path, dirname):
    """Default base_json[dirname] to *dirname* itself when the key is unset
    (or falsy) and base_path/<dirname> exists as a directory."""
    if base_json.get(dirname):
        return
    if is_dir(os.path.join(base_path, dirname)):
        base_json[dirname] = dirname
def normalize_string_or_array(base_json, key):
    """Canonicalize base_json[key]: a bare string becomes a one-element list;
    missing/falsy values and existing lists are left untouched."""
    value = base_json.get(key)
    if value and isinstance(value, basestring):
        base_json[key] = [value]
def load_json_file(path):
    """Read *path* and parse it as JSON, returning the result as a Bunch.

    Raises MalformedJsonFileError (carrying the original parse error and
    the offending path) when the file is not valid JSON.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original open(path).read() leaked the handle until GC.
    with open(path, 'r') as f:
        data = f.read()
    try:
        return Bunch(json.loads(data))
    except ValueError as e:
        # "except ... as" is valid from Python 2.6 and required by Python 3;
        # the old "except ValueError, e" form is a py3 syntax error.
        raise MalformedJsonFileError('%s when reading "%s"' % (str(e),
                                                               path))
def get_config_in_dir(path):
    """Load and canonicalize the package configuration found in *path*.

    Reads MANIFEST_NAME (package.json), fills in defaults (package name,
    standard directories, program entry point), normalizes string-or-list
    values, and records the package root in 'root_dir'.

    Raises MalformedPackageError when no manifest file is present.
    """
    package_json = os.path.join(path, MANIFEST_NAME)
    if not (os.path.exists(package_json) and
            os.path.isfile(package_json)):
        raise MalformedPackageError('%s not found in "%s"' % (MANIFEST_NAME,
                                                              path))
    base_json = load_json_file(package_json)
    # Default the package name to its directory name.
    if 'name' not in base_json:
        base_json.name = os.path.basename(path)
    # Standard directories default to same-named subdirectories, when they
    # actually exist on disk.
    for dirname in ['lib', 'tests', 'data', 'packages']:
        apply_default_dir(base_json, path, dirname)
    # These keys accept either a string or a list; canonicalize to lists.
    for key in ['lib', 'tests', 'dependencies', 'packages']:
        normalize_string_or_array(base_json, key)
    if 'xpcom' in base_json:
        base_json.xpcom = Bunch(base_json.xpcom)
    # Default the entry point to the first lib dir containing main.js.
    if 'main' not in base_json and 'lib' in base_json:
        for dirname in base_json['lib']:
            program = os.path.join(path, dirname,
                                   '%s.js' % DEFAULT_PROGRAM_MODULE)
            if os.path.exists(program):
                base_json['main'] = DEFAULT_PROGRAM_MODULE
                break
    base_json.root_dir = path
    return base_json
def _is_same_file(a, b):
if hasattr(os.path, 'samefile'):
return os.path.samefile(a, b)
return a == b
def build_config(root_dir, target_cfg):
    """Discover every package reachable from *target_cfg* and *root_dir*.

    Scans the target's declared 'packages' directories plus the conventional
    root_dir/packages directory, loading each contained package's
    configuration (which may in turn declare further 'packages' dirs).

    Returns Bunch(packages={name: package_config}).
    Raises DuplicatePackageError when two different directories provide
    the same package name.
    """
    dirs_to_scan = []
    def add_packages_from_config(pkgconfig):
        # Queue every 'packages' directory declared by this package.
        if 'packages' in pkgconfig:
            for package_dir in resolve_dirs(pkgconfig, pkgconfig.packages):
                dirs_to_scan.append(package_dir)
    add_packages_from_config(target_cfg)
    packages_dir = os.path.join(root_dir, 'packages')
    if os.path.exists(packages_dir) and os.path.isdir(packages_dir):
        dirs_to_scan.append(packages_dir)
    packages = Bunch({target_cfg.name: target_cfg})
    while dirs_to_scan:
        packages_dir = dirs_to_scan.pop()
        # Every non-hidden entry of a packages dir is itself a package root.
        package_paths = [os.path.join(packages_dir, dirname)
                         for dirname in os.listdir(packages_dir)
                         if not dirname.startswith('.')]
        for path in package_paths:
            pkgconfig = get_config_in_dir(path)
            if pkgconfig.name in packages:
                # Seeing the same directory twice is fine; a *different*
                # directory with the same package name is an error.
                otherpkg = packages[pkgconfig.name]
                if not _is_same_file(otherpkg.root_dir, path):
                    raise DuplicatePackageError(path, otherpkg.root_dir)
            else:
                packages[pkgconfig.name] = pkgconfig
                add_packages_from_config(pkgconfig)
    return Bunch(packages=packages)
def get_deps_for_targets(pkg_cfg, targets):
    """Return the target packages plus every transitive dependency, in
    depth-first (stack) visitation order.

    Raises PackageNotFoundError when a target or dependency is absent
    from pkg_cfg.packages.
    """
    visited = []
    # Stack of (package_name, requiring_package) pairs; None marks a target.
    stack = [(dep, None) for dep in list(targets)]
    while stack:
        dep, required_by = stack.pop()
        if dep in visited:
            continue
        visited.append(dep)
        if dep not in pkg_cfg.packages:
            if required_by is None:
                reason = "specified as target"
            else:
                reason = "required by '%s'" % (required_by)
            raise PackageNotFoundError(dep, reason)
        dep_cfg = pkg_cfg.packages[dep]
        stack.extend((child, dep)
                     for child in dep_cfg.get('dependencies', []))
    return visited
def generate_build_for_target(pkg_cfg, target, deps, prefix='',
                              include_tests=True,
                              include_dep_tests=False,
                              default_loader=DEFAULT_LOADER):
    """Assemble the build description for *target* and its *deps*.

    Registers each package's lib/data/tests directories as resource: URIs,
    scans code directories into the module manifest, and selects a loader.

    NOTE(review): the default_loader parameter is never referenced in the
    body — the fallback below hard-codes DEFAULT_LOADER. Confirm whether
    callers rely on overriding it.
    """
    validate_resource_hostname(prefix)
    manifest = []
    build = Bunch(resources=Bunch(),
                  resourcePackages=Bunch(),
                  packageData=Bunch(),
                  rootPaths=[],
                  manifest=manifest,
                  )
    def add_section_to_build(cfg, section, is_code=False,
                             is_data=False):
        # Register one package section ('lib', 'data' or 'tests') as a
        # resource root; code sections are also scanned into the manifest.
        if section in cfg:
            dirnames = cfg[section]
            if isinstance(dirnames, basestring):
                # This is just for internal consistency within this
                # function, it has nothing to do w/ a non-canonical
                # configuration dict.
                dirnames = [dirnames]
            for dirname in resolve_dirs(cfg, dirnames):
                # Resource names look like "<prefix><package>-<dirbasename>".
                name = "-".join([prefix + cfg.name,
                                 os.path.basename(dirname)])
                validate_resource_hostname(name)
                if name in build.resources:
                    raise KeyError('resource already defined', name)
                build.resourcePackages[name] = cfg.name
                build.resources[name] = dirname
                resource_url = 'resource://%s/' % name
                if is_code:
                    build.rootPaths.insert(0, resource_url)
                    from manifest import scan_package
                    pkg_manifest, problems = scan_package(cfg.name, dirname)
                    if problems:
                        # the relevant instructions have already been written
                        # to stderr
                        raise BadChromeMarkerError()
                    manifest.extend(pkg_manifest)
                if is_data:
                    build.packageData[cfg.name] = resource_url
    def add_dep_to_build(dep):
        # Add one dependency's sections; the first package declaring a
        # 'loader' wins.
        dep_cfg = pkg_cfg.packages[dep]
        add_section_to_build(dep_cfg, "lib", is_code=True)
        add_section_to_build(dep_cfg, "data", is_data=True)
        if include_tests and include_dep_tests:
            add_section_to_build(dep_cfg, "tests", is_code=True)
        if ("loader" in dep_cfg) and ("loader" not in build):
            build.loader = "resource://%s-%s" % (prefix + dep,
                                                 dep_cfg.loader)
    target_cfg = pkg_cfg.packages[target]
    # Without include_dep_tests, only the target's own tests are included.
    if include_tests and not include_dep_tests:
        add_section_to_build(target_cfg, "tests", is_code=True)
    for dep in deps:
        add_dep_to_build(dep)
    # Fall back to the default loader package when no dep supplied one.
    if 'loader' not in build:
        add_dep_to_build(DEFAULT_LOADER)
    return build
def call_plugins(pkg_cfg, deps):
    """For every package in *deps*: append its python-lib directories to
    sys.path, then import each declared python-plugins module and call its
    init() with the package's root directory."""
    for dep in deps:
        dep_cfg = pkg_cfg.packages[dep]
        for dirname in resolve_dirs(dep_cfg, dep_cfg.get('python-lib', [])):
            sys.path.append(dirname)
        for module_name in dep_cfg.get('python-plugins', []):
            module = __import__(module_name)
            module.init(root_dir=dep_cfg.root_dir)
def call_cmdline_tool(env_root, pkg_name):
    """Locate *pkg_name* under env_root, add its python-lib directories to
    sys.path, and run its configured python-cmdline-tool module."""
    pkg_cfg = build_config(env_root, Bunch(name='dummy'))
    if pkg_name not in pkg_cfg.packages:
        # print(...) with a single argument behaves identically under
        # Python 2's print statement and Python 3's function, so this is
        # forward-compatible (unlike the bare py2 print statement it replaces).
        print("This tool requires the '%s' package." % pkg_name)
        sys.exit(1)
    cfg = pkg_cfg.packages[pkg_name]
    for dirname in resolve_dirs(cfg, cfg['python-lib']):
        sys.path.append(dirname)
    module_name = cfg.get('python-cmdline-tool')
    module = __import__(module_name)
    module.run()
| |
import os
import sys
import logging
import argparse
from .discovery import UPnPLoopbackException
from .environment import Environment
from .config import get_cache, in_home, WemoConfiguration
from .utils import matcher
# Silence the noisy urllib3 connection-pool logger used by requests.
reqlog = logging.getLogger("requests.packages.urllib3.connectionpool")
reqlog.disabled = True
# Do-nothing callback used as the default device-discovery handler.
# A def (rather than a lambda assignment, PEP 8 E731) keeps behavior
# identical: accepts any positional arguments and returns None.
def NOOP(*args):
    return None
def _state(device, readable=False):
state = device.get_state(force_update=True)
if readable:
return "on" if state else "off"
else:
return state
def scan(args, on_switch=NOOP, on_motion=NOOP, on_bridge=NOOP, on_maker=NOOP):
    """Start the WeMo environment, invoking the given callbacks as devices
    are found; runs live discovery unless the device cache is in use."""
    try:
        env = Environment(on_switch, on_motion, on_bridge, on_maker, with_subscribers=False,
                          bind=args.bind, with_cache=args.use_cache)
        env.start()
        # An empty cache is useless — force live discovery instead.
        with get_cache() as c:
            if c.empty:
                args.use_cache = False
        # Discover only when caching is explicitly disabled, either by the
        # CLI flag or by the configuration file.
        if (args.use_cache is not None and not args.use_cache) or (
                env._config.cache is not None and not env._config.cache):
            env.discover(args.timeout)
    except KeyboardInterrupt:
        sys.exit(0)
    except UPnPLoopbackException:
        print """
Loopback interface is being used! You will probably not receive any responses
from devices. Use ifconfig to find your IP address, then either pass the
--bind argument or edit ~/.wemo/config.yml to specify the IP to which devices
should call back during discovery.""".strip()
        sys.exit(1)
def switch(args):
    """CLI handler: turn a named WeMo Switch on/off, toggle it, or print
    its status; exits 0 on success, 1 when no device matched."""
    # Normalize the requested action string.
    if args.state.lower() in ("on", "1", "true"):
        state = "on"
    elif args.state.lower() in ("off", "0", "false"):
        state = "off"
    elif args.state.lower() == "toggle":
        state = "toggle"
    elif args.state.lower() == "status":
        state = "status"
    else:
        print """No valid action specified.
Usage: wemo switch NAME (on|off|toggle|status)"""
        sys.exit(1)
    device_name = args.device
    # A configured alias requires an exact match; otherwise fuzzy-match
    # the requested device name.
    alias = WemoConfiguration().aliases.get(device_name)
    if alias:
        matches = lambda x:x == alias
    elif device_name:
        matches = matcher(device_name)
    else:
        # NOTE(review): NOOP always returns None (never matches), so with
        # no device name this always falls through to "No device found" —
        # confirm that is intended.
        matches = NOOP
    def on_switch(switch):
        if matches(switch.name):
            if state == "toggle":
                found_state = switch.get_state(force_update=True)
                switch.set_state(not found_state)
            elif state == "status":
                print _state(switch, args.human_readable)
            else:
                # "on"/"off" are method names on the switch object.
                getattr(switch, state)()
            # Stop scanning as soon as the matching device is handled.
            sys.exit(0)
    scan(args, on_switch)
    # If we got here, we didn't find anything
    print "No device found with that name."
    sys.exit(1)
def light(args):
    """CLI handler: operate a WeMo LED light or light group via its bridge
    (on/off/toggle/status, with optional dim level)."""
    # Normalize the requested action.
    # NOTE(review): the normalized `state` computed here is shadowed inside
    # on_bridge (reassigned to 0/1) and the branches below compare the raw
    # args.state instead — so e.g. "TOGGLE" normalizes here but will not
    # match args.state == "toggle" below. Confirm whether that is intended.
    if args.state.lower() in ("on", "1", "true"):
        state = "on"
    elif args.state.lower() in ("off", "0", "false"):
        state = "off"
    elif args.state.lower() == "toggle":
        state = "toggle"
    elif args.state.lower() == "status":
        state = "status"
    else:
        print """No valid action specified.
Usage: wemo light NAME (on|off|toggle|status)"""
        sys.exit(1)
    device_name = args.name
    # Aliases require an exact match; otherwise fuzzy-match the name.
    alias = WemoConfiguration().aliases.get(device_name)
    if alias:
        matches = lambda x:x == alias
    elif device_name:
        matches = matcher(device_name)
    else:
        matches = NOOP
    def on_switch(switch):
        pass
    def on_motion(motion):
        pass
    def on_bridge(bridge):
        # NOTE(review): the loop variable `light` below shadows this
        # function's own name.
        bridge.bridge_get_lights()
        for light in bridge.Lights:
            if matches(light):
                if args.state == "toggle":
                    found_state = bridge.light_get_state(bridge.Lights[light]).get('state')
                    bridge.light_set_state(bridge.Lights[light], state=not found_state)
                elif args.state == "status":
                    print bridge.light_get_state(bridge.Lights[light])
                else:
                    # Turning on without an explicit dim keeps the current dim.
                    if args.dim == None and args.state == "on":
                        dim = bridge.light_get_state(bridge.Lights[light]).get('dim')
                        state = 1
                    elif args.state == "off":
                        dim = None
                        state = 0
                    elif args.dim <= 255 and args.dim >= 0:
                        dim = args.dim
                        state = 1
                    else:
                        print """Invalid dim specified.
Dim must be between 0 and 255"""
                        sys.exit(1)
                    bridge.light_set_state(bridge.Lights[light],state=state,dim=dim)
                sys.exit(0)
        # Groups are handled identically to individual lights.
        for group in bridge.Groups:
            if matches(group):
                if args.state == "toggle":
                    found_state = bridge.group_get_state(bridge.Groups[group]).get('state')
                    bridge.group_set_state(bridge.Groups[group], state=not found_state)
                elif args.state == "status":
                    print bridge.group_get_state(bridge.Groups[group])
                else:
                    if args.dim == None and args.state == "on":
                        dim = bridge.group_get_state(bridge.Groups[group]).get('dim')
                        state = 1
                    elif args.state == "off":
                        dim = None
                        state = 0
                    elif args.dim <= 255 and args.dim >= 0:
                        dim = args.dim
                        state = 1
                    else:
                        print """Invalid dim specified.
Dim must be between 0 and 255"""
                        sys.exit(1)
                    bridge.group_set_state(bridge.Groups[group],state=state,dim=dim)
                sys.exit(0)
    scan(args, on_switch, on_motion, on_bridge)
    # If we got here, we didn't find anything
    print "No device or group found with that name."
    sys.exit(1)
def maker(args):
if args.state.lower() in ("on", "1", "true"):
state = "on"
elif args.state.lower() in ("off", "0", "false"):
state = "off"
elif args.state.lower() == "toggle":
state = "toggle"
elif args.state.lower() == "sensor":
state = "sensor"
elif args.state.lower() == "switch":
state = "switch"
else:
print """No valid action specified.
Usage: wemo maker NAME (on|off|toggle|sensor|switch)"""
sys.exit(1)
device_name = args.device
alias = WemoConfiguration().aliases.get(device_name)
if alias:
matches = lambda x:x == alias
elif device_name:
matches = matcher(device_name)
else:
matches = NOOP
def on_switch(maker):
return
def on_motion(maker):
return
def on_maker(maker):
if matches(maker.name):
if state == "toggle":
found_state = maker.get_state(force_update=True)
maker.set_state(not found_state)
elif state == "sensor":
if maker.has_sensor:
if args.human_readable:
if maker.sensor_state:
sensorstate = 'Sensor not triggered'
else:
sensorstate = 'Sensor triggered'
print sensorstate
else:
print maker.sensor_state
else:
print "Sensor not present"
elif state == "switch":
if maker.switch_mode:
print "Momentary Switch"
else:
print _state(maker, args.human_readable)
else:
getattr(maker, state)()
sys.exit(0)
scan(args, on_switch, on_motion, on_maker)
# If we got here, we didn't find anything
print "No device found with that name."
sys.exit(1)
def list_(args):
    """CLI handler: print one line per device found in the environment."""
    def on_switch(switch):
        print "Switch:", switch.name
    def on_motion(motion):
        print "Motion:", motion.name
    def on_maker(maker):
        print "Maker:", maker.name
    def on_bridge(bridge):
        print "Bridge:", bridge.name
        # Bridges expose their lights/groups only after enumeration.
        bridge.bridge_get_lights()
        for group in bridge.Groups:
            print "Group:", group
        for light in bridge.Lights:
            print "Light:", light
    scan(args, on_switch, on_motion, on_bridge, on_maker)
def status(args):
def on_switch(switch):
print "Switch:", switch.name, '\t', _state(switch, args.human_readable)
def on_motion(motion):
print "Motion:", motion.name, '\t', _state(motion, args.human_readable)
def on_maker(maker):
if maker.switch_mode:
print "Maker:", maker.name, '\t', "Momentary State:", _state(maker, args.human_readable)
else:
print "Maker:", maker.name, '\t', "Persistent State:", _state(maker, args.human_readable)
if maker.has_sensor:
if args.human_readable:
if maker.sensor_state:
sensorstate = 'Sensor not triggered'
else:
sensorstate = 'Sensor triggered'
print '\t\t\t', "Sensor:", sensorstate
else:
print '\t\t\t', "Sensor:", maker.sensor_state
else:
print '\t\t\t' "Sensor not present"
def on_bridge(bridge):
print "Bridge:", bridge.name, '\t', _state(bridge, args.human_readable)
bridge.bridge_get_lights()
for light in bridge.Lights:
print "Light:", light, '\t', bridge.light_get_state(bridge.Lights[light])
for group in bridge.Groups:
print "Group:", group, '\t', bridge.group_get_state(bridge.Groups[group])
scan(args, on_switch, on_motion, on_bridge, on_maker)
def clear(args):
    """CLI handler: delete the on-disk device cache files (~/.wemo)."""
    for fname in 'cache', 'cache.db':
        filename = in_home('.wemo', fname)
        try:
            os.remove(filename)
        except OSError:
            # File didn't exist; cache must be clear
            pass
    print "Device cache cleared."
def server(args):
    """CLI handler: run the SocketIO API server and web application."""
    # Imported lazily so the CLI works without the server dependencies.
    from socketio.server import SocketIOServer
    from ouimeaux.server import app, initialize
    initialize(bind=getattr(args, 'bind', None))
    level = logging.INFO
    if getattr(args, 'debug', False):
        level = logging.DEBUG
    logging.basicConfig(level=level)
    try:
        # TODO: Move this to configuration
        listen = WemoConfiguration().listen or '0.0.0.0:5000'
        try:
            host, port = listen.split(':')
        except Exception:
            print "Invalid bind address configuration:", listen
            sys.exit(1)
        SocketIOServer((host, int(port)), app,
                       policy_server=False,
                       namespace="socket.io").serve_forever()
    except (KeyboardInterrupt, SystemExit):
        sys.exit(0)
def wemo():
    """Console entry point: build the argument parser, then dispatch to the
    subcommand handler recorded via set_defaults(func=...)."""
    import ouimeaux.utils
    # Disable request retries for snappier command-line behavior.
    ouimeaux.utils._RETRIES = 0
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--bind", default=None,
                        help="ip:port to which to bind the response server."
                        " Default is localhost:54321")
    parser.add_argument("-d", "--debug", action="store_true", default=False,
                        help="Enable debug logging")
    parser.add_argument("-e", "--exact-match", action="store_true",
                        default=False,
                        help="Disable fuzzy matching for device names")
    parser.add_argument("-f", "--no-cache", dest="use_cache", default=None,
                        action="store_false",
                        help="Disable the device cache")
    parser.add_argument("-v", "--human-readable", dest="human_readable",
                        action="store_true", default=False,
                        help="Print statuses as human-readable words")
    parser.add_argument("-t", "--timeout", type=int, default=5,
                        help="Time in seconds to allow for discovery")
    # One subparser per CLI verb; each records its handler in args.func.
    subparsers = parser.add_subparsers()
    clearparser = subparsers.add_parser("clear",
                                        help="Clear the device cache")
    clearparser.set_defaults(func=clear)
    statusparser = subparsers.add_parser("status",
                                         help="Print status of WeMo devices")
    statusparser.set_defaults(func=status)
    stateparser = subparsers.add_parser("switch",
                                        help="Turn a WeMo Switch on or off")
    stateparser.add_argument("device", help="Name or alias of the device")
    stateparser.add_argument("state", help="'on' or 'off'")
    stateparser.set_defaults(func=switch)
    makerparser = subparsers.add_parser("maker",
                                        help="Get sensor or switch state of a Maker or Turn on or off")
    makerparser.add_argument("device", help="Name or alias of the device")
    makerparser.add_argument("state", help="'on' or 'off' or 'toggle' or 'sensor' or 'switch'")
    makerparser.set_defaults(func=maker)
    stateparser = subparsers.add_parser("light",
                                        help="Turn a WeMo LED light on or off")
    stateparser.add_argument("name", help="Name or alias of the device or group")
    stateparser.add_argument("state", help="'on' or 'off'")
    stateparser.add_argument("dim", nargs='?', type=int,
                             help="Dim value 0 to 255")
    stateparser.set_defaults(func=light)
    listparser = subparsers.add_parser("list",
                                       help="List all devices found in the environment")
    listparser.set_defaults(func=list_)
    serverparser = subparsers.add_parser("server",
                                         help="Run the API server and web app")
    serverparser.set_defaults(func=server)
    args = parser.parse_args()
    if getattr(args, 'debug', False):
        logging.basicConfig(level=logging.DEBUG)
    args.func(args)
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances \
import forms as project_forms
from openstack_dashboard.dashboards.admin.instances \
import tables as project_tables
from openstack_dashboard.dashboards.project.instances import views
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
# re-use console from project.instances.views to make reflection work
def console(args, **kvargs):
    """Proxy to the project-level console view (defined here so URL
    reflection resolves it under the admin module)."""
    return views.console(args, **kvargs)
# re-use vnc from project.instances.views to make reflection work
def vnc(args, **kvargs):
    """Proxy to the project-level vnc view."""
    return views.vnc(args, **kvargs)
# re-use spice from project.instances.views to make reflection work
def spice(args, **kvargs):
    """Proxy to the project-level spice view."""
    return views.spice(args, **kvargs)
# re-use rdp from project.instances.views to make reflection work
def rdp(args, **kvargs):
    """Proxy to the project-level rdp view."""
    return views.rdp(args, **kvargs)
class AdminUpdateView(views.UpdateView):
    """Admin variant of the instance-update view: uses the admin workflow
    and returns to the admin instances index on success."""
    workflow_class = update_instance.AdminUpdateInstance
    success_url = reverse_lazy("horizon:admin:instances:index")
class AdminIndexView(tables.DataTableView):
    """Admin instances table: lists instances across all tenants, annotated
    with flavor and tenant-name information."""
    table_class = project_tables.AdminInstancesTable
    template_name = 'admin/instances/index.html'
    page_title = _("Instances")
    def has_more_data(self, table):
        # self._more is set by get_data(); True when another page exists.
        return self._more
    def get_data(self):
        """Fetch the (paginated, all-tenants) instance list and attach
        full_flavor and tenant_name attributes to each instance."""
        instances = []
        marker = self.request.GET.get(
            project_tables.AdminInstancesTable._meta.pagination_param, None)
        search_opts = self.get_filters({'marker': marker, 'paginate': True})
        # Gather our tenants to correlate against IDs
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _('Unable to retrieve instance project information.')
            exceptions.handle(self.request, msg)
        if 'project' in search_opts:
            # Nova filters by tenant_id, so translate a project-name filter
            # into the (first) matching tenant id.
            ten_filter_ids = [t.id for t in tenants
                              if t.name == search_opts['project']]
            del search_opts['project']
            if len(ten_filter_ids) > 0:
                search_opts['tenant_id'] = ten_filter_ids[0]
            else:
                # No tenant by that name: nothing to display.
                self._more = False
                return []
        try:
            instances, self._more = api.nova.server_list(
                self.request,
                search_opts=search_opts,
                all_tenants=True)
        except Exception:
            self._more = False
            exceptions.handle(self.request,
                              _('Unable to retrieve instance list.'))
        if instances:
            try:
                api.network.servers_update_addresses(self.request, instances,
                                                     all_tenants=True)
            except Exception:
                # Address info is non-essential; keep rendering the table.
                exceptions.handle(
                    self.request,
                    message=_('Unable to retrieve IP addresses from Neutron.'),
                    ignore=True)
        # Gather our flavors to correlate against IDs
        try:
            flavors = api.nova.flavor_list(self.request)
        except Exception:
            # If fails to retrieve flavor list, creates an empty list.
            flavors = []
        full_flavors = OrderedDict([(f.id, f) for f in flavors])
        tenant_dict = OrderedDict([(t.id, t) for t in tenants])
        # Loop through instances to get flavor and tenant info.
        for inst in instances:
            flavor_id = inst.flavor["id"]
            try:
                if flavor_id in full_flavors:
                    inst.full_flavor = full_flavors[flavor_id]
                else:
                    # If the flavor_id is not in full_flavors list,
                    # gets it via nova api.
                    inst.full_flavor = api.nova.flavor_get(
                        self.request, flavor_id)
            except Exception:
                msg = _('Unable to retrieve instance size information.')
                exceptions.handle(self.request, msg)
            tenant = tenant_dict.get(inst.tenant_id, None)
            inst.tenant_name = getattr(tenant, "name", None)
        return instances
    def get_filters(self, filters):
        # Merge the table's server-side (API) filter, if any, into *filters*.
        filter_field = self.table.get_filter_field()
        filter_action = self.table._meta._filter_action
        if filter_action.is_api_filter(filter_field):
            filter_string = self.table.get_filter_string()
            if filter_field and filter_string:
                filters[filter_field] = filter_string
        return filters
class LiveMigrateView(forms.ModalFormView):
    """Modal form for live-migrating an instance to another hypervisor host."""
    form_class = project_forms.LiveMigrateForm
    template_name = 'admin/instances/live_migrate.html'
    context_object_name = 'instance'
    success_url = reverse_lazy("horizon:admin:instances:index")
    page_title = _("Live Migrate")
    success_label = page_title
    def get_context_data(self, **kwargs):
        context = super(LiveMigrateView, self).get_context_data(**kwargs)
        context["instance_id"] = self.kwargs['instance_id']
        return context
    @memoized.memoized_method
    def get_hosts(self, *args, **kwargs):
        """Return the hypervisor host list (memoized); redirect on failure."""
        try:
            return api.nova.host_list(self.request)
        except Exception:
            redirect = reverse("horizon:admin:instances:index")
            msg = _('Unable to retrieve host information.')
            exceptions.handle(self.request, msg, redirect=redirect)
    @memoized.memoized_method
    def get_object(self, *args, **kwargs):
        """Return the instance being migrated (memoized); redirect on failure."""
        instance_id = self.kwargs['instance_id']
        try:
            return api.nova.server_get(self.request, instance_id)
        except Exception:
            redirect = reverse("horizon:admin:instances:index")
            msg = _('Unable to retrieve instance details.')
            exceptions.handle(self.request, msg, redirect=redirect)
    def get_initial(self):
        initial = super(LiveMigrateView, self).get_initial()
        _object = self.get_object()
        if _object:
            # OS-EXT-SRV-ATTR:host is the host currently running the instance.
            current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '')
            initial.update({'instance_id': self.kwargs['instance_id'],
                            'current_host': current_host,
                            'hosts': self.get_hosts()})
        return initial
class DetailView(views.DetailView):
    """Admin variant of the instance detail view (admin URLs and actions)."""
    redirect_url = 'horizon:admin:instances:index'
    image_url = 'horizon:admin:images:detail'
    volume_url = 'horizon:admin:volumes:volumes:detail'
    def _get_actions(self, instance):
        """Render the admin instances table's row actions for *instance*."""
        table = project_tables.AdminInstancesTable(self.request)
        return table.render_row_actions(instance)
| |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
# Directory containing this script; the package name is derived from it.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Parsed command-line options, populated in main().
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
# Content root on the target device and this package's test directory.
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    """Run *cmd* in a shell, echoing its output line by line.

    Returns a tuple ``(exit_code, output_lines)``.

    Fixes: Python-2-only ``print`` statement replaced with the function
    form; ``universal_newlines=True`` added so stdout is text (plain str)
    on both Python 2 and 3; ``!= None`` replaced with ``is not None``.
    """
    # Do not need handle timeout in this short script, let tool do it
    print("-->> \"%s\"" % cmd)
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        shell=True, universal_newlines=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop once the process has exited and its output is drained.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Wrap package-manager commands so they run as the target user with
    the session bus environment; other commands pass through unchanged."""
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Ask the target device for the numeric uid of the configured user.

    Returns doCMD()'s (exit_code, output_lines) tuple.
    """
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell id -u %s" % (
            PARAMETERS.device, PARAMETERS.user)
    else:
        remote = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
    return doCMD(remote)
def getPKGID(pkg_name=None):
    """Return the installed package id matching *pkg_name*, or None.

    Parses ``pkgcmd -l`` output, whose data lines have six bracketed
    fields; the name is field 5 and the id field 3.

    Fixes: Python-2-only ``print`` statements replaced with the function
    form; lines with fewer than six fields are now skipped (the original
    only skipped exactly-four-field lines and could IndexError on other
    short lines).
    """
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None
    test_pkg_id = None
    for line in output:
        pkg_infos = line.split()
        # Skip header/short lines that don't carry the 6 expected fields.
        if len(pkg_infos) < 6:
            continue
        name = pkg_infos[5]
        name = name.lstrip('[').rstrip(']')
        print("name is: %s" % name)
        if pkg_name == name:
            test_pkg_id = pkg_infos[3]
            test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
            print(test_pkg_id)
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device over sdb or ssh and return
    doCMD()'s (exit_code, output_lines) tuple."""
    wrapped = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, wrapped)
    else:
        remote = "ssh %s \"%s\"" % (PARAMETERS.device, wrapped)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy *src* to *dest* on the target device.

    Returns True on success, False on failure.

    Fixes: the original returned True when the copy command FAILED and
    False when it succeeded, so every caller's ``if not doRemoteCopy(...)``
    check marked successful copies as failures (and vice versa).
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush device filesystem buffers before relying on the copied files.
    doRemoteCMD("sync")
    return return_code == 0
def uninstPKGs():
    """Uninstall every .wgt widget found under SCRIPT_DIR and remove the
    package's test directory from the device.

    Returns True when everything succeeded, False otherwise.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if not file.endswith(".wgt"):
                continue
            pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
            if not pkg_id:
                action_status = False
                continue
            (return_code, output) = doRemoteCMD(
                "pkgcmd -u -t wgt -q -n %s" % pkg_id)
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    (return_code, output) = doRemoteCMD(
        "rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    return action_status
def instPKGs():
    """Create the package directory on the device, push and install the
    widget, then copy the remaining test resources.

    Returns True when everything succeeded, False otherwise.
    """
    action_status = True
    (return_code, output) = doRemoteCMD(
        "mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False
    for root, dirs, files in os.walk(SCRIPT_DIR):
        if root.endswith("mediasrc"):
            continue
        for file in files:
            if not file.endswith("%s.wgt" % PKG_NAME):
                continue
            if not doRemoteCopy(os.path.join(root, file),
                                "%s/%s" % (SRC_DIR, file)):
                action_status = False
            (return_code, output) = doRemoteCMD(
                "pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
            # The pushed .wgt is no longer needed once installed.
            doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
            for line in output:
                if "Failure" in line:
                    action_status = False
                    break
    # Do some special copy/delete... steps
    # (return_code, output) = doRemoteCMD(
    #     "mkdir -p %s/tests" % PKG_SRC_DIR)
    # if return_code != 0:
    #     action_status = False
    # if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
    #     action_status = False
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        if item.endswith("inst.py"):
            continue
        item_name = os.path.basename(item)
        if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
            action_status = False
    return action_status
def main():
    """Parse command-line options, resolve the target device and user,
    and run the requested install (-i) or uninstall (-u) action.

    Fixes: Python-2-only ``except Exception, e`` and ``print`` statements
    replaced with the py2/py3-compatible forms; the unbound-function
    ``str.find(line, ...)`` call replaced with the ``in`` operator.
    """
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print("Got wrong option: %s, exit ..." % e)
        sys.exit(1)
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"
    if PARAMETERS.mode == "SDB":
        # Auto-pick the first connected sdb device if none was given.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if "\tdevice" in line:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"
    if not PARAMETERS.device:
        print("No device provided")
        sys.exit(1)
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0:
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(userid)
    else:
        print("[Error] cmd commands error : %s" % str(user_info[1]))
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print("-i and -u are conflict")
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
# Script entry point: failures exit with status 1 inside main(); reaching
# this line means success.
if __name__ == "__main__":
    main()
    sys.exit(0)
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Aggregate(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """Versioned object representing a host-aggregate database record.

    Wraps the aggregate DB API and emits ``aggregate.*`` notifications
    around create/update operations.
    """
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    VERSION = '1.1'
    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
    }
    # Computed attribute; see the availability_zone property below.
    obj_extra_fields = ['availability_zone']
    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        """Populate *aggregate* from a DB row and reset change tracking."""
        for key in aggregate.fields:
            if key == 'metadata':
                # The DB layer exposes metadata under the 'metadetails' key.
                db_key = 'metadetails'
            else:
                db_key = key
            aggregate[key] = db_aggregate[db_key]
        aggregate._context = context
        aggregate.obj_reset_changes()
        return aggregate
    def _assert_no_hosts(self, action):
        # Hosts must be managed via add_host()/delete_host(); refuse to
        # persist a directly-modified 'hosts' field.
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(
                action=action,
                reason='hosts updated inline')
    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        """Load an Aggregate by its database id."""
        db_aggregate = db.aggregate_get(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)
    @base.remotable
    def create(self):
        """Persist a new aggregate, emitting create.start/end notifications.

        Raises ObjectActionError if the object was already created or if
        'hosts' was modified inline.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._assert_no_hosts('create')
        updates = self.obj_get_changes()
        payload = dict(updates)
        if 'metadata' in updates:
            # NOTE(danms): For some reason the notification format is weird
            payload['meta_data'] = payload.pop('metadata')
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.start",
                                                    payload)
        # Metadata is passed separately to aggregate_create, not as a column.
        metadata = updates.pop('metadata', None)
        db_aggregate = db.aggregate_create(self._context, updates,
                                           metadata=metadata)
        self._from_db_object(self._context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "create.end",
                                                    payload)
    @base.remotable
    def save(self):
        """Persist changed fields, emitting updateprop.start/end
        notifications; hosts changes are rejected."""
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()
        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            payload['meta_data'] = updates['metadata']
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.start",
                                                    payload)
        updates.pop('id', None)
        db_aggregate = db.aggregate_update(self._context, self.id, updates)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updateprop.end",
                                                    payload)
        self._from_db_object(self._context, self, db_aggregate)
    @base.remotable
    def update_metadata(self, updates):
        """Apply a dict of metadata changes; a None value deletes the key."""
        payload = {'aggregate_id': self.id,
                   'meta_data': updates}
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.start",
                                                    payload)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                # Deletion is best-effort: missing keys are ignored both in
                # the DB and in the local copy.
                try:
                    db.aggregate_metadata_delete(self._context, self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                try:
                    self.metadata.pop(key)
                except KeyError:
                    pass
            else:
                to_add[key] = value
                self.metadata[key] = value
        db.aggregate_metadata_add(self._context, self.id, to_add)
        compute_utils.notify_about_aggregate_update(self._context,
                                                    "updatemetadata.end",
                                                    payload)
        self.obj_reset_changes(fields=['metadata'])
    @base.remotable
    def destroy(self):
        """Delete this aggregate from the database."""
        db.aggregate_delete(self._context, self.id)
    @base.remotable
    def add_host(self, host):
        """Add *host* to the aggregate in the DB and mirror it locally."""
        db.aggregate_host_add(self._context, self.id, host)
        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        self.obj_reset_changes(fields=['hosts'])
    @base.remotable
    def delete_host(self, host):
        """Remove *host* from the aggregate in the DB and locally."""
        db.aggregate_host_delete(self._context, self.id, host)
        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])
    @property
    def availability_zone(self):
        # The AZ is stored as ordinary metadata, not a first-class column.
        return self.metadata.get('availability_zone', None)
@base.NovaObjectRegistry.register
class AggregateList(base.ObjectListBase, base.NovaObject):
    """A list of Aggregate objects with bulk query helpers."""
    # Version 1.0: Initial version
    # Version 1.1: Added key argument to get_by_host()
    # Aggregate <= version 1.1
    # Version 1.2: Added get_by_metadata_key
    VERSION = '1.2'
    fields = {
        'objects': fields.ListOfObjectsField('Aggregate'),
    }
    # NOTE(danms): Aggregate was at 1.1 before we added this
    obj_relationships = {
        'objects': [('1.0', '1.1'), ('1.1', '1.1'), ('1.2', '1.1')],
    }
    @classmethod
    def _filter_db_aggregates(cls, db_aggregates, hosts):
        """Keep only aggregates containing at least one host in *hosts*."""
        if not isinstance(hosts, set):
            hosts = set(hosts)
        filtered_aggregates = []
        for db_aggregate in db_aggregates:
            for host in db_aggregate['hosts']:
                if host in hosts:
                    filtered_aggregates.append(db_aggregate)
                    break
        return filtered_aggregates
    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every aggregate in the database."""
        db_aggregates = db.aggregate_get_all(context)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)
    @base.remotable_classmethod
    def get_by_host(cls, context, host, key=None):
        """Return aggregates containing *host*, optionally filtered by a
        metadata key at the DB layer."""
        db_aggregates = db.aggregate_get_by_host(context, host, key=key)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)
    @base.remotable_classmethod
    def get_by_metadata_key(cls, context, key, hosts=None):
        """Return aggregates having metadata *key*, optionally restricted
        to aggregates containing one of *hosts*."""
        db_aggregates = db.aggregate_get_by_metadata_key(context, key=key)
        if hosts is not None:
            db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates)
| |
import sys
from django.db import models
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.fields.related import (
ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel, RelatedField
)
from django.core.mail import EmailMultiAlternatives, EmailMessage
try:
# Django >= 1.9
from django.utils.module_loading import import_module
except ImportError:
from django.utils.importlib import import_module
import hashlib
from django.conf import settings
# Python 2/3 compatibility shims: normalize the string types used for
# isinstance() checks elsewhere in this module.
_ver = sys.version_info
is_py2 = (_ver[0] == 2)
is_py3 = (_ver[0] == 3)
if is_py2:
    basestring = basestring
    unicode = unicode
elif is_py3:
    basestring = (str, bytes)
    unicode = str
def get_fields(Model,
               parent_field="",
               model_stack=None,
               stack_limit=3,
               excludes=['permissions', 'comment', 'content_type']):
    """
    Given a Model, return a list of lists of strings with important stuff:
    ...
    ['test_user__user__customuser', 'customuser', 'User', 'RelatedObject']
    ['test_user__unique_id', 'unique_id', 'TestUser', 'CharField']
    ['test_user__confirmed', 'confirmed', 'TestUser', 'BooleanField']
    ...

    Fixes: ``len([True for ...])`` membership test replaced with ``any()``;
    the redundant isinstance chain collapsed (ManyToManyRel, ManyToOneRel
    and OneToOneRel are all subclasses of ForeignObjectRel); dead debug
    comments removed.
    """
    out_fields = []
    if model_stack is None:
        model_stack = []
    # Accept an "app_label.ModelName" string as well as a model class.
    # github.com/omab/python-social-auth/commit/d8637cec02422374e4102231488481170dc51057
    if isinstance(Model, basestring):
        app_label, model_name = Model.split('.')
        Model = models.get_model(app_label, model_name)
    fields = Model._meta.get_fields()
    model_stack.append(Model)
    # do a variety of checks to ensure recursion isnt being redundant
    stop_recursion = False
    if len(model_stack) > stack_limit:
        # rudimentary CustomUser->User->CustomUser->User detection
        if model_stack[-3] == model_stack[-1]:
            stop_recursion = True
        # stack depth shouldn't exceed x
        if len(model_stack) > 5:
            stop_recursion = True
        # we've hit a point where we are repeating models
        if len(set(model_stack)) != len(model_stack):
            stop_recursion = True
    if stop_recursion:
        return []  # give empty list for "extend"
    for field in fields:
        field_name = field.name
        if isinstance(field, ForeignObjectRel):
            # Reverse relations are reached via their related query name.
            field_name = field.field.related_query_name()
        if parent_field:
            full_field = "__".join([parent_field, field_name])
        else:
            full_field = field_name
        if any(exclude in full_field for exclude in excludes):
            continue
        # add to the list
        out_fields.append([full_field, field_name, Model, field.__class__])
        # ManyToManyRel/ManyToOneRel/OneToOneRel all subclass
        # ForeignObjectRel, so two checks cover the original five.
        if not stop_recursion and \
                isinstance(field, (ForeignObjectRel, RelatedField)):
            if not isinstance(field, ForeignObjectRel):
                RelModel = field.model
            else:
                RelModel = field.related_model
            out_fields.extend(get_fields(RelModel, full_field, list(model_stack)))
    return out_fields
def give_model_field(full_field, Model):
    """
    Given a field_name and Model:
    "test_user__unique_id", <AchievedGoal>
    Returns "test_user__unique_id", "id", <Model>, <ModelField>
    """
    for full_key, name, _Model, _ModelField in get_fields(Model, '', []):
        if full_key == full_field:
            return full_key, name, _Model, _ModelField
    raise Exception('Field key `{0}` not found on `{1}`.'.format(full_field, Model.__name__))
def get_simple_fields(Model, **kwargs):
    """Return [field_path, field_class_name] pairs for *Model*."""
    return [[path, klass.__name__]
            for path, _name, _model, klass in get_fields(Model, **kwargs)]
def chunked(iterator, chunksize):
    """
    Yields items from 'iterator' in chunks of size 'chunksize'.
    >>> list(chunked([1, 2, 3, 4, 5], chunksize=2))
    [[1, 2], [3, 4], [5]]

    Fix: the docstring example previously showed tuples, but the
    generator has always yielded lists.
    """
    chunk = []
    for idx, item in enumerate(iterator, 1):
        chunk.append(item)
        if idx % chunksize == 0:
            yield chunk
            chunk = []
    # Trailing partial chunk, if any.
    if chunk:
        yield chunk
def email_to_dict(message):
    """Serialize an email message object into a plain dict; dicts are
    passed through unchanged."""
    if isinstance(message, dict):
        return message
    message_dict = {
        'subject': message.subject,
        'body': message.body,
        'from_email': message.from_email,
        'to': message.to,
        # 'bcc': message.bcc,
        # ignore connection
        # TODO: need attachments and headers passed correctly
        'attachments': message.attachments,
        'headers': message.extra_headers,
        # 'cc': message.cc
        # 'user_id': message.user.id,
        # 'drip_id': message.drip.id
    }
    # Django 1.8 support
    # https://docs.djangoproject.com/en/1.8/topics/email/#django.core.mail.EmailMessage
    if hasattr(message, 'reply_to'):
        message_dict['reply_to'] = message.reply_to
    if hasattr(message, 'alternatives'):
        message_dict['alternatives'] = message.alternatives
    # Only record subtypes that differ from EmailMessage's defaults.
    if message.content_subtype != EmailMessage.content_subtype:
        message_dict["content_subtype"] = message.content_subtype
    if message.mixed_subtype != EmailMessage.mixed_subtype:
        message_dict["mixed_subtype"] = message.mixed_subtype
    return message_dict
def dict_to_email(messagedict):
    """Rebuild an email object from a dict produced by email_to_dict();
    already-constructed message objects are returned as-is."""
    content_subtype = None
    mixed_subtype = None
    if isinstance(messagedict, dict):
        # Subtype keys are not EmailMessage constructor kwargs, so pull
        # them out before building the object.
        content_subtype = messagedict.pop("content_subtype", None)
        mixed_subtype = messagedict.pop("mixed_subtype", None)
    if hasattr(messagedict, 'from_email'):
        ret = messagedict
    elif 'alternatives' in messagedict:
        ret = EmailMultiAlternatives(**messagedict)
    else:
        ret = EmailMessage(**messagedict)
    if content_subtype:
        ret.content_subtype = content_subtype
        messagedict["content_subtype"] = content_subtype  # bring back content subtype for 'retry'
    if mixed_subtype:
        ret.mixed_subtype = mixed_subtype
        messagedict["mixed_subtype"] = mixed_subtype  # bring back mixed subtype for 'retry'
    return ret
def class_for(path):
    """Import the module portion of dotted *path* and return the named
    attribute (typically a class)."""
    mod_name, klass_name = path.rsplit('.', 1)
    module = import_module(mod_name)
    return getattr(module, klass_name)
def get_token_for_email(email):
    """Return a stable md5 hex token (as bytes) derived from *email* and
    the project SECRET_KEY."""
    digest = hashlib.md5(
        email.encode('utf-8') + settings.SECRET_KEY.encode('utf-8')
    ).hexdigest()
    return digest.encode('utf-8')
| |
import unittest
from test import support
import collections, random, string
import collections.abc
import gc, weakref
import pickle
class DictTest(unittest.TestCase):
    def test_invalid_keyword_arguments(self):
        # dict() and dict.update() must reject non-string keyword keys,
        # for plain dicts and dict subclasses alike.
        class Custom(dict):
            pass
        for invalid in {1 : 2}, Custom({1 : 2}):
            with self.assertRaises(TypeError):
                dict(**invalid)
            with self.assertRaises(TypeError):
                {}.update(**invalid)
    def test_constructor(self):
        # calling built-in types without argument must return empty
        self.assertEqual(dict(), {})
        # ...and a fresh object each call, not a shared singleton.
        self.assertIsNot(dict(), {})
    def test_literal_constructor(self):
        # check literal constructor for different sized dicts
        # (to exercise the BUILD_MAP oparg).
        for n in (0, 1, 6, 256, 400):
            items = [(''.join(random.sample(string.ascii_letters, 8)), i)
                     for i in range(n)]
            random.shuffle(items)
            formatted_items = ('{!r}: {:d}'.format(k, v) for k, v in items)
            dictliteral = '{' + ', '.join(formatted_items) + '}'
            # The evaluated literal must equal dict() built from the pairs.
            self.assertEqual(eval(dictliteral), dict(items))
    def test_bool(self):
        # An empty dict is falsy; any non-empty dict is truthy.
        self.assertIs(not {}, True)
        self.assertTrue({1: 2})
        self.assertIs(bool({}), False)
        self.assertIs(bool({1: 2}), True)
    def test_keys(self):
        # keys() views reflect membership, reject extra arguments, and
        # have a stable repr.
        d = {}
        self.assertEqual(set(d.keys()), set())
        d = {'a': 1, 'b': 2}
        k = d.keys()
        self.assertEqual(set(k), {'a', 'b'})
        self.assertIn('a', k)
        self.assertIn('b', k)
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertRaises(TypeError, d.keys, None)
        self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
    def test_values(self):
        # values() views mirror the dict's contents and reject arguments.
        d = {}
        self.assertEqual(set(d.values()), set())
        d = {1:2}
        self.assertEqual(set(d.values()), {2})
        self.assertRaises(TypeError, d.values, None)
        self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
    def test_items(self):
        # items() views yield (key, value) pairs and reject arguments.
        d = {}
        self.assertEqual(set(d.items()), set())
        d = {1:2}
        self.assertEqual(set(d.items()), {(1, 2)})
        self.assertRaises(TypeError, d.items, None)
        self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
    def test_contains(self):
        # 'in' / 'not in' test key membership; __contains__ needs an arg.
        d = {}
        self.assertNotIn('a', d)
        self.assertFalse('a' in d)
        self.assertTrue('a' not in d)
        d = {'a': 1, 'b': 2}
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertNotIn('c', d)
        self.assertRaises(TypeError, d.__contains__)
    def test_len(self):
        # len() counts keys.
        d = {}
        self.assertEqual(len(d), 0)
        d = {'a': 1, 'b': 2}
        self.assertEqual(len(d), 2)
    def test_getitem(self):
        # Subscription: get/set/delete, and propagation of errors raised
        # by key __eq__/__hash__ during lookup.
        d = {'a': 1, 'b': 2}
        self.assertEqual(d['a'], 1)
        self.assertEqual(d['b'], 2)
        d['c'] = 3
        d['a'] = 4
        self.assertEqual(d['c'], 3)
        self.assertEqual(d['a'], 4)
        del d['b']
        self.assertEqual(d, {'a': 4, 'c': 3})
        self.assertRaises(TypeError, d.__getitem__)
        class BadEq(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 24
        d = {}
        d[BadEq()] = 42
        # 23 hashes differently, so BadEq.__eq__ is never called here.
        self.assertRaises(KeyError, d.__getitem__, 23)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.__getitem__, x)
    def test_clear(self):
        # clear() empties in place and takes no arguments.
        d = {1:1, 2:2, 3:3}
        d.clear()
        self.assertEqual(d, {})
        self.assertRaises(TypeError, d.clear, None)
    def test_update(self):
        # update() accepts dicts, no arguments, mappings exposing keys(),
        # and item sequences; errors from bad mappings/iterators propagate.
        d = {}
        d.update({1:100})
        d.update({2:20})
        d.update({1:1, 2:2, 3:3})
        self.assertEqual(d, {1:1, 2:2, 3:3})
        d.update()
        self.assertEqual(d, {1:1, 2:2, 3:3})
        self.assertRaises((TypeError, AttributeError), d.update, None)
        class SimpleUserDict:
            def __init__(self):
                self.d = {1:1, 2:2, 3:3}
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        self.assertEqual(d, {1:1, 2:2, 3:3})
        class Exc(Exception): pass
        d.clear()
        # keys() itself raises.
        class FailingUserDict:
            def keys(self):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        # The key iterator raises mid-iteration.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = 1
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i:
                            self.i = 0
                            return 'a'
                        raise Exc
                return BogonIter()
            def __getitem__(self, key):
                return key
        self.assertRaises(Exc, d.update, FailingUserDict())
        # __getitem__ raises while fetching a value.
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = ord('a')
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i <= ord('z'):
                            rtn = chr(self.i)
                            self.i += 1
                            return rtn
                        raise StopIteration
                return BogonIter()
            def __getitem__(self, key):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        class badseq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, {}.update, badseq())
        # Item tuples must have exactly two elements.
        self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
    def test_fromkeys(self):
        # fromkeys() builds a new mapping (honoring subclass __new__) and
        # propagates errors from bad iterables, __init__ and __setitem__.
        self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        d = {}
        self.assertIsNot(d.fromkeys('abc'), d)
        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
        self.assertEqual(d.fromkeys([]), {})
        def g():
            yield 1
        self.assertEqual(d.fromkeys(g()), {1:None})
        self.assertRaises(TypeError, {}.fromkeys, 3)
        class dictlike(dict): pass
        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
        self.assertIsInstance(dictlike.fromkeys('a'), dictlike)
        self.assertIsInstance(dictlike().fromkeys('a'), dictlike)
        # __new__ may return a non-dict mapping; fromkeys must respect it.
        class mydict(dict):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
        self.assertRaises(TypeError, dict.fromkeys)
        class Exc(Exception): pass
        class baddict1(dict):
            def __init__(self):
                raise Exc()
        self.assertRaises(Exc, baddict1.fromkeys, [1])
        class BadSeq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, dict.fromkeys, BadSeq())
        class baddict2(dict):
            def __setitem__(self, key, value):
                raise Exc()
        self.assertRaises(Exc, baddict2.fromkeys, [1])
        # test fast path for dictionary inputs
        d = dict(zip(range(6), range(6)))
        self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
        # The dict fast path must not bypass a subclass __new__ either.
        class baddict3(dict):
            def __new__(cls):
                return d
        d = {i : i for i in range(10)}
        res = d.copy()
        res.update(a=None, b=None, c=None)
        self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
    def test_copy(self):
        # copy() returns an equal shallow copy and takes no arguments.
        d = {1:1, 2:2, 3:3}
        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
        self.assertEqual({}.copy(), {})
        self.assertRaises(TypeError, d.copy, None)
    def test_get(self):
        # get() returns None or the given default for missing keys and
        # accepts at most two arguments.
        d = {}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        d = {'a': 1, 'b': 2}
        self.assertIs(d.get('c'), None)
        self.assertEqual(d.get('c', 3), 3)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('a', 3), 1)
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)
    def test_setdefault(self):
        # dict.setdefault()
        d = {}
        self.assertIs(d.setdefault('key0'), None)
        # The key already exists, so its None value is kept.
        d.setdefault('key0', [])
        self.assertIs(d.setdefault('key0'), None)
        d.setdefault('key', []).append(3)
        self.assertEqual(d['key'][0], 3)
        d.setdefault('key', []).append(4)
        self.assertEqual(len(d['key']), 2)
        self.assertRaises(TypeError, d.setdefault)
        # Errors from the key's __hash__ must propagate.
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.setdefault, x, [])
    def test_setdefault_atomic(self):
        # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        y = {hashed1: 5}
        hashed2 = Hashed()
        # Colliding hashes force exactly one equality comparison.
        y.setdefault(hashed2, [])
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_setitem_atomic_at_resize(self):
        # __setitem__ must not re-invoke __hash__/__eq__ even when the
        # insertion triggers an internal table resize.
        class Hashed(object):
            def __init__(self):
                self.hash_count = 0
                self.eq_count = 0
            def __hash__(self):
                self.hash_count += 1
                return 42
            def __eq__(self, other):
                self.eq_count += 1
                return id(self) == id(other)
        hashed1 = Hashed()
        # 5 items
        y = {hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3}
        hashed2 = Hashed()
        # 6th item forces a resize
        y[hashed2] = []
        self.assertEqual(hashed1.hash_count, 1)
        self.assertEqual(hashed2.hash_count, 1)
        self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
    def test_popitem(self):
        # dict.popitem()
        # Drain dicts of many sizes, built directly or via copy(), and
        # verify each popped pair; popping an empty dict raises KeyError.
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = {}
                b = {}
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    # Structurally identical dicts must pop in the same order.
                    self.assertFalse(copymode < 0 and ta != tb)
                self.assertFalse(a)
                self.assertFalse(b)
        d = {}
        self.assertRaises(KeyError, d.popitem)
    def test_pop(self):
        # Tests for pop with specified key
        d = {}
        k, v = 'abc', 'def'
        d[k] = v
        self.assertRaises(KeyError, d.pop, 'ghi')
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
        # A default suppresses the KeyError for missing keys.
        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)
        self.assertRaises(TypeError, d.pop)
        # Errors from the key's __hash__ must propagate.
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
    def test_mutating_iteration(self):
        # changing dict size during iteration
        d = {}
        d[1] = 1
        with self.assertRaises(RuntimeError):
            for i in d:
                d[i+1] = 1
    def test_mutating_lookup(self):
        # changing dict during a lookup (issue #14417)
        class NastyKey:
            mutate_dict = None
            def __init__(self, value):
                self.value = value
            def __hash__(self):
                # hash collision!
                return 1
            def __eq__(self, other):
                # Delete an entry from the dict mid-comparison.
                if NastyKey.mutate_dict:
                    mydict, key = NastyKey.mutate_dict
                    NastyKey.mutate_dict = None
                    del mydict[key]
                return self.value == other.value
        key1 = NastyKey(1)
        key2 = NastyKey(2)
        d = {key1: 1}
        NastyKey.mutate_dict = (d, key1)
        # The insertion must survive key1's removal during the probe.
        d[key2] = 2
        self.assertEqual(d, {key2: 2})
    def test_repr(self):
        # repr() of empty, simple and self-referential dicts; errors from
        # a value's __repr__ propagate.
        d = {}
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        d = {}
        d[1] = d
        # Recursive references render as '...'.
        self.assertEqual(repr(d), '{1: {...}}')
        class Exc(Exception): pass
        class BadRepr(object):
            def __repr__(self):
                raise Exc()
        d = {1: BadRepr()}
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        # dict equality compares contents; errors raised by a key's
        # __eq__ during comparison propagate.
        self.assertEqual({}, {})
        self.assertEqual({1: 2}, {1: 2})
        class Exc(Exception): pass
        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1
        d1 = {BadCmp(): 1}
        d2 = {1: 1}
        with self.assertRaises(Exc):
            d1 == d2
    def test_keys_contained(self):
        # Run the subset/superset checks for both keys() and items() views.
        self.helper_keys_contained(lambda x: x.keys())
        self.helper_keys_contained(lambda x: x.items())
    def helper_keys_contained(self, fn):
        # Test rich comparisons against dict key views, which should behave the
        # same as sets.
        empty = fn(dict())
        empty2 = fn(dict())
        smaller = fn({1:1, 2:2})
        larger = fn({1:1, 2:2, 3:3})
        larger2 = fn({1:1, 2:2, 3:3})
        larger3 = fn({4:1, 2:2, 3:3})
        self.assertTrue(smaller <  larger)
        self.assertTrue(smaller <= larger)
        self.assertTrue(larger >  smaller)
        self.assertTrue(larger >= smaller)
        self.assertFalse(smaller >= larger)
        self.assertFalse(smaller >  larger)
        self.assertFalse(larger  <= smaller)
        self.assertFalse(larger  <  smaller)
        # Overlapping but not nested views are unordered.
        self.assertFalse(smaller <  larger3)
        self.assertFalse(smaller <= larger3)
        self.assertFalse(larger3 >  smaller)
        self.assertFalse(larger3 >= smaller)
        # Inequality strictness
        self.assertTrue(larger2 >= larger)
        self.assertTrue(larger2 <= larger)
        self.assertFalse(larger2 > larger)
        self.assertFalse(larger2 < larger)
        self.assertTrue(larger == larger2)
        self.assertTrue(smaller != larger)
        # There is an optimization on the zero-element case.
        self.assertTrue(empty == empty2)
        self.assertFalse(empty != empty2)
        self.assertFalse(empty == smaller)
        self.assertTrue(empty != smaller)
        # With the same size, an elementwise compare happens
        self.assertTrue(larger != larger3)
        self.assertFalse(larger == larger3)
    def test_errors_in_view_containment_check(self):
        # Errors raised by value __eq__ during view comparisons propagate.
        class C:
            def __eq__(self, other):
                raise RuntimeError
        d1 = {1: C()}
        d2 = {1: C()}
        with self.assertRaises(RuntimeError):
            d1.items() == d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() != d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() <= d2.items()
        with self.assertRaises(RuntimeError):
            d1.items() >= d2.items()
        d3 = {1: C(), 2: C()}
        with self.assertRaises(RuntimeError):
            d2.items() < d3.items()
        with self.assertRaises(RuntimeError):
            d3.items() > d2.items()
    def test_dictview_set_operations_on_keys(self):
        # keys() views support set difference, intersection, union, xor.
        k1 = {1:1, 2:2}.keys()
        k2 = {1:1, 2:2, 3:3}.keys()
        k3 = {4:4}.keys()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {1,2})
        self.assertEqual(k2 - k1, {3})
        self.assertEqual(k3 - k1, {4})
        self.assertEqual(k1 & k2, {1,2})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {1,2,3})
        self.assertEqual(k1 ^ k2, {3})
        self.assertEqual(k1 ^ k3, {1,2,4})
    def test_dictview_set_operations_on_items(self):
        # items() views support the same set operations, on (k, v) pairs.
        k1 = {1:1, 2:2}.items()
        k2 = {1:1, 2:2, 3:3}.items()
        k3 = {4:4}.items()
        self.assertEqual(k1 - k2, set())
        self.assertEqual(k1 - k3, {(1,1), (2,2)})
        self.assertEqual(k2 - k1, {(3,3)})
        self.assertEqual(k3 - k1, {(4,4)})
        self.assertEqual(k1 & k2, {(1,1), (2,2)})
        self.assertEqual(k1 & k3, set())
        self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
        self.assertEqual(k1 ^ k2, {(3,3)})
        self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
    def test_dictview_mixed_set_operations(self):
        # Views interoperate with plain sets on both sides of the operator.
        # Just a few for .keys()
        self.assertTrue({1:1}.keys() == {1})
        self.assertTrue({1} == {1:1}.keys())
        self.assertEqual({1:1}.keys() | {2}, {1, 2})
        self.assertEqual({2} | {1:1}.keys(), {1, 2})
        # And a few for .items()
        self.assertTrue({1:1}.items() == {(1,1)})
        self.assertTrue({(1,1)} == {1:1}.items())
        self.assertEqual({1:1}.items() | {2}, {(1,1), 2})
        self.assertEqual({2} | {1:1}.items(), {(1,1), 2})
    def test_missing(self):
        # Make sure dict doesn't have a __missing__ method
        self.assertFalse(hasattr(dict, "__missing__"))
        self.assertFalse(hasattr({}, "__missing__"))
        # Test several cases:
        # (D) subclass defines __missing__ method returning a value
        # (E) subclass defines __missing__ method raising RuntimeError
        # (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
        class D(dict):
            def __missing__(self, key):
                return 42
        d = D({1: 2, 3: 4})
        self.assertEqual(d[1], 2)
        self.assertEqual(d[3], 4)
        # __missing__ affects only subscription, not membership tests.
        self.assertNotIn(2, d)
        self.assertNotIn(2, d.keys())
        self.assertEqual(d[2], 42)
        class E(dict):
            def __missing__(self, key):
                raise RuntimeError(key)
        e = E()
        with self.assertRaises(RuntimeError) as c:
            e[42]
        self.assertEqual(c.exception.args, (42,))
        class F(dict):
            def __init__(self):
                # An instance variable __missing__ should have no effect
                self.__missing__ = lambda key: None
        f = F()
        with self.assertRaises(KeyError) as c:
            f[42]
        self.assertEqual(c.exception.args, (42,))
        class G(dict):
            pass
        g = G()
        with self.assertRaises(KeyError) as c:
            g[42]
        self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = {}
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
    """Dictionary lookups should fail if __eq__() raises an exception."""
    class CustomException(Exception):
        pass

    class BadDictKey:
        def __hash__(self):
            # Constant hash forces an __eq__ comparison against x1 on lookup.
            return hash(self.__class__)

        def __eq__(self, other):
            if isinstance(other, self.__class__):
                raise CustomException
            return other

    d = {}
    x1 = BadDictKey()
    x2 = BadDictKey()
    d[x1] = 1
    # Every lookup-style operation must propagate the comparison error.
    for stmt in ['d[x2] = 2',
                 'z = d[x2]',
                 'x2 in d',
                 'd.get(x2)',
                 'd.setdefault(x2, 42)',
                 'd.pop(x2)',
                 'd.update({x2: 2})']:
        with self.assertRaises(CustomException):
            exec(stmt, locals())
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
    """Another dict resizing bug (SF bug #1456209).

    This caused Segmentation faults or Illegal instructions.
    """
    class X(object):
        def __hash__(self):
            return 5

        def __eq__(self, other):
            # Clear the dict during comparison once resizing starts.
            if resizing:
                d.clear()
            return False

    d = {}
    resizing = False
    d[X()] = 1
    d[X()] = 2
    d[X()] = 3
    d[X()] = 4
    d[X()] = 5
    # now trigger a resize
    resizing = True
    d[9] = 6
def test_empty_presized_dict_in_freelist(self):
    """Bug #3537: if an empty but presized dict with a size larger
    than 7 was in the freelist, it triggered an assertion failure.
    """
    with self.assertRaises(ZeroDivisionError):
        # The display literal presizes the dict before 1 // 0 raises.
        d = {'a': 1 // 0, 'b': None, 'c': None, 'd': None, 'e': None,
             'f': None, 'g': None, 'h': None}
    d = {}
def test_container_iterator(self):
    """Bug #3680: tp_traverse was not implemented for dictiter and
    dictview objects, so cycles through them were never collected.
    """
    class C(object):
        pass
    views = (dict.items, dict.values, dict.keys)
    for v in views:
        obj = C()
        ref = weakref.ref(obj)
        # Build a cycle: container -> obj -> view + iterator -> container
        container = {obj: 1}
        obj.v = v(container)
        obj.x = iter(obj.v)
        del obj, container
        gc.collect()
        self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@support.cpython_only
def test_track_literals(self):
    """Test GC-optimization of dict literals."""
    x, y, z, w = 1.5, "a", (1, None), []

    # Dicts holding only atomic (immutable) values need not be GC-tracked.
    self._not_tracked({})
    self._not_tracked({x:(), y:x, z:1})
    self._not_tracked({1: "a", "b": 2})
    self._not_tracked({1: 2, (None, True, False, ()): int})
    self._not_tracked({1: object()})

    # Dicts with mutable elements are always tracked, even if those
    # elements are not tracked right now.
    self._tracked({1: []})
    self._tracked({1: ([],)})
    self._tracked({1: {}})
    self._tracked({1: set()})
@support.cpython_only
def test_track_dynamic(self):
    """Test GC-optimization of dynamically-created dicts."""
    class MyObject(object):
        pass
    x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()

    # A fresh dict stays untracked while only atomic values are inserted.
    d = dict()
    self._not_tracked(d)
    d[1] = "a"
    self._not_tracked(d)
    d[y] = 2
    self._not_tracked(d)
    d[z] = 3
    self._not_tracked(d)
    self._not_tracked(d.copy())
    # Inserting a mutable value makes the dict (and its copies) tracked.
    d[4] = w
    self._tracked(d)
    self._tracked(d.copy())
    # Replacing it with an atomic value lets the dict untrack again.
    d[4] = None
    self._not_tracked(d)
    self._not_tracked(d.copy())

    # dd isn't tracked right now, but it may mutate and therefore d
    # which contains it must be tracked.
    d = dict()
    dd = dict()
    d[1] = dd
    self._not_tracked(dd)
    self._tracked(d)
    dd[1] = d
    self._tracked(dd)

    # fromkeys()/update() follow the same rules as direct insertion.
    d = dict.fromkeys([x, y, z])
    self._not_tracked(d)
    dd = dict()
    dd.update(d)
    self._not_tracked(dd)
    d = dict.fromkeys([x, y, z, o])
    self._tracked(d)
    dd = dict()
    dd.update(d)
    self._tracked(dd)

    # Keyword-argument construction and update().
    d = dict(x=x, y=y, z=z)
    self._not_tracked(d)
    d = dict(x=x, y=y, z=z, w=w)
    self._tracked(d)
    d = dict()
    d.update(x=x, y=y, z=z)
    self._not_tracked(d)
    d.update(w=w)
    self._tracked(d)

    # Construction and update() from key/value pair sequences.
    d = dict([(x, y), (z, 1)])
    self._not_tracked(d)
    d = dict([(x, y), (z, w)])
    self._tracked(d)
    d = dict()
    d.update([(x, y), (z, 1)])
    self._not_tracked(d)
    d.update([(x, y), (z, w)])
    self._tracked(d)
@support.cpython_only
def test_track_subtypes(self):
    """Dict subtypes are always tracked by the GC."""
    class MyDict(dict):
        pass
    self._tracked(MyDict())
@support.cpython_only
def test_splittable_setattr_after_pop(self):
    """setattr must not convert combined table into split table."""
    # Issue 28147
    import _testcapi
    class C:
        pass
    a = C()

    a.a = 2
    self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))

    # dict.popitem() convert it to combined table
    a.__dict__.popitem()
    self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))

    # But C should not convert a.__dict__ to split table again.
    a.a = 3
    self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
def test_iterator_pickling(self):
    """A dict key iterator pickles and resumes at every pickle protocol."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        data = {1:"a", 2:"b", 3:"c"}
        it = iter(data)
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        self.assertEqual(sorted(it), sorted(data))

        it = pickle.loads(d)
        try:
            drop = next(it)
        except StopIteration:
            continue
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        del data[drop]
        # A partially-consumed iterator resumes after the dropped key.
        self.assertEqual(sorted(it), sorted(data))
def test_itemiterator_pickling(self):
    """A dict items() iterator pickles and resumes at every protocol."""
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        data = {1:"a", 2:"b", 3:"c"}
        # dictviews aren't picklable, only their iterators
        itorg = iter(data.items())
        d = pickle.dumps(itorg, proto)
        it = pickle.loads(d)
        # note that the type of the unpickled iterator
        # is not necessarily the same as the original.  It is
        # merely an object supporting the iterator protocol, yielding
        # the same objects as the original one.
        # self.assertEqual(type(itorg), type(it))
        self.assertIsInstance(it, collections.abc.Iterator)
        self.assertEqual(dict(it), data)

        it = pickle.loads(d)
        drop = next(it)
        d = pickle.dumps(it, proto)
        it = pickle.loads(d)
        del data[drop[0]]
        self.assertEqual(dict(it), data)
def test_valuesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL):
data = {1:"a", 2:"b", 3:"c"}
# data.values() isn't picklable, only its iterator
it = iter(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(sorted(list(it)), sorted(list(data.values())))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(list(data.values())))
def test_instance_dict_getattr_str_subclass(self):
class Foo:
def __init__(self, msg):
self.msg = msg
f = Foo('123')
class _str(str):
pass
self.assertEqual(f.msg, getattr(f, _str('msg')))
self.assertEqual(f.msg, f.__dict__[_str('msg')])
def test_object_set_item_single_instance_non_str_key(self):
class Foo: pass
f = Foo()
f.__dict__[1] = 1
f.a = 'a'
self.assertEqual(f.__dict__, {1:1, 'a':'a'})
def check_reentrant_insertion(self, mutate):
    """Replace every value of a dict while *mutate* reentrantly modifies it.

    This object will trigger mutation of the dict when replaced
    by another value.  Note this relies on refcounting: the test
    won't achieve its purpose on fully-GCed Python implementations.
    """
    class Mutating:
        def __del__(self):
            mutate(d)

    d = {k: Mutating() for k in 'abcdefghijklmnopqr'}
    for k in list(d):
        d[k] = k
def test_reentrant_insertion(self):
# Reentrant insertion shouldn't crash (see issue #22653)
def mutate(d):
d['b'] = 5
self.check_reentrant_insertion(mutate)
def mutate(d):
d.update(self.__dict__)
d.clear()
self.check_reentrant_insertion(mutate)
def mutate(d):
while d:
d.popitem()
self.check_reentrant_insertion(mutate)
def test_merge_and_mutate(self):
    """dict.update must detect its source dict mutating mid-merge."""
    class X:
        def __hash__(self):
            return 0

        def __eq__(self, o):
            # Clearing the source during key comparison mutates it mid-update.
            other.clear()
            return False

    l = [(i,0) for i in range(1, 1337)]
    other = dict(l)
    other[X()] = 0
    d = {X(): 0, 1: 1}
    self.assertRaises(RuntimeError, d.update, other)
def test_free_after_iterating(self):
    """Freeing a dict mid-iteration must not crash, for every iterator kind."""
    iterator_factories = (
        iter,
        lambda d: iter(d.keys()),
        lambda d: iter(d.values()),
        lambda d: iter(d.items()),
    )
    for factory in iterator_factories:
        support.check_free_after_iterating(self, factory, dict)
def test_equal_operator_modifying_operand(self):
    """Test fix for seg fault reported in issue 27945 part 3."""
    class X():
        def __del__(self):
            dict_b.clear()

        def __eq__(self, other):
            # Clearing an operand while == runs must not crash the compare.
            dict_a.clear()
            return True

        def __hash__(self):
            return 13

    dict_a = {X(): 0}
    dict_b = {X(): X()}
    self.assertTrue(dict_a == dict_b)
def test_fromkeys_operator_modifying_dict_operand(self):
    """Test fix for seg fault reported in issue 27945 part 4a."""
    class X(int):
        def __hash__(self):
            return 13

        def __eq__(self, other):
            # Clear the source dict mid-iteration to provoke the bug.
            if len(d) > 1:
                d.clear()
            return False

    d = {}  # this is required to exist so that d can be constructed!
    d = {X(1): 1, X(2): 2}
    try:
        dict.fromkeys(d)  # shouldn't crash
    except RuntimeError:  # implementation defined
        pass
def test_fromkeys_operator_modifying_set_operand(self):
    """Test fix for seg fault reported in issue 27945 part 4b."""
    class X(int):
        def __hash__(self):
            return 13

        def __eq__(self, other):
            # Clear the source set mid-iteration to provoke the bug.
            if len(d) > 1:
                d.clear()
            return False

    d = {}  # this is required to exist so that d can be constructed!
    d = {X(1), X(2)}
    try:
        dict.fromkeys(d)  # shouldn't crash
    except RuntimeError:  # implementation defined
        pass
def test_dictitems_contains_use_after_free(self):
    """A key's __eq__ clearing the dict must not crash items() containment."""
    class X:
        def __eq__(self, other):
            d.clear()
            return NotImplemented

    d = {0: set()}
    # The result doesn't matter; the lookup simply must not crash.
    (0, X()) in d.items()
def test_init_use_after_free(self):
    """__hash__ emptying the source pair mid-build must not crash dict()."""
    class X:
        def __hash__(self):
            pair[:] = []
            return 13

    pair = [X(), 123]
    dict([pair])
def test_oob_indexing_dictiter_iternextitem(self):
    """Mutation during items() iteration must raise, not index out of bounds."""
    class X(int):
        def __del__(self):
            d.clear()

    d = {i: X(i) for i in range(8)}

    def iter_and_mutate():
        for result in d.items():
            if result[0] == 2:
                d[2] = None # free d[2] --> X(2).__del__ was called

    self.assertRaises(RuntimeError, iter_and_mutate)
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Run the generic mapping-protocol test suite against the builtin dict.
    type2test = dict
class Dict(dict):
    # Trivial dict subclass exercised by SubclassMappingTests.
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    # Run the generic mapping-protocol test suite against a dict subclass.
    type2test = Dict
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python3
"""
This is to assign functional annotation to gene models using a wide variety of evidence. Meant to
support unlimited hierarchy rules, this utility relies on a user-created configuration file.
Current limitations:
- Rules are evaluated on a per-gene basis, so no rules are currently possible that
would need to consider annotations of other genes in the set.
- IGS 'PFunc' hierarchy
http://imgur.com/1odYcT5
"""
import argparse
import cProfile
import math
import os
import re
import biocode.annotation
import biocode.gff
import biocode.utils
import biocode.things
import sqlite3
import sys
import xml.etree.ElementTree as etree
import yaml
def main():
    """Command-line entry point.

    Parses arguments, loads the YAML annotation configuration, applies each
    configured evidence tier (in the order given by the 'order' section) to
    the input polypeptides, then writes the annotated results in the
    requested output format.
    """
    parser = argparse.ArgumentParser( description='Assigns functional annotation based on user-configurable evidence tiers')

    ## output file to be written
    parser.add_argument('-c', '--config_file', type=str, required=True, help='Configuration file for annotation' )
    parser.add_argument('-o', '--output_base', type=str, required=True, help='Base name/path of output files to be created' )
    parser.add_argument('-f', '--output_format', type=str, required=False, default='gff3', help='Desired output format' )
    parser.add_argument('-npp', '--no_product_processing', action="store_true", help="Pass this to turn off post-processing of gene product names")
    args = parser.parse_args()

    sources_log_fh = open("{0}.sources.log".format(args.output_base), 'wt')

    # The config is plain data, so use yaml.safe_load: yaml.load without an
    # explicit Loader can execute arbitrary YAML tags and is deprecated.
    # The 'with' block also guarantees the config file handle is closed.
    with open(args.config_file) as config_fh:
        configuration = yaml.safe_load(config_fh)
    check_configuration(configuration, args)
    evidence = parse_evidence_config(configuration)
    default_product_name = configuration['general']['default_product_name']

    # stores any active SQLite3 db connections
    db_conn = dict()

    # this is a dict of biothings.Polypeptide objects
    polypeptides = initialize_polypeptides(sources_log_fh, configuration['input']['polypeptide_fasta'], default_product_name)

    for label in configuration['order']:
        if label not in evidence:
            raise Exception("ERROR: There is a label '{0}' in the 'order' section of the conf file that isn't present in the 'evidence' section".format(label))

        if evidence[label]['type'] == 'HMMer3_htab':
            index_conn, ev_db_conn = get_or_create_db_connections(type_ev='hmm_ev', configuration=configuration,
                                                                  evidence=evidence, label=label, db_conn=db_conn, output_base=args.output_base)
            index_conn.isolation_level = None
            apply_hmm_evidence(polypeptides=polypeptides, ev_conn=ev_db_conn, config=configuration,
                               ev_config=evidence[label], label=label, index_conn=index_conn, log_fh=sources_log_fh)
        elif evidence[label]['type'] == 'RAPSearch2_m8':
            index_conn, ev_db_conn = get_or_create_db_connections(type_ev='blast_ev', configuration=configuration,
                                                                  evidence=evidence, label=label, db_conn=db_conn, output_base=args.output_base)
            index_conn.isolation_level = None
            apply_blast_evidence(polypeptides=polypeptides, ev_conn=ev_db_conn, config=configuration,
                                 ev_config=evidence[label], label=label, index_conn=index_conn, log_fh=sources_log_fh)
        elif evidence[label]['type'] == 'TMHMM':
            index_conn, ev_db_conn = get_or_create_db_connections(type_ev='tmhmm_ev', configuration=configuration,
                                                                  evidence=evidence, label=label, db_conn=db_conn, output_base=args.output_base)
            apply_tmhmm_evidence(polypeptides=polypeptides, ev_conn=ev_db_conn, config=configuration,
                                 ev_config=evidence[label], label=label, log_fh=sources_log_fh)
        elif evidence[label]['type'] == 'lipoprotein_motif_bsml':
            index_conn, ev_db_conn = get_or_create_db_connections(type_ev='lipoprotein_motif_ev', configuration=configuration,
                                                                  evidence=evidence, label=label, db_conn=db_conn, output_base=args.output_base)
            apply_lipoprotein_motif_evidence(polypeptides=polypeptides, ev_conn=ev_db_conn, config=configuration,
                                             ev_config=evidence[label], label=label, log_fh=sources_log_fh)
        else:
            raise Exception("ERROR: Unsupported evidence type '{0}' with label '{1}' in configuration file".format(evidence[label]['type'], label))

    # close all db connections
    for label in db_conn:
        db_conn[label].close()

    polyset = biocode.things.PolypeptideSet()
    polyset.load_from_dict(polypeptides)

    # Do any post-processing
    if not args.no_product_processing:
        for polypeptide in polyset.polypeptides:
            annot = polypeptide.annotation
            annot.set_processed_product_name()
            annot.set_processed_gene_symbol()

    perform_final_checks(polypeptides=polypeptides, config=configuration, log_fh=sources_log_fh)

    # Write the output
    if args.output_format == 'fasta':
        polyset.write_fasta(path="{0}.faa".format(args.output_base))
    elif args.output_format == 'gff3':
        ## parse input GFF
        (assemblies, ref_features) = biocode.gff.get_gff3_features( configuration['input']['gff3'] )

        ## merge annotation with polypeptide collection
        biocode.gff.add_annotation(features=ref_features, polypeptide_set=polyset)

        ## print the new GFF; 'with' ensures the handle is flushed and closed
        with open("{0}.gff3".format(args.output_base), 'wt') as gff3_ofh:
            biocode.gff.print_gff3_from_assemblies(assemblies=assemblies, ofh=gff3_ofh)

    sources_log_fh.close()
def already_indexed(path=None, index=None):
    """Return True when *path* is already recorded in the data_sources table
    of the SQLite index connection *index*, else False."""
    cursor = index.cursor()
    cursor.execute("SELECT id FROM data_sources WHERE source_path = ?", (path, ))
    row = cursor.fetchone()
    cursor.close()
    return row is not None
def apply_blast_evidence(polypeptides=None, ev_conn=None, config=None, ev_config=None, label=None, index_conn=None, log_fh=None):
    """
    Uses BLAST (or similar) evidence to assign functional evidence to polypeptides.  Description of arguments:

    polypeptides: a dict of biothings.Polypeptides objects, keyed on ID
    ev_conn: SQLite3 connection to the parsed BLAST(ish) search evidence db for this set of searches
    config: The yaml object for the parsed annotation config file
    ev_config: The parsed evidence section for this label within the annotation config file
    label: Label for the evidence track entry within the annotation config file
    index_conn: SQLite3 connection to the reference index for the database searched
    log_fh: open file handle to which source-attribution log lines are written
    """
    print("DEBUG: Processing BLAST (or blast-like) evidence tier label: {0}".format(label))
    default_product = config['general']['default_product_name']
    ev_curs = ev_conn.cursor()

    # Doing all the cursors here prevents doing it repeatedly within calling each accession.  Huge performance improvement.
    index_acc_curs = index_conn.cursor()
    index_acc_curs.execute("begin")
    index_ec_curs = index_conn.cursor()
    index_go_curs = index_conn.cursor()

    ev_qry = "SELECT sbj_id, align_len, perc_identity, eval, bit_score FROM blast_hit WHERE qry_id = ? ORDER BY eval ASC"

    print("DEBUG: Applying {1} results to {0} polypeptides".format(len(polypeptides), label))

    blast_class_limit = None
    if 'class' in ev_config:
        blast_class_limit = ev_config['class']

    prepend_text = None
    if 'prepend_text' in ev_config:
        prepend_text = ev_config['prepend_text']

    append_text = None
    if 'append_text' in ev_config:
        append_text = ev_config['append_text']

    # Bug fix: DEBUG_LIMIT was previously only assigned when the
    # 'debugging_polypeptide_limit' option was present, raising a NameError
    # below when it was absent.  A negative starting value disables the
    # limit, since the countdown below only stops at exactly 0.
    DEBUG_LIMIT = -1
    if 'debugging_polypeptide_limit' in config['general']:
        DEBUG_LIMIT = config['general']['debugging_polypeptide_limit']

    # Are coverage cutoffs defined?
    query_cov_cutoff = None
    match_cov_cutoff = None
    if 'query_cov' in ev_config:
        query_cov_cutoff = int(ev_config['query_cov'].rstrip('%'))
    if 'match_cov' in ev_config:
        match_cov_cutoff = int(ev_config['match_cov'].rstrip('%'))

    percent_identity_cutoff = None
    if 'percent_identity_cutoff' in ev_config:
        percent_identity_cutoff = int(ev_config['percent_identity_cutoff'].rstrip('%'))

    for id in polypeptides:
        polypeptide = polypeptides[id]
        print("DEBUG: Parsing {0} evidence for polypeptide ID {1}, length: {2}".format(label, id, polypeptide.length))
        annot = polypeptide.annotation

        DEBUG_LIMIT = DEBUG_LIMIT - 1
        if DEBUG_LIMIT == 0:
            break

        if config['general']['allow_attributes_from_multiple_sources'] == 'Yes':
            raise Exception("ERROR: Support for the general:allow_attributes_from_multiple_sources=Yes setting not yet implemented")
        else:
            # Skip anything a higher-priority evidence tier already annotated
            if annot.product_name != default_product: continue

            for ev_row in ev_curs.execute(ev_qry, (polypeptide.id,)):
                if query_cov_cutoff is not None:
                    perc_coverage = (ev_row[1] / polypeptide.length) * 100
                    if perc_coverage < query_cov_cutoff:
                        print("\tSkipping accession {0} because coverage {1}% doesn't meet cutoff {2}% requirement".format(
                            ev_row[0], perc_coverage, query_cov_cutoff))
                        continue

                if percent_identity_cutoff is not None:
                    if ev_row[2] < percent_identity_cutoff:
                        print("\tSkipping accession {0} because percent identity of {1}% doesn't meet cutoff {2}% requirement".format(
                            ev_row[0], ev_row[2], percent_identity_cutoff))
                        continue

                blast_annot = get_blast_accession_info(conn=index_conn, accession=ev_row[0], config=config, acc_curs=index_acc_curs, ec_curs=index_ec_curs, go_curs=index_go_curs)

                # is there a class limitation?
                if blast_class_limit is not None:
                    if blast_class_limit == 'trusted':
                        if 'is_characterized' in blast_annot.other_attributes:
                            if blast_annot.other_attributes['is_characterized'] != 1:
                                print("\tSkipping accession {0} because it is not characterized".format(ev_row[0]))
                                continue
                            else:
                                print("\tAccepting accession {0} because it is characterized".format(ev_row[0]))
                                pass
                    else:
                        raise Exception("ERROR: Unrecognized value ('{0}') for class in config file".format(blast_class_limit))

                if match_cov_cutoff is not None:
                    if 'ref_len' not in blast_annot.other_attributes:
                        print("\tSkipping accession {0} because length wasn't found".format(ev_row[0]))
                        continue

                    match_coverage = (ev_row[1] / blast_annot.other_attributes['ref_len'])*100

                    if match_coverage < match_cov_cutoff:
                        print("\tSkipping accession {0} because match coverage {1}% doesn't meet cutoff {2}% requirement".format(
                            ev_row[0], match_coverage, match_cov_cutoff))
                        continue

                if prepend_text is None:
                    annot.product_name = blast_annot.product_name
                else:
                    annot.product_name = "{0} {1}".format(prepend_text, blast_annot.product_name)

                if append_text is not None:
                    annot.product_name = "{0} {1}".format(annot.product_name, append_text)

                log_fh.write("INFO: {1}: Set product name to '{0}' from {3} hit to {2}\n".format(
                    annot.product_name, id, ev_row[0], label))

                annot.gene_symbol = blast_annot.gene_symbol
                log_fh.write("INFO: {1}: Set gene_symbol to '{0}' from {3} hit to {2}\n".format(
                    annot.gene_symbol, id, ev_row[0], label))

                for go_annot in blast_annot.go_annotations:
                    annot.add_go_annotation(go_annot)

                for ec_num in blast_annot.ec_numbers:
                    annot.add_ec_number(ec_num)

                # If we get this far we've assigned annotation and don't want to look at any more
                break

    ev_curs.close()
    index_acc_curs.close()
    index_ec_curs.close()
    index_go_curs.close()
def apply_hmm_evidence(polypeptides=None, ev_conn=None, config=None, ev_config=None, label=None, index_conn=None, log_fh=None):
    """
    Uses HMM evidence to assign functional evidence to polypeptides.  Description of arguments:

    polypeptides: a dict of biothings.Polypeptides objects, keyed on ID
    ev_conn: SQLite3 connection to the parsed HMM search evidence db for this set of searches
    config: The yaml object for the parsed annotation config file
    ev_config: The parsed evidence section for this label within the annotation config file
    label: Label for the evidence track entry within the annotation config file
    index_conn: SQLite3 connection to the reference index for the database searched
    log_fh: open file handle to which source-attribution log lines are written
    """
    default_product = config['general']['default_product_name']
    ev_curs = ev_conn.cursor()
    ev_qry = "SELECT hmm_accession, total_score FROM hmm_hit WHERE qry_id = ? ORDER BY total_hit_eval ASC"

    acc_main_curs = index_conn.cursor()
    acc_main_curs.execute("begin")
    hmm_class_limit = None

    go_curs = index_conn.cursor()
    go_qry = "SELECT go_id FROM hmm_go WHERE hmm_id = ?"

    ec_curs = index_conn.cursor()
    ec_qry = "SELECT ec_id FROM hmm_ec WHERE hmm_id = ?"

    if 'class' in ev_config:
        hmm_class_limit = ev_config['class']

    prepend_text = None
    if 'prepend_text' in ev_config:
        prepend_text = ev_config['prepend_text']

    append_text = None
    if 'append_text' in ev_config:
        append_text = ev_config['append_text']

    print("DEBUG: Applying HMM results to {0} polypeptides".format(len(polypeptides)))

    # Bug fix: DEBUG_LIMIT previously went unassigned (NameError below) when
    # the 'debugging_polypeptide_limit' option was absent.  A negative value
    # disables the limit, since the countdown only stops at exactly 0.
    DEBUG_LIMIT = -1
    if 'debugging_polypeptide_limit' in config['general']:
        DEBUG_LIMIT = config['general']['debugging_polypeptide_limit']

    for id in polypeptides:
        #print("DEBUG: Parsing {0} evidence for polypeptide ID {1}".format(label, id))
        polypeptide = polypeptides[id]
        annot = polypeptide.annotation

        DEBUG_LIMIT = DEBUG_LIMIT - 1
        if DEBUG_LIMIT == 0:
            break

        if config['general']['allow_attributes_from_multiple_sources'] == 'Yes':
            raise Exception("ERROR: Support for the general:allow_attributes_from_multiple_sources=Yes setting not yet implemented")
        else:
            # If this has changed already, it has already been annotated
            if annot.product_name != default_product: continue

            for ev_row in ev_curs.execute(ev_qry, (polypeptide.id,)):
                acc_main_qry = "SELECT version, hmm_com_name, ec_num, isotype, id FROM hmm WHERE (version = ? or accession = ?)"

                if hmm_class_limit is None:
                    acc_main_qry_args = (ev_row[0], ev_row[0], )
                else:
                    acc_main_qry += " AND isotype = ?"
                    acc_main_qry_args = (ev_row[0], ev_row[0], hmm_class_limit)

                for acc_main_row in acc_main_curs.execute(acc_main_qry, acc_main_qry_args):
                    if prepend_text is None:
                        annot.product_name = acc_main_row[1]
                    else:
                        annot.product_name = "{0} {1}".format(prepend_text, acc_main_row[1])

                    if append_text is not None:
                        annot.product_name = "{0} {1}".format(annot.product_name, append_text)

                    # Bug fix: both placeholders previously read {3}, so the
                    # 'label' argument was never printed and the isotype
                    # appeared twice in the log line.
                    log_fh.write("INFO: {1}: Set product name to '{0}' from {4} hit to {2}, isotype:{3}\n".format(
                        annot.product_name, id, ev_row[0], hmm_class_limit, label))

                    if acc_main_row[2] is not None:
                        annot.gene_symbol = acc_main_row[2]
                        log_fh.write("INFO: {1}: Set gene_symbol to '{0}' from {4} hit to {2}, isotype:{3}\n".format(
                            annot.gene_symbol, id, ev_row[0], hmm_class_limit, label))

                    ## add any matching GO terms
                    for go_row in go_curs.execute(go_qry, (acc_main_row[4],)):
                        annot.add_go_annotation(biocode.annotation.GOAnnotation(go_id=go_row[0]))

                    ## add any matching EC numbers
                    for ec_row in ec_curs.execute(ec_qry, (acc_main_row[4],)):
                        annot.add_ec_number(biocode.annotation.ECAnnotation(number=ec_row[0]))

                # only the best-scoring evidence row is applied
                break

    acc_main_curs.close()
    ev_curs.close()
    go_curs.close()
    ec_curs.close()
def apply_lipoprotein_motif_evidence(polypeptides=None, ev_conn=None, config=None, ev_config=None, label=None, log_fh=None):
    """
    Assigns this tier's configured product name to any polypeptide with a
    lipoprotein motif hit, unless a higher-priority tier already named it.

    polypeptides: a dict of biothings.Polypeptides objects, keyed on ID
    ev_conn: SQLite3 connection to the parsed lipoprotein motif evidence db
    config: The yaml object for the parsed annotation config file
    ev_config: The parsed evidence section for this label within the annotation config file
    label: Label for the evidence track entry within the annotation config file
    log_fh: open file handle to which source-attribution log lines are written
    """
    default_product = config['general']['default_product_name']
    lipoprotein_motif_default_product = ev_config['product_name']
    ev_curs = ev_conn.cursor()
    ev_qry = """
          SELECT hit_acc, hit_desc, start, stop
            FROM lipoprotein_motif_hit
           WHERE qry_id = ?
    """

    # Bug fix: DEBUG_LIMIT previously went unassigned (NameError below) when
    # the 'debugging_polypeptide_limit' option was absent.  A negative value
    # disables the limit, since the countdown only stops at exactly 0.
    DEBUG_LIMIT = -1
    if 'debugging_polypeptide_limit' in config['general']:
        DEBUG_LIMIT = config['general']['debugging_polypeptide_limit']

    print("DEBUG: Applying lipoprotein_motif results to {0} polypeptides".format(len(polypeptides)))

    for id in polypeptides:
        #print("DEBUG: Parsing {0} evidence for polypeptide ID {1}".format(label, id))
        polypeptide = polypeptides[id]
        annot = polypeptide.annotation

        DEBUG_LIMIT = DEBUG_LIMIT - 1
        if DEBUG_LIMIT == 0:
            break

        if config['general']['allow_attributes_from_multiple_sources'] == 'Yes':
            raise Exception("ERROR: Support for the general:allow_attributes_from_multiple_sources=Yes setting not yet implemented")
        else:
            # Skip anything a higher-priority evidence tier already annotated
            if annot.product_name != default_product: continue

            for ev_row in ev_curs.execute(ev_qry, (polypeptide.id,)):
                log_fh.write("INFO: {0}: Set product name to '{1}' because it had a lipoprotein_motif match to accession:{2}, description:{3}\n".format(
                    id, lipoprotein_motif_default_product, ev_row[0], ev_row[1]))
                annot.product_name = lipoprotein_motif_default_product
                # only the first hit is needed
                break

    ev_curs.close()
def apply_tmhmm_evidence(polypeptides=None, ev_conn=None, config=None, ev_config=None, label=None, log_fh=None):
    """
    Names polypeptides whose TMHMM predictions show at least the configured
    minimum number of transmembrane helices, unless a higher-priority tier
    already named them.

    polypeptides: a dict of biothings.Polypeptides objects, keyed on ID
    ev_conn: SQLite3 connection to the parsed TMHMM evidence db
    config: The yaml object for the parsed annotation config file
    ev_config: The parsed evidence section for this label within the annotation config file
    label: Label for the evidence track entry within the annotation config file
    log_fh: open file handle to which source-attribution log lines are written
    """
    default_product = config['general']['default_product_name']
    tmhmm_default_product = ev_config['product_name']
    min_helical_spans = int(ev_config['min_helical_spans'])
    ev_curs = ev_conn.cursor()
    # NOTE(review): count() without GROUP BY relies on SQLite collapsing
    # this into a single summary row per query -- confirm if ported.
    ev_qry = """
          SELECT th.id, count(tp.hit_id)
            FROM tmhmm_hit th
                 JOIN tmhmm_path tp ON th.id=tp.hit_id
           WHERE th.qry_id = ?
    """

    # Bug fix: DEBUG_LIMIT previously went unassigned (NameError below) when
    # the 'debugging_polypeptide_limit' option was absent.  A negative value
    # disables the limit, since the countdown only stops at exactly 0.
    DEBUG_LIMIT = -1
    if 'debugging_polypeptide_limit' in config['general']:
        DEBUG_LIMIT = config['general']['debugging_polypeptide_limit']

    print("DEBUG: Applying TMHMM results to {0} polypeptides".format(len(polypeptides)))

    for id in polypeptides:
        #print("DEBUG: Parsing {0} evidence for polypeptide ID {1}".format(label, id))
        polypeptide = polypeptides[id]
        annot = polypeptide.annotation

        DEBUG_LIMIT = DEBUG_LIMIT - 1
        if DEBUG_LIMIT == 0:
            break

        if config['general']['allow_attributes_from_multiple_sources'] == 'Yes':
            raise Exception("ERROR: Support for the general:allow_attributes_from_multiple_sources=Yes setting not yet implemented")
        else:
            # Skip anything a higher-priority evidence tier already annotated
            if annot.product_name != default_product: continue

            for ev_row in ev_curs.execute(ev_qry, (polypeptide.id,)):
                if ev_row[1] >= min_helical_spans:
                    annot.product_name = tmhmm_default_product
                    log_fh.write("INFO: {0}: Set product name to '{1}' because it had a TMHMM prediction of {2} transmembrane helices\n".format(
                        id, tmhmm_default_product, ev_row[1]))
                    # attach the membrane-localization GO term
                    annot.add_go_annotation( biocode.annotation.GOAnnotation(go_id='0016021') )
                break

    ev_curs.close()
def cache_blast_hit_data(version=None, ref_curs=None, ev_curs=None):
    """
    Gets annotation for a specific accession and copies its entry from the large source index into
    our smaller hits-found-only evidence index.  A no-op when the accession
    isn't present in the reference index.
    """
    ref_blast_select_qry = """
          SELECT e.id, e.full_name, e.organism, e.symbol, ea.accession, ea.res_length, ea.is_characterized
            FROM entry e
                 JOIN entry_acc ea on ea.id=e.id
           WHERE ea.accession = ?
    """
    ev_blast_insert_qry = "INSERT INTO entry (id, full_name, organism, symbol) VALUES (?, ?, ?, ?)"
    ev_acc_insert_qry = "INSERT INTO entry_acc (id, accession, res_length, is_characterized) VALUES (?, ?, ?, ?)"

    ref_go_select_qry = "SELECT id, go_id FROM entry_go WHERE id = ?"
    ev_go_insert_qry = "INSERT INTO entry_go (id, go_id) VALUES (?, ?)"

    ref_ec_select_qry = "SELECT id, ec_num FROM entry_ec WHERE id = ?"
    ev_ec_insert_qry = "INSERT INTO entry_ec (id, ec_num) VALUES (?, ?)"

    ref_curs.execute(ref_blast_select_qry, (version,))
    entry_row = ref_curs.fetchone()
    if entry_row is None:
        return

    entry_id = entry_row[0]
    ev_curs.execute(ev_blast_insert_qry, (entry_id, entry_row[1], entry_row[2], entry_row[3]))
    ev_curs.execute(ev_acc_insert_qry, (entry_id, entry_row[4], entry_row[5], entry_row[6]))

    # copy all attached GO terms
    ref_curs.execute(ref_go_select_qry, (entry_id,))
    for _, go_id in ref_curs.fetchall():
        ev_curs.execute(ev_go_insert_qry, (entry_id, go_id))

    # copy all attached EC numbers
    ref_curs.execute(ref_ec_select_qry, (entry_id,))
    for _, ec_num in ref_curs.fetchall():
        ev_curs.execute(ev_ec_insert_qry, (entry_id, ec_num))
def cache_ev_blast_accessions(idx):
    """
    Returns a dict keyed on every accession whose reference annotation
    attributes have already been stored within this portable evidence DB
    (values are simply True, for fast membership testing).
    """
    curs = idx.cursor()
    curs.execute("SELECT accession FROM entry_acc")
    return {row[0]: True for row in curs.fetchall()}
def cache_ev_hmm_version_accessions(idx):
    """
    Returns a dict keyed on every HMM version whose reference annotation
    attributes have already been stored within this portable evidence DB
    (values are simply True, for fast membership testing).
    """
    curs = idx.cursor()
    curs.execute("SELECT version FROM hmm")
    return {row[0]: True for row in curs.fetchall()}
def cache_hmm_hit_data(version=None, ev_curs=None, ref_curs=None):
    """
    Checks to see if annotation for that HMM is stored within the evidence index.  If not,
    the relevant entries from the reference are copied.  A no-op when the
    version isn't present in the reference index.
    """
    ref_qry = """
          SELECT id, accession, version, name, hmm_com_name, hmm_len, hmm_comment, trusted_global_cutoff,
                 trusted_domain_cutoff, noise_global_cutoff, noise_domain_cutoff, gathering_global_cutoff,
                 gathering_domain_cutoff, ec_num, gene_symbol, isotype
            FROM hmm
           WHERE version = ?
    """
    ref_go_select_qry = """
          SELECT id, hmm_id, go_id
            FROM hmm_go
           WHERE hmm_id = ?
    """
    ev_qry = """
          INSERT INTO hmm (id, accession, version, name, hmm_com_name, hmm_len, hmm_comment, trusted_global_cutoff,
                           trusted_domain_cutoff, noise_global_cutoff, noise_domain_cutoff, gathering_global_cutoff,
                           gathering_domain_cutoff, ec_num, gene_symbol, isotype)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    """
    ev_go_insert_qry = "INSERT INTO hmm_go (id, hmm_id, go_id) VALUES (?, ?, ?)"

    ref_curs.execute(ref_qry, (version,))
    hmm_row = ref_curs.fetchone()
    if hmm_row is None:
        return

    # the SELECT column order matches the INSERT column order exactly
    ev_curs.execute(ev_qry, hmm_row)

    # copy the HMM's GO term links too
    ref_curs.execute(ref_go_select_qry, (hmm_row[0],))
    for go_link in ref_curs.fetchall():
        ev_curs.execute(ev_go_insert_qry, go_link)
def check_configuration(conf, userargs):
    """
    Performs any basic checks on the annotation configuration file format/syntax/values.  Ideally done
    before most of the rest of the script to save wasted compute time.

    Also normalizes userargs.output_format to lowercase as a side effect.
    Raises Exception on the first problem found.
    """
    # every expected top-level section must be present
    for section in ['general', 'indexes', 'input', 'order', 'evidence']:
        if section not in conf:
            raise Exception("ERROR: Expected a section called '{0}' in the annotation config file, but didn't find one.".format(section))

    # make sure the input section has at least fasta defined
    if 'polypeptide_fasta' not in conf['input']:
        raise Exception("ERROR: You must at least define 'polypeptide_fasta' data in the 'input' section of the annotation config file")

    # normalize, then validate, the requested output format
    userargs.output_format = userargs.output_format.lower()
    supported_output_formats = ['fasta', 'gff3']
    if userargs.output_format not in supported_output_formats:
        raise Exception("ERROR: The output format specified '{0}' isn't supported. Please choose from: {1}".format(userargs.output_format,
                        ", ".join(supported_output_formats)))

    # user must have defined input GFF if they've requested GFF as output.
    if userargs.output_format == 'gff3':
        if 'gff3' not in conf['input'] or conf['input']['gff3'] is None or len(conf['input']['gff3']) == 0:
            raise Exception("ERROR: If you requested gff3 formatted output, you must specify the corresponding gff3 input file (which provides the genomic coordinates of the features involved.)")

    # every index referenced by an evidence item must be declared
    indexes = list(conf['indexes'])
    for item in conf['evidence']:
        if 'index' in item and item['index'] not in indexes:
            raise Exception("ERROR: Evidence item '{0}' references an index '{1}' not found in the indexes section of the config file".format(item['label'], item['index']))

    # Any indexes defined should actually exist
    for label in conf['indexes']:
        if not os.path.exists(conf['indexes'][label]):
            raise Exception("ERROR: Index with label:{0} and path:{1} couldn't be found".format(label, conf['indexes'][label]))
def get_blast_accession_info(conn=None, accession=None, config=None, acc_curs=None, ec_curs=None, go_curs=None):
    """
    Looks up functional annotation for a single BLAST subject accession in the
    reference index, attaching any linked EC numbers and GO terms.

    :param conn: unused here; kept for interface compatibility with callers
    :param accession: subject accession; may be raw, 'sp|ACC|ID' or 'UniRef100_ACC' formatted
    :param config: parsed annotation config; supplies the default product name
    :param acc_curs: cursor used for the entry/entry_acc lookup
    :param ec_curs: cursor used for the entry_ec lookup
    :param go_curs: cursor used for the entry_go lookup
    :returns: a biocode.annotation.FunctionalAnnotation populated from the index
    """
    annot = biocode.annotation.FunctionalAnnotation(product_name=config['general']['default_product_name'])

    # First we need to get the ID from the accession
    qry = """
          SELECT e.id, ea.accession, e.full_name, e.symbol, ea.res_length, ea.is_characterized
            FROM entry e
                 JOIN entry_acc ea ON e.id=ea.id
           WHERE ea.accession = ?
    """

    # SWISS-PROT accessions are in the format: sp|A4YVG3|RRF_BRASO
    if accession.startswith('sp|'):
        abbrev, accession, sprot_id = accession.split('|')
    elif accession.startswith('UniRef100_'):
        # BUGFIX: str.lstrip() strips a *character set*, not a prefix, and would
        # also eat leading accession characters from {U,n,i,R,e,f,1,0,_}; slice
        # the fixed-length prefix off instead.
        accession = accession[len('UniRef100_'):]

    for row in acc_curs.execute(qry, (accession,)):
        entry_id = row[0]
        annot.product_name = row[2]
        annot.gene_symbol = row[3]
        annot.other_attributes['ref_len'] = row[4]
        annot.other_attributes['is_characterized'] = row[5]

        qry = "SELECT ec_num FROM entry_ec WHERE id = ?"
        for ec_row in ec_curs.execute(qry, (entry_id,)):
            annot.add_ec_number( biocode.annotation.ECAnnotation(number=ec_row[0]) )

        qry = "SELECT go_id FROM entry_go WHERE id = ?"
        for go_row in go_curs.execute(qry, (entry_id,)):
            # with_from records the matched accession (row[1]) alongside the GO term
            annot.add_go_annotation( biocode.annotation.GOAnnotation(go_id=go_row[0], with_from=row[1]) )

    return annot
def _recreate_search_index(conn, index_name, indexed_columns):
    """Drops and recreates a single search index on an evidence database."""
    curs = conn.cursor()
    curs.execute("DROP INDEX IF EXISTS {0}".format(index_name))
    curs.execute("CREATE INDEX {0} ON {1}".format(index_name, indexed_columns))
    curs.close()

def get_or_create_db_connections(type_ev=None, configuration=None, evidence=None, label=None,
                                 db_conn=None, output_base=None):
    """
    type_ev must be either 'hmm_ev', 'blast_ev', 'tmhmm_ev' or 'lipoprotein_motif_ev'

    Returns a tuple (index_conn, ev_db_conn): a connection to the reference index
    for this evidence type (None for types with no index) and a connection to the
    parsed-evidence database, creating and populating the latter if needed.
    Connections are cached in the db_conn dict so repeated calls reuse them.
    """
    if type_ev in ['hmm_ev', 'blast_ev']:
        index_label = evidence[label]['index']
    elif type_ev in ['tmhmm_ev', 'lipoprotein_motif_ev']:
        index_label = None
    else:
        # previously an unrecognized type_ev fell through and raised a confusing
        # NameError on index_label below; fail explicitly instead
        raise Exception("ERROR: Unrecognized evidence type: {0}".format(type_ev))

    # Use any existing index connection, else attach to it.
    index_conn = None
    if index_label is not None:
        if index_label in db_conn:
            index_conn = db_conn[index_label]
        else:
            try:
                index_conn = sqlite3.connect(configuration['indexes'][index_label])
            except sqlite3.OperationalError as e:
                raise Exception("ERROR: Failed to connect to evidence database {0} because {1}".format(configuration['indexes'][index_label], e))
            db_conn[index_label] = index_conn

    # Attach to or create an evidence database.  The schema is initialized only
    # when the database file did not already exist (the duplicated connect call
    # in the original two branches is collapsed here).
    ev_db_path = "{0}.{1}.sqlite3".format(output_base, type_ev)
    ev_db_existed = os.path.exists(ev_db_path)
    ev_db_conn = sqlite3.connect(ev_db_path)

    if not ev_db_existed:
        schema_initializers = {'hmm_ev': initialize_hmm_results_db,
                               'blast_ev': initialize_blast_results_db,
                               'tmhmm_ev': initialize_tmhmm_results_db,
                               'lipoprotein_motif_ev': initialize_lipoprotein_motif_results_db}
        schema_initializers[type_ev](ev_db_conn)

    db_conn[type_ev] = ev_db_conn

    # only parse the evidence if the list isn't already in the database
    if not already_indexed(path=evidence[label]['path'], index=ev_db_conn):
        if type_ev == 'hmm_ev':
            index_hmmer3_htab(path=evidence[label]['path'], ev_index=ev_db_conn, ref_index=index_conn)
            ev_db_conn.commit()
            _recreate_search_index(ev_db_conn, 'hmm_hit__qry_id', 'hmm_hit (qry_id)')
        elif type_ev == 'blast_ev':
            index_rapsearch2_m8(path=evidence[label]['path'], ev_index=ev_db_conn, ref_index=index_conn)
            ev_db_conn.commit()
            _recreate_search_index(ev_db_conn, 'blast_hit__qry_id', 'blast_hit (qry_id)')
        elif type_ev == 'tmhmm_ev':
            index_tmhmm_raw(path=evidence[label]['path'], index=ev_db_conn)
            ev_db_conn.commit()
            _recreate_search_index(ev_db_conn, 'tmhmm_hit__qry_id', 'tmhmm_hit (qry_id)')
            _recreate_search_index(ev_db_conn, 'tmhmm_path__hit_id', 'tmhmm_path (hit_id)')
        elif type_ev == 'lipoprotein_motif_ev':
            index_lipoprotein_motif(path=evidence[label]['path'], index=ev_db_conn)
            ev_db_conn.commit()
            _recreate_search_index(ev_db_conn, 'lipoprotein_motif_hit__qry_id', 'lipoprotein_motif_hit (qry_id)')

        ev_db_conn.commit()

    return (index_conn, ev_db_conn)
def get_files_from_path(path):
    """
    Expands a user-supplied path specification into a list of file paths.

    The argument may name a single file, a '.list' file containing one path per
    line, or a comma-separated combination of both.  The '.list' extension is
    what marks an entry for expansion.
    """
    expanded = []

    for entry in path.split(','):
        if entry.endswith('.list'):
            expanded.extend(biocode.utils.read_list_file(entry))
        else:
            expanded.append(entry)

    return expanded
def index_hmmer3_htab(path=None, index=None, ev_index=None, ref_index=None):
    """
    Parses one or more HMMer3 htab files and loads the hits into the hmm_hit
    table of the evidence database.

    :param path: file, .list file, or comma-separated combination of htab inputs
    :param index: unused; retained for interface compatibility
    :param ev_index: connection to the evidence results database (written to)
    :param ref_index: connection to the reference HMM index (read from)
    """
    ev_curs = ev_index.cursor()
    ref_curs = ref_index.cursor()
    parsing_errors = 0

    qry = """
          INSERT INTO hmm_hit (qry_id, qry_start, qry_end, hmm_accession, hmm_length, hmm_start, hmm_end,
                               domain_score, total_score, total_score_tc, total_score_nc, total_score_gc,
                               domain_score_tc, domain_score_nc, domain_score_gc, total_hit_eval,
                               domain_hit_eval)
          VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
    """

    paths = get_files_from_path(path)
    hmm_versions_found = cache_ev_hmm_version_accessions(ev_index)

    for file_path in paths:
        print("INFO: parsing: {0}".format(file_path))

        # context manager so the handle is closed even if parsing raises
        # (the original left every file open)
        with open(file_path) as fh:
            for line in fh:
                line = line.rstrip()
                cols = line.split("\t")

                ## not sure what this is, but some lines have columns 7+ as these values:
                #   hmm     to    ali     to   bias
                if cols[6] == 'hmm': continue

                # the following columns in the htab files are nullable, which are filled with various widths of '-' characters
                for i in (6,7,8,9,11,19,20):
                    if '--' in cols[i]:
                        cols[i] = None
                    elif len(cols[i]) > 0:
                        try:
                            cols[i] = float(cols[i])
                        except ValueError:
                            parsing_errors += 1
                            cols[i] = None

                ev_curs.execute(qry, (cols[5], cols[8], cols[9], cols[0], int(cols[2]), cols[6], cols[7],
                                      cols[11], float(cols[12]), float(cols[17]), float(cols[18]), float(cols[23]),
                                      float(cols[21]), float(cols[22]), float(cols[24]), cols[19], cols[20]))

                if cols[0] not in hmm_versions_found:
                    # NOTE(review): the *connection* (ev_index) is passed as ev_curs here,
                    # while index_rapsearch2_m8 passes an actual cursor.  sqlite3
                    # connections do support execute(), so behavior is preserved
                    # as-is — confirm against cache_hmm_hit_data's expectations.
                    cache_hmm_hit_data(version=cols[0], ev_curs=ev_index, ref_curs=ref_curs)
                    hmm_versions_found[cols[0]] = True

    ev_curs.execute("INSERT INTO data_sources (source_path) VALUES (?)", (path,))
    ev_curs.close()
    ref_curs.close()

    if parsing_errors > 0:
        print("WARN: There were {0} parsing errors (columns converted to None) when processing {1}\n".format(parsing_errors, path))
def index_lipoprotein_motif(path=None, index=None):
    """
    Parses lipoprotein-motif search XML output and loads the hits into the
    lipoprotein_motif_hit table of the evidence database.

    :param path: file, .list file, or comma-separated combination of XML inputs
    :param index: connection to the evidence results database (written to)
    """
    curs = index.cursor()
    parsing_errors = 0

    qry = """
          INSERT INTO lipoprotein_motif_hit (qry_id, hit_acc, hit_desc, start, stop)
          VALUES (?, ?, ?, ?, ?)
    """

    paths = get_files_from_path(path)

    # http://www.diveintopython3.net/xml.html
    for file in paths:
        print("INFO: parsing: {0}".format(file))
        tree = etree.parse(file)

        for elem in tree.iterfind('Definitions/Sequences/Sequence'):
            qry_id = elem.attrib['id']

            for feature in elem.iterfind('Feature-tables/Feature-table/Feature'):
                title = feature.attrib['title']
                # titles look like: "<accession> :: <description>"
                # (raw string: the original's "\:" is an invalid str escape)
                m = re.match(r"(\S+) \:\: (.+)", title)
                if m:
                    hit_acc = m.group(1)
                    hit_desc = m.group(2)
                    interval = feature.find('Interval-loc')
                    curs.execute(qry, (qry_id, hit_acc, hit_desc, interval.attrib['startpos'],
                                       interval.attrib['endpos']))
                else:
                    parsing_errors += 1
                    print("WARN: Unable to parse accession and description from title in file: {0}".format(file))

    curs.execute("INSERT INTO data_sources (source_path) VALUES (?)", (path,))
    curs.close()

    # report the failure count, consistent with the other index_* parsers
    # (the counter was previously tracked but never reported)
    if parsing_errors > 0:
        print("WARN: There were {0} parsing errors (hit rows skipped) when processing {1}\n".format(parsing_errors, path))
def index_rapsearch2_m8(path=None, ev_index=None, ref_index=None):
    """
    Parses RAPSearch2 m8 output and loads the matches into the blast_hit table
    of the evidence database.

    :param path: file, .list file, or comma-separated combination of m8 inputs
    :param ev_index: connection to the evidence results database (written to)
    :param ref_index: connection to the reference BLAST index (read from)
    """
    ref_curs = ref_index.cursor()
    ev_curs = ev_index.cursor()
    parsing_errors = 0

    # The E-value column can be either the E-value directly or log(E-value), depending on
    # the version and options used.  Luckily, the header line tells us which it is.
    logged_eval = False

    qry = """
          INSERT INTO blast_hit (qry_id, sbj_id, align_len, qry_start, qry_end, sbj_start,
                                 sbj_end, perc_identity, eval, bit_score)
          VALUES (?,?,?,?,?,?,?,?,?,?)
    """

    paths = get_files_from_path(path)
    accessions_found = cache_ev_blast_accessions(ev_index)

    for file_path in paths:
        print("INFO: parsing: {0}".format(file_path))

        # context manager so the handle is closed even if parsing raises
        with open(file_path) as fh:
            for line in fh:
                if line[0] == '#':
                    m = re.search(r'log\(e\-value\)', line)
                    if m:
                        logged_eval = True
                    continue

                line = line.rstrip()
                cols = line.split("\t")
                if len(cols) != 12:
                    continue

                if logged_eval == True:
                    try:
                        cols[10] = math.pow(10, float(cols[10]))
                    except OverflowError:
                        # RapSearch2 sometimes reports miniscule e-values, such a log(eval) of > 1000
                        # These are outside of the range of Python's double.  In my checking of these
                        # though, their alignments don't warrant a low E-value at all.  Skipping them.
                        print("WARN: Skipping a RAPSearch2 row: overflow error converting E-value ({0}) on line: {1}".format(cols[10], line))
                        parsing_errors += 1
                        continue

                accession = cols[1]
                if accession.startswith('sp|'):
                    abbrev, accession, sprot_id = accession.split('|')
                elif accession.startswith('UniRef100_'):
                    # BUGFIX: str.lstrip() strips a *character set*, not a prefix,
                    # and could also remove leading characters of the accession
                    # itself; slice the fixed-length prefix off instead.
                    accession = accession[len('UniRef100_'):]

                ev_curs.execute(qry, (cols[0], accession, int(cols[3]), int(cols[6]), int(cols[7]), int(cols[8]),
                                      int(cols[9]), float(cols[2]), cols[10], float(cols[11])))

                # the cache is keyed on the raw subject ID, while the lookup
                # passes the parsed accession
                if cols[1] not in accessions_found:
                    cache_blast_hit_data(version=accession, ev_curs=ev_curs, ref_curs=ref_curs)
                    accessions_found[cols[1]] = True

    ev_curs.execute("INSERT INTO data_sources (source_path) VALUES (?)", (path,))
    ev_curs.close()

    if parsing_errors > 0:
        print("WARN: There were {0} parsing errors (match rows skipped) when processing {1}\n".format(parsing_errors, path))
def index_tmhmm_raw(path=None, index=None):
    """
    Parses raw TMHMM output (HTML lines are skipped) and loads one tmhmm_hit row
    per query, plus its tmhmm_path span rows, into the evidence database.

    Notes from the esteemed M Giglio:
    The GO term to use would be GO:0016021 "integral component of membrane"
    Or if you want to be more conservative you could go with GO:0016020 "membrane"
    Depends on the evidence. For the prok pipe we are pretty conservative, we require five TMHMM
    domains and then we call it putative integral membrane protein.
    On ECO - in fact Marcus and I are the developers of ECO.  It is an ontology of evidence types.
    An annotation to an ECO term is used in conjunction with another annotation, like a GO term
    (but many other types of annotation can, and are, used with ECO).  It provides additional
    information about the annotation.

    INPUT: Expected TMHMM record layout per query (all HTML lines are skipped):
      # <qry_id>  Length: <n>                          <- marks a new result
      # <qry_id>  Number of predicted TMHs: <n>
      # <qry_id>  Exp number of AAs in TMHs: <float>
      # <qry_id>  Exp number, first 60 AAs: <float>
      # <qry_id>  Total prob of N-in: <float>
      <qry_id>  TMHMM2.0  inside|outside|TMhelix  <start>  <stop>   <- path spans
    """
    curs = index.cursor()
    parsing_errors = 0

    hit_qry = """
        INSERT INTO tmhmm_hit (qry_id, tmh_count, num_aa_in_tmhs, num_aa_in_f60, total_prob_n_in)
        VALUES (?, ?, ?, ?, ?)
    """
    path_qry = """
        INSERT INTO tmhmm_path (hit_id, locus, start, stop)
        VALUES (?, ?, ?, ?)
    """

    paths = get_files_from_path(path)

    for file_path in paths:
        print("INFO: parsing: {0}".format(file_path))

        last_qry_id = None
        current_path = list()
        tmh_count = num_aa_in_tmhs = num_aa_in_f60 = total_prob_n_in = 0

        # context manager so the handle is closed even if parsing raises
        with open(file_path) as fh:
            for line in fh:
                # skip the HTML lines
                if line.startswith('<'): continue

                m = Match()

                if m.match(r"# (.+?)\s+Length: \d+", line):  # this line marks a new result
                    current_id = m.m.group(1)

                    # purge the previous result
                    if last_qry_id is not None:
                        curs.execute(hit_qry, (last_qry_id, tmh_count, num_aa_in_tmhs, num_aa_in_f60, total_prob_n_in))
                        current_hit_id = curs.lastrowid
                        for span in current_path:
                            curs.execute(path_qry, (current_hit_id, span[2], int(span[3]), int(span[4])))

                    # reset (the original also reset an unused current_helix_count)
                    last_qry_id = current_id
                    tmh_count = num_aa_in_tmhs = num_aa_in_f60 = total_prob_n_in = 0
                    current_path = list()

                elif m.match(r".+Number of predicted TMHs:\s+(\d+)", line):
                    tmh_count = int(m.m.group(1))
                elif m.match(r".+Exp number of AAs in TMHs:\s+([0-9\.]+)", line):
                    num_aa_in_tmhs = float(m.m.group(1))
                elif m.match(r".+Exp number, first 60 AAs:\s+([0-9\.]+)", line):
                    num_aa_in_f60 = float(m.m.group(1))
                elif m.match(r".+Total prob of N-in:\s+([0-9\.]+)", line):
                    total_prob_n_in = float(m.m.group(1))
                else:
                    if line[0] == '#': continue
                    cols = line.split()
                    if len(cols) == 5:
                        current_path.append(cols)

        # don't forget to do the last entry.  BUGFIX: the unconditional insert
        # previously wrote a bogus row with a NULL qry_id when a file contained
        # no results at all.
        if last_qry_id is not None:
            curs.execute(hit_qry, (last_qry_id, tmh_count, num_aa_in_tmhs, num_aa_in_f60, total_prob_n_in))
            current_hit_id = curs.lastrowid
            for span in current_path:
                curs.execute(path_qry, (current_hit_id, span[2], int(span[3]), int(span[4])))

    curs.execute("INSERT INTO data_sources (source_path) VALUES (?)", (path,))
    curs.close()
    index.commit()

    if parsing_errors > 0:
        print("WARN: There were {0} parsing errors (match rows skipped) when processing {1}\n".format(parsing_errors, path))
def initialize_blast_results_db(conn):
    """Creates the schema for a BLAST evidence database on the passed connection."""
    ddl_statements = (
        """CREATE TABLE blast_hit (
               id integer primary key,
               qry_id text,
               sbj_id text,
               align_len integer,
               qry_start integer,
               qry_end integer,
               sbj_start integer,
               sbj_end integer,
               perc_identity real,
               eval real,
               bit_score real
           )""",
        """CREATE TABLE entry (
               id text primary key,
               full_name text,
               organism text,
               symbol text
           )""",
        """CREATE TABLE entry_acc (
               id text not NULL,
               accession text not NULL,
               res_length integer,
               is_characterized integer DEFAULT 0
           )""",
        """CREATE TABLE entry_go (
               id text not NULL,
               go_id text not NULL
           )""",
        """CREATE TABLE entry_ec (
               id text not NULL,
               ec_num text not NULL
           )""",
        """CREATE TABLE data_sources (
               id integer primary key,
               source_path text
           )""",
    )

    curs = conn.cursor()
    for ddl in ddl_statements:
        curs.execute(ddl)
    curs.close()
    conn.commit()
def initialize_hmm_results_db(conn):
    """Creates the schema for an HMM evidence database on the passed connection."""
    ddl_statements = (
        """CREATE TABLE hmm_hit (
               id integer primary key,
               qry_id text,
               qry_start integer,
               qry_end integer,
               hmm_accession text,
               hmm_length integer,
               hmm_start integer,
               hmm_end integer,
               domain_score real,
               total_score real,
               total_score_tc real,
               total_score_nc real,
               total_score_gc real,
               total_hit_eval real,
               domain_score_tc real,
               domain_score_nc real,
               domain_score_gc real,
               domain_hit_eval real
           )""",
        """CREATE TABLE hmm (
               id integer primary key autoincrement,
               accession text,
               version text,
               name text,
               hmm_com_name text,
               hmm_len int,
               hmm_comment text,
               trusted_global_cutoff float,
               trusted_domain_cutoff float,
               noise_global_cutoff float,
               noise_domain_cutoff float,
               gathering_global_cutoff float,
               gathering_domain_cutoff float,
               ec_num text,
               gene_symbol text,
               isotype text
           )""",
        """CREATE TABLE hmm_go (
               id integer primary key autoincrement,
               hmm_id int not NULL,
               go_id text
           )""",
        """CREATE TABLE hmm_ec (
               id integer primary key autoincrement,
               hmm_id int not NULL,
               ec_id text
           )""",
        """CREATE TABLE data_sources (
               id integer primary key,
               source_path text
           )""",
    )

    curs = conn.cursor()
    for ddl in ddl_statements:
        curs.execute(ddl)
    curs.close()
    conn.commit()
def initialize_lipoprotein_motif_results_db(conn):
    """Creates the schema for a lipoprotein-motif evidence database on the passed connection."""
    ddl_statements = (
        """CREATE TABLE lipoprotein_motif_hit (
               id integer primary key,
               qry_id text,
               hit_acc text,
               hit_desc text,
               start integer,
               stop integer
           )""",
        """CREATE TABLE data_sources (
               id integer primary key,
               source_path text
           )""",
    )

    curs = conn.cursor()
    for ddl in ddl_statements:
        curs.execute(ddl)
    curs.close()
    conn.commit()
def initialize_tmhmm_results_db(conn):
    """
    Creates the schema for a TMHMM evidence database on the passed connection.

    Column notes (previously a stray string expression in the body, now a
    proper docstring):
      tmh_count:       Number of predicted Trans-Membrane Helices (TMHs)
      num_aa_in_tmhs:  Exp number of AAs in TMHs
      num_aa_in_f60:   Exp number of AAs in TMHs within first 60 AAs
    """
    curs = conn.cursor()

    curs.execute("""
        CREATE TABLE tmhmm_hit (
            id integer primary key,
            qry_id text,
            tmh_count float,
            num_aa_in_tmhs float,
            num_aa_in_f60 float,
            total_prob_n_in float
        )
    """)
    curs.execute("""
        CREATE TABLE tmhmm_path (
            hit_id integer,
            locus text,
            start integer,
            stop integer
        )
    """)
    curs.execute("""
        CREATE TABLE data_sources (
            id integer primary key,
            source_path text
        )
    """)

    curs.close()
    conn.commit()
def initialize_polypeptides( log_fh, fasta_file, default_name ):
    '''
    Reads a FASTA file of (presumably) polypeptide sequences and creates a dict of Polypeptide
    objects, keyed by ID, with bioannotation.FunctionalAnnotation objects attached.
    '''
    seqs = biocode.utils.fasta_dict_from_file( fasta_file )
    polypeptides = dict()

    for seq_id, seq in seqs.items():
        residues = seq['s']
        pep = biocode.things.Polypeptide( id=seq_id, length=len(residues), residues=residues )
        log_fh.write("INFO: {0}: Set initial product name to '{1}'\n".format(seq_id, default_name))
        pep.annotation = biocode.annotation.FunctionalAnnotation(product_name=default_name)
        polypeptides[seq_id] = pep

    return polypeptides
def parse_evidence_config(conf):
    """
    Parses the 'evidence' section of the annotation config file, and returns a dict where each key
    is the label of that evidence and the value is a dict of the other key/value pairs

    :raises Exception: if two evidence entries share the same label
    """
    ev = dict()

    for entry in conf['evidence']:
        # make sure there aren't duplicates
        label = entry['label']

        if label in ev:
            raise Exception("ERROR: duplicate label found in evidence track: {0}".format(label))

        # copy everything except the label itself.  BUGFIX: the original used
        # "key is not 'label'", an identity comparison with a string literal
        # that only worked through CPython string interning; use != instead.
        ev[label] = {key: value for key, value in entry.items() if key != 'label'}

    return ev
def perform_final_checks(polypeptides=None, config=None, log_fh=None):
    """
    Does a round of checks we want to perform on an annotated set of polypeptides
    before exporting them.  Currently:

    - Make sure a gene product name is assigned.  This might accidentally become "None" if
      a match existed to a subject which didn't have a name properly entered in the index.
    """
    fallback_name = config['general']['default_product_name']

    for pep_id, pep in polypeptides.items():
        if pep.annotation.product_name is None:
            log_fh.write("WARNING: {0}: Somehow made it through annotation with no product name. Setting to default\n".format(pep_id))
            pep.annotation.product_name = fallback_name
class Match(object):
    """
    Python doesn't really have a good syntax for doing a series of conditional checks on a line if you want
    to also capture part of the matter and use it.  This wraps the match object so that we can do exactly that.

    Example use:

        match = Match()

        if match.match(pattern1, string1):
            do_something( print(match.m.group(1)) )
        elif match.match(pattern2, string2):
            do_something_else( print(match.m.group(1)) )
    """
    def __init__(self):
        # holds the most recent re.match() result (None until match() is called)
        self.m = None

    def match(self, *args, **kwds):
        """Runs re.match(), stashes the result on self.m, returns True on a hit."""
        result = re.match(*args, **kwds)
        self.m = result
        return result is not None
# Script entry point: run main() under cProfile, dumping per-process profiling
# stats to FALCON.profile.<pid>.dat for later inspection with the pstats module.
if __name__ == '__main__':
    cProfile.run('main()', "FALCON.profile.{0}.dat".format(os.getpid()))
| |
def fit_2lines(self,wl,spec1d,err1d,a0=n.array([2795.,2802.]),lineName="AA",lineName2="BB", aTR=2797.5,cmin1=2650.,cmax1=2770.,cmin2=2807.,cmax2=2840.,prior=n.array([1.,1.,1e-18,-1e-18]),model="gaussian"):
    """
    fits jointly two gaussian line profiles, one on each side of the transition
    wavelength aTR, each sitting on its own constant continuum level estimated
    as the median flux in a side window.

    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected position of the peak of the lines in the observed frame (redshifted). 2 positions given.
    :param lineName: suffix characterizing the first line in the headers of the output
    :param lineName2: suffix characterizing the second line in the headers of the output
    :param aTR: transition wavelength: pixels bluewards are modeled with line 1 over the left continuum, redwards with line 2 over the right continuum
    :param cmin1: blue edge of the window used to estimate the blueward continuum
    :param cmax1: red edge of the window used to estimate the blueward continuum
    :param cmin2: blue edge of the window used to estimate the redward continuum
    :param cmax2: red edge of the window used to estimate the redward continuum
    :param prior: initial guess passed to curve_fit: [sigma1, sigma2, flux1, flux2]
    :param model: line model to be fitted : only "gaussian" is implemented in this routine

    Returns :
        * array 1 with the parameters of the model (self.dV placeholders when the fit could not be run)
        * array 2 with the model (wavelength, flux model)
        * header corresponding to the array 1
    """
    header=" a0_"+lineName+" flux_"+lineName+" fluxErr_"+lineName+" sigma_"+lineName+" sigmaErr_"+lineName+" a0_"+lineName2+" flux_"+lineName2+" fluxErr_"+lineName2+" sigma_"+lineName2+" sigmaErr_"+lineName2+" continu_"+lineName+" continu_"+lineName2+" chi2_"+lineName+" ndof_"+lineName
    # "not fitted" placeholders returned when the data do not cover the windows
    # or the fit fails (self.dV is the class-wide dummy value)
    outPutNF=n.array([a0[0], self.dV,self.dV,self.dV,self.dV, a0[1],self.dV,self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    # pixels used in the fit and in the two continuum estimates
    domainLine=(wl>a0.min()-self.fitWidth)&(wl<a0.max()+self.fitWidth)
    domainContL=(wl>cmin1)&(wl<cmax1)
    domainContR=(wl>cmin2)&(wl<cmax2)
    # only fit when the spectrum covers both continuum windows with >2 pixels each
    if cmin1+50>wl.min() and cmax2-50<wl.max() and len(domainContL.nonzero()[0])>2 and len(domainContR.nonzero()[0])>2 :
        continuL=n.median(spec1d[domainContL])
        continuR=n.median(spec1d[domainContR])
        # model : absorption first, emission second
        def flG(aai,sig1,sig2,F1,F2):
            # gaussian 1 over the left continuum (bluewards of aTR)
            aa=aai[(aai<aTR)]
            left=continuL + F1*(n.e**(-(aa-a0[0])**2./(2.*sig1**2.)))/(sig1*(2.*n.pi)**0.5)
            # gaussian 2 over the right continuum (redwards of aTR)
            aa=aai[(aai>=aTR)]
            right=continuR + F2*(n.e**(-(aa-a0[1])**2./(2.*sig2**2.)))/(sig2*(2.*n.pi)**0.5)
            return n.hstack((left, right))
        out = curve_fit(flG, wl[domainLine], spec1d[domainLine], p0=prior,sigma=err1d[domainLine],maxfev=500000000)
        if out[1].__class__==n.ndarray :  # if the fit worked
            model1=flG(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3])
            var=err1d[domainLine]
            chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
            ndof=len(var)
            # 1-sigma errors from the diagonal of the covariance matrix
            sig1=out[0][0]
            sig1Err=out[1][0][0]**0.5
            sig2=out[0][1]
            sig2Err=out[1][1][1]**0.5
            flux1=out[0][2]
            flux1Err=out[1][2][2]**0.5
            flux2=out[0][3]
            flux2Err=out[1][3][3]**0.5
            outPut=n.array([a0[0],flux1,flux1Err,sig1,sig1Err,a0[1],flux2,flux2Err,sig2,sig2Err,continuL,continuR,chi2,ndof])
            mod=n.array([wl[domainLine],model1])
            return outPut,mod,header
        else :
            return outPutNF,modNF,header
    else :
        return outPutNF,modNF,header
def fit_doublet_position(self,wl,spec1d,err1d,aTR=4994.,a0=n.array([O3_4960,O3_5007]), cmin1=4400,cmax1=4800,cmin2=5025,cmax2=5150, p0=n.array([3.,3.,1e-16,3e-16,O3_4960,O3_5007]),model="gaussian"):
    """ re fits the [OIII] doublet to determine the centroid of the lines
    input single line fits of the [OIII] lines
    returns the position of the gaussians

    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param aTR: transition wavelength separating the two line models
    :param a0: expected positions of the two [OIII] lines (module constants O3_4960, O3_5007)
    :param cmin1, cmax1: window to estimate the blueward continuum
    :param cmin2, cmax2: window to estimate the redward continuum
    :param p0: initial guess [sig1, sig2, F1, F2, center1, center2] passed to curve_fit
    :param model: line model to be fitted; only "gaussian" is implemented

    NOTE: the original definition omitted 'self' although the body uses
    self.dV and self.fitWidth; it is restored here so the method is callable.
    """
    header=" a0_O3aDB flux_O3aDB fluxErr_O3aDB sigma_O3aDB sigmaErr_O3aDB a0_O3aDB_fit a0_O3aDB_fitErr a0_O3bDB flux_O3bDB fluxErr_O3bDB sigma_O3bDB sigmaErr_O3bDB a0_O3bDB_fit a0_O3bDB_fitErr cont_O3aDB cont_O3bDB chi2_O3DB"
    # "not fitted" placeholders (self.dV is the class-wide dummy value)
    outPutNF=n.array([a0[0], self.dV,self.dV,self.dV, self.dV,self.dV,self.dV, a0[1],self.dV,self.dV,self.dV, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    # where the continuum and the lines are considered
    domainLine=(wl>a0.min()-self.fitWidth)&(wl<a0.max()+self.fitWidth)
    domainContL=(wl>cmin1)&(wl<cmax1)
    domainContR=(wl>cmin2)&(wl<cmax2)
    # condition for fit: coverage of both continuum windows with >2 pixels each
    if cmin1+50>wl.min() and cmax2-50<wl.max() and len(domainContL.nonzero()[0])>2 and len(domainContR.nonzero()[0])>2 :
        continuL=n.median(spec1d[domainContL])
        continuR=n.median(spec1d[domainContR])
        # model : absorption first, emission second; here the line centers
        # (aa1, aa2) are free parameters, unlike the fixed-center fitters
        def flG(aai,sig1,sig2,F1,F2,aa1,aa2):
            aa=aai[(aai<aTR)]
            left=continuL + F1*(n.e**(-(aa-aa1)**2./(2.*sig1**2.)))/(sig1*(2.*n.pi)**0.5)
            aa=aai[(aai>=aTR)]
            right=continuR + F2*(n.e**(-(aa-aa2)**2./(2.*sig2**2.)))/(sig2*(2.*n.pi)**0.5)
            return n.hstack((left, right))
        out = curve_fit(flG, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=500000000)
        # if the fit worked
        if out[1].__class__==n.ndarray :
            model1=flG(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5])
            var=err1d[domainLine]
            chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)/len(var)
            sig1=out[0][0]
            sig1Err=out[1][0][0]**0.5
            sig2=out[0][1]
            sig2Err=out[1][1][1]**0.5
            flux1=out[0][2]
            flux1Err=out[1][2][2]**0.5
            flux2=out[0][3]
            flux2Err=out[1][3][3]**0.5
            aa1=out[0][4]
            aa1Err=out[1][4][4]**0.5
            aa2=out[0][5]
            aa2Err=out[1][5][5]**0.5
            # py2 print-statement replaced with a 2/3-compatible form
            print("%s %s" % (aa1, aa2))
            outPut=n.array([a0[0],flux1,flux1Err,sig1,sig1Err,aa1,aa1Err,a0[1],flux2,flux2Err,sig2,sig2Err,aa2,aa2Err,continuL,continuR,chi2])
            mod=n.array([wl[domainLine],model1])
            return outPut,mod, header
        else :
            return outPutNF,modNF,header
    else :
        return outPutNF,modNF,header
def fit_3lines(self,wl,spec1d,err1d,a0=n.array([6564.61,6549.859,6585.268]),lineName="AAA",lineName2="BBB",lineName3="CCC",aTR=6575.,cmin1=6400.,cmax1=6525.,cmin2=6600.,cmax2=6700.,prior=n.array([2.,4.,2.,1e-17,1e-16,1e-17]),model="gaussian"):
    """
    fits jointly three gaussian line profiles: lines 1 and 2 bluewards of the
    transition wavelength aTR over the left continuum, line 3 redwards over the
    right continuum (defaults target the H-alpha + [NII] complex).

    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected positions of the three line peaks in the observed frame
    :param lineName, lineName2, lineName3: suffixes characterizing each line in the output header
    :param aTR: transition wavelength between the left (lines 1,2) and right (line 3) models
    :param cmin1, cmax1: window to estimate the blueward continuum
    :param cmin2, cmax2: window to estimate the redward continuum
    :param prior: initial guess [sig1, sig2, sig3, F1, F2, F3] passed to curve_fit
    :param model: line model to be fitted; only "gaussian" is implemented

    Returns :
        * array with the fitted parameters (self.dV placeholders if the fit could not be run)
        * array with the model (wavelength, flux model)
        * header naming the columns of the first array

    NOTE: the original definition omitted 'self' although the body uses
    self.dV and self.fitWidth; it is restored here so the method is callable.
    """
    header=" a0_"+lineName+" flux_"+lineName+" fluxErr_"+lineName+" sigma_"+lineName+" sigmaErr_"+lineName+" a0_"+lineName2+" flux_"+lineName2+" fluxErr_"+lineName2+" sigma_"+lineName2+" sigmaErr_"+lineName2+" a0_"+lineName3+" flux_"+lineName3+" fluxErr_"+lineName3+" sigma_"+lineName3+" sigmaErr_"+lineName3+" continu_"+lineName+" continu_"+lineName3+" chi2_"+lineName+" ndof_"+lineName
    outPutNF=n.array([a0[0],self.dV,self.dV,self.dV,self.dV, a0[1],self.dV,self.dV,self.dV,self.dV, a0[2],self.dV,self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    domainLine=(wl>a0.min()-self.fitWidth)&(wl<a0.max()+self.fitWidth)
    domainContL=(wl>cmin1)&(wl<cmax1)
    domainContR=(wl>cmin2)&(wl<cmax2)
    if cmin1+50>wl.min() and cmax2-50<wl.max() and len(domainContL.nonzero()[0])>2 and len(domainContR.nonzero()[0])>2 :
        continuL=n.median(spec1d[domainContL])
        continuR=n.median(spec1d[domainContR])
        # model : absorption first, emission second
        def flG(aai,sig1,sig2,sig3,F1,F2,F3 ):
            aa=aai[(aai<aTR)]
            left=continuL + F1*(n.e**(-(aa-a0[0])**2./(2.*sig1**2.)))/(sig1*(2.*n.pi)**0.5) + F2*(n.e**(-(aa-a0[1])**2./(2.*sig2**2.)))/(sig2*(2.*n.pi)**0.5)
            aa=aai[(aai>=aTR)]
            right=continuR + F3*(n.e**(-(aa-a0[2])**2./(2.*sig3**2.)))/(sig3*(2.*n.pi)**0.5)
            return n.hstack((left, right))
        out = curve_fit(flG, wl[domainLine], spec1d[domainLine], p0=prior,sigma=err1d[domainLine],maxfev=500000000)
        if out[1].__class__==n.ndarray :  # if the fit worked
            model1=flG(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5])
            var=err1d[domainLine]
            chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
            ndof=len(var)
            sig1=out[0][0]
            sig1Err=out[1][0][0]**0.5
            sig2=out[0][1]
            sig2Err=out[1][1][1]**0.5
            sig3=out[0][2]
            sig3Err=out[1][2][2]**0.5
            flux1=out[0][3]
            flux1Err=out[1][3][3]**0.5
            flux2=out[0][4]
            flux2Err=out[1][4][4]**0.5
            flux3=out[0][5]
            flux3Err=out[1][5][5]**0.5
            outPut=n.array([a0[0],flux1,flux1Err,sig1,sig1Err,a0[1],flux2,flux2Err,sig2,sig2Err,a0[2],flux3,flux3Err,sig3,sig3Err ,continuL,continuR,chi2,ndof])
            mod=n.array([wl[domainLine],model1])
            return outPut,mod,header
        else :
            return outPutNF,modNF,header
    else :
        return outPutNF,modNF,header
def fit_4lines(self,wl,spec1d,err1d,a0=n.array([2586.1,2599.6,2612.5,2626.3]),lineName="AAAA",lineName2="BBBB",lineName3="CCCC",lineName4="DDDD",aTR=2606.,cmin1=2400.,cmax1=2550.,cmin2=2650.,cmax2=2770.,prior=n.array([1.,1.,1.,1.,1e-18,-1e-18,1e-18,-1e-18]),model="gaussian"):
    """
    fits jointly four gaussian line profiles: lines 1 and 2 bluewards of the
    transition wavelength aTR over the left continuum, lines 3 and 4 redwards
    over the right continuum.

    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected positions of the four line peaks in the observed frame
    :param lineName..lineName4: suffixes characterizing each line in the output header
    :param aTR: transition wavelength between the left (lines 1,2) and right (lines 3,4) models
    :param cmin1, cmax1: window to estimate the blueward continuum
    :param cmin2, cmax2: window to estimate the redward continuum
    :param prior: initial guess [sig1..sig4, F1..F4] passed to curve_fit
    :param model: line model to be fitted; only "gaussian" is implemented

    Returns :
        * array with the fitted parameters (self.dV placeholders if the fit could not be run)
        * array with the model (wavelength, flux model)
        * header naming the columns of the first array

    NOTE: the original definition omitted 'self' although the body uses
    self.dV and self.fitWidth; it is restored here so the method is callable.
    """
    header=" a0_"+lineName+" flux_"+lineName+" fluxErr_"+lineName+" sigma_"+lineName+" sigmaErr_"+lineName+" a0_"+lineName2+" flux_"+lineName2+" fluxErr_"+lineName2+" sigma_"+lineName2+" sigmaErr_"+lineName2+" a0_"+lineName3+" flux_"+lineName3+" fluxErr_"+lineName3+" sigma_"+lineName3+" sigmaErr_"+lineName3+" a0_"+lineName4+" flux_"+lineName4+" fluxErr_"+lineName4+" sigma_"+lineName4+" sigmaErr_"+lineName4+" continu_"+lineName+" continu_"+lineName4+" chi2_"+lineName+" ndof_"+lineName
    outPutNF=n.array([a0[0],self.dV,self.dV,self.dV,self.dV, a0[1],self.dV,self.dV,self.dV,self.dV, a0[2],self.dV,self.dV,self.dV,self.dV, a0[3],self.dV,self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    domainLine=(wl>a0.min()-self.fitWidth)&(wl<a0.max()+self.fitWidth)
    domainContL=(wl>cmin1)&(wl<cmax1)
    domainContR=(wl>cmin2)&(wl<cmax2)
    if cmin1+50>wl.min() and cmax2-50<wl.max() and len(domainContL.nonzero()[0])>2 and len(domainContR.nonzero()[0])>2 :
        continuL=n.median(spec1d[domainContL])
        continuR=n.median(spec1d[domainContR])
        # model : absorption first, emission second
        def flG(aai,sig1,sig2,sig3,sig4,F1,F2,F3,F4 ):
            aa=aai[(aai<aTR)]
            left=continuL + F1*(n.e**(-(aa-a0[0])**2./(2.*sig1**2.)))/(sig1*(2.*n.pi)**0.5) + F2*(n.e**(-(aa-a0[1])**2./(2.*sig2**2.)))/(sig2*(2.*n.pi)**0.5)
            aa=aai[(aai>=aTR)]
            right=continuR + F3*(n.e**(-(aa-a0[2])**2./(2.*sig3**2.)))/(sig3*(2.*n.pi)**0.5) + F4*(n.e**(-(aa-a0[3])**2./(2.*sig4**2.)))/(sig4*(2.*n.pi)**0.5)
            return n.hstack((left, right))
        out = curve_fit(flG, wl[domainLine], spec1d[domainLine], p0=prior,sigma=err1d[domainLine],maxfev=500000000)
        if out[1].__class__==n.ndarray :  # if the fit worked
            model1=flG(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7])
            var=err1d[domainLine]
            chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
            ndof=len(var)
            sig1=out[0][0]
            sig1Err=out[1][0][0]**0.5
            sig2=out[0][1]
            sig2Err=out[1][1][1]**0.5
            sig3=out[0][2]
            sig3Err=out[1][2][2]**0.5
            sig4=out[0][3]
            sig4Err=out[1][3][3]**0.5
            flux1=out[0][4]
            flux1Err=out[1][4][4]**0.5
            flux2=out[0][5]
            flux2Err=out[1][5][5]**0.5
            flux3=out[0][6]
            flux3Err=out[1][6][6]**0.5
            flux4=out[0][7]
            flux4Err=out[1][7][7]**0.5
            outPut=n.array([a0[0],flux1,flux1Err,sig1,sig1Err,a0[1],flux2,flux2Err,sig2,sig2Err,a0[2],flux3,flux3Err,sig3,sig3Err,a0[3],flux4,flux4Err,sig4,sig4Err ,continuL,continuR,chi2,ndof])
            mod=n.array([wl[domainLine],model1])
            return outPut,mod,header
        else :
            return outPutNF,modNF,header
    else :
        return outPutNF,modNF,header
def fit_6lines(self,wl,spec1d,err1d,a0=n.array([2326.7,2343.7,2365.3,2374.3,2382.2, 2396.2]),lineName="AAAA",lineName2="BBBB", lineName3="CCCC",lineName4="DDDD", lineName5="EEEE",lineName6="FFFF",aTR=2370.,cmin1=2080.,cmax1=2240.,cmin2=2400.,cmax2=2550.,prior=n.array([1.,1.,1.,1.,1.,1e-18,-1e-18,1e-18,-1e-18,-1e-18,1e-18,1.]),model="gaussian"):
    """
    fits jointly six gaussian line profiles: lines 1-3 bluewards of the
    transition wavelength aTR over the left continuum, lines 4-6 redwards over
    the right continuum.

    :param wl: wavelength (array, Angstrom)
    :param spec1d: flux observed in the broad band (array, f lambda)
    :param err1d: error on the flux observed in the broad band (array, f lambda)
    :param a0: expected positions of the six line peaks in the observed frame
    :param lineName..lineName6: suffixes characterizing each line in the output header
    :param aTR: transition wavelength between the left (lines 1-3) and right (lines 4-6) models
    :param cmin1, cmax1: window to estimate the blueward continuum
    :param cmin2, cmax2: window to estimate the redward continuum
    :param prior: initial guess [sig1..sig5, F1..F6, sig6] passed to curve_fit
                  (sig6 trails the fluxes, matching flG's parameter order)
    :param model: line model to be fitted; only "gaussian" is implemented

    Returns :
        * array with the fitted parameters (self.dV placeholders if the fit could not be run)
        * array with the model (wavelength, flux model)
        * header naming the columns of the first array

    NOTE: the original definition omitted 'self' although the body uses
    self.dV and self.fitWidth; it is restored here so the method is callable.
    """
    header=" a0_"+lineName+" flux_"+lineName+" fluxErr_"+lineName+" sigma_"+lineName+" sigmaErr_"+lineName+" a0_"+lineName2+" flux_"+lineName2+" fluxErr_"+lineName2+" sigma_"+lineName2+" sigmaErr_"+lineName2+" a0_"+lineName3+" flux_"+lineName3+" fluxErr_"+lineName3+" sigma_"+lineName3+" sigmaErr_"+lineName3+" a0_"+lineName4+" flux_"+lineName4+" fluxErr_"+lineName4+" sigma_"+lineName4+" sigmaErr_"+lineName4+" a0_"+lineName5+" flux_"+lineName5+" fluxErr_"+lineName5+" sigma_"+lineName5+" sigmaErr_"+lineName5+" a0_"+lineName6+" flux_"+lineName6+" fluxErr_"+lineName6+" sigma_"+lineName6+" sigmaErr_"+lineName6+" continu_"+lineName+" continu_"+lineName6+" chi2_"+lineName+" ndof_"+lineName
    outPutNF=n.array([a0[0],self.dV,self.dV,self.dV,self.dV, a0[1],self.dV,self.dV,self.dV,self.dV, a0[2],self.dV,self.dV,self.dV,self.dV, a0[3],self.dV,self.dV,self.dV,self.dV, a0[4],self.dV,self.dV,self.dV,self.dV, a0[5],self.dV,self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
    modNF=n.array([self.dV,self.dV])
    domainLine=(wl>a0.min()-self.fitWidth)&(wl<a0.max()+self.fitWidth)
    domainContL=(wl>cmin1)&(wl<cmax1)
    domainContR=(wl>cmin2)&(wl<cmax2)
    if cmin1+50>wl.min() and cmax2-50<wl.max() and len(domainContL.nonzero()[0])>2 and len(domainContR.nonzero()[0])>2 :
        continuL=n.median(spec1d[domainContL])
        continuR=n.median(spec1d[domainContR])
        # model : absorption first, emission second
        def flG(aai,sig1,sig2,sig3,sig4,sig5,F1,F2,F3,F4,F5,F6,sig6 ):
            aa=aai[(aai<aTR)]
            left=continuL + F1*(n.e**(-(aa-a0[0])**2./(2.*sig1**2.)))/(sig1*(2.*n.pi)**0.5) + F2*(n.e**(-(aa-a0[1])**2./(2.*sig2**2.)))/(sig2*(2.*n.pi)**0.5) + F3*(n.e**(-(aa-a0[2])**2./(2.*sig3**2.)))/(sig3*(2.*n.pi)**0.5)
            aa=aai[(aai>=aTR)]
            # BUGFIX: the line-6 exponent previously read (2.*sig6*2.) instead of
            # (2.*sig6**2.), giving line 6 a wrong gaussian width (every other
            # line term uses 2*sigma**2)
            right=continuR + F4*(n.e**(-(aa-a0[3])**2./(2.*sig4**2.)))/(sig4*(2.*n.pi)**0.5) + F5*(n.e**(-(aa-a0[4])**2./(2.*sig5**2.)))/(sig5*(2.*n.pi)**0.5) + F6*(n.e**(-(aa-a0[5])**2./(2.*sig6**2.)))/(sig6*(2.*n.pi)**0.5)
            return n.hstack((left, right))
        out = curve_fit(flG, wl[domainLine], spec1d[domainLine], p0=prior,sigma=err1d[domainLine],maxfev=500000000)
        if out[1].__class__==n.ndarray :  # if the fit worked
            model1=flG(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7],out[0][8],out[0][9],out[0][10],out[0][11])
            var=err1d[domainLine]
            chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
            ndof=len(var)
            sig1=out[0][0]
            sig1Err=out[1][0][0]**0.5
            sig2=out[0][1]
            sig2Err=out[1][1][1]**0.5
            sig3=out[0][2]
            sig3Err=out[1][2][2]**0.5
            sig4=out[0][3]
            sig4Err=out[1][3][3]**0.5
            sig5=out[0][4]
            sig5Err=out[1][4][4]**0.5
            flux1=out[0][5]
            flux1Err=out[1][5][5]**0.5
            flux2=out[0][6]
            flux2Err=out[1][6][6]**0.5
            flux3=out[0][7]
            flux3Err=out[1][7][7]**0.5
            flux4=out[0][8]
            flux4Err=out[1][8][8]**0.5
            flux5=out[0][9]
            flux5Err=out[1][9][9]**0.5
            flux6=out[0][10]
            flux6Err=out[1][10][10]**0.5
            sig6=out[0][11]
            sig6Err=out[1][11][11]**0.5
            outPut=n.array([a0[0],flux1,flux1Err,sig1,sig1Err,a0[1],flux2,flux2Err,sig2,sig2Err,a0[2],flux3,flux3Err,sig3,sig3Err,a0[3],flux4,flux4Err,sig4,sig4Err,a0[4],flux5,flux5Err,sig5,sig5Err,a0[5],flux6,flux6Err,sig6,sig6Err ,continuL,continuR,chi2,ndof])
            mod=n.array([wl[domainLine],model1])
            return outPut,mod,header
        else :
            return outPutNF,modNF,header
    else :
        return outPutNF,modNF,header
def fit_4000D(wl,spec1d,err1d,z,intLim4k=n.array([3600-150, 3600, 4140, 4140+150])):
    """ returns the log of the ratio of the integrated fluxes
    and the log of the ratio of the flux median density

    The 4000 A break strength is log10(right/left) where left/right are the
    fluxes integrated over the observed-frame windows
    [intLim4k[0], intLim4k[1]]*(1+z) and [intLim4k[2], intLim4k[3]]*(1+z).
    Upper/lower bounds come from integrating spec1d +/- err1d and crossing
    the envelopes (upper break uses lower right / upper left).

    Returns (outPut, mod, header); the sentinel triple when coverage is
    insufficient.

    NOTE(review): uses self.dV without a `self` parameter — only valid as a
    method of the enclosing class; confirm.
    """
    header=" logd4k logd4kU logd4kL" #left4kMedian left4kMedianU left4kMedianL right4kMedian right4kMedianU right4kMedianL "
    modNF=n.array([self.dV,self.dV])
    outPutNF=n.array([self.dV,self.dV,self.dV])
    if wl.min()<intLim4k[0]*(1+z) and wl.max()>intLim4k[3]*(1+z):
        # Integrate the spectrum and its +/- 1 sigma envelopes on each side
        # of the break.
        toInt=interp1d(wl,spec1d)
        left=quad(toInt,intLim4k[0]*(1+z),intLim4k[1]*(1+z))[0]
        right=quad(toInt,intLim4k[2]*(1+z),intLim4k[3]*(1+z))[0]
        toInt=interp1d(wl,spec1d+err1d)
        leftU=quad(toInt,intLim4k[0]*(1+z),intLim4k[1]*(1+z))[0]
        rightU=quad(toInt,intLim4k[2]*(1+z),intLim4k[3]*(1+z))[0]
        toInt=interp1d(wl,spec1d-err1d)
        leftL=quad(toInt,intLim4k[0]*(1+z),intLim4k[1]*(1+z))[0]
        rightL=quad(toInt,intLim4k[2]*(1+z),intLim4k[3]*(1+z))[0]
        d4=n.log10(right/left)
        d4U=n.log10(rightL/leftU)
        d4L=n.log10(rightU/leftL)
        # The median-based break estimates (leftMedian..dM4L) previously
        # computed here were dead code — never returned — and were removed.
        outPut=n.array([d4,d4U,d4L]) #,leftMedian,leftMedianU,leftMedianL,rightMedian,rightMedianU,rightMedianL
        mod=n.array([self.dV,self.dV])
        return outPut,mod,header
    else:
        return outPutNF,modNF,header
def fit_UV_slope(wl,spec1d,err1d,z,intLimUV=n.array([2000,2200,3000,3200,3400,3600,4100, 4300,4500,4700])):
    """ returns the luminosities in UV wavelength bands

    Each of the five bands (consecutive pairs of intLimUV rest-frame
    wavelengths) is integrated over the observed frame; the quoted error is
    half the difference between the +err1d and -err1d envelope integrals.
    Bands outside the wavelength coverage return the sentinel self.dV.

    NOTE(review): uses self.dV without a `self` parameter — only valid as a
    method of the enclosing class; confirm.
    """
    header=" L2100 L2100Err L3100 L3100Err L3500 L3500Err L4200 L4200Err L4600 L4600Err"
    modNF=n.array([self.dV,self.dV])

    def _band(lo, hi):
        # Integrate spec1d and its +/- err1d envelopes over the
        # observed-frame window [lo, hi]*(1+z).
        # Returns (central, upper, lower), or sentinels when out of coverage.
        lo_obs = lo*(1+z)
        hi_obs = hi*(1+z)
        if wl.min()<lo_obs and wl.max()>hi_obs:
            central=quad(interp1d(wl,spec1d),lo_obs,hi_obs)[0]
            upper=quad(interp1d(wl,spec1d+err1d),lo_obs,hi_obs)[0]
            lower=quad(interp1d(wl,spec1d-err1d),lo_obs,hi_obs)[0]
            return central, upper, lower
        return self.dV, self.dV, self.dV

    # The five copy-pasted band blocks were factored into _band; behavior
    # is unchanged.
    L2100,L2100U,L2100L=_band(intLimUV[0],intLimUV[1])
    L3100,L3100U,L3100L=_band(intLimUV[2],intLimUV[3])
    L3500,L3500U,L3500L=_band(intLimUV[4],intLimUV[5])
    L4200,L4200U,L4200L=_band(intLimUV[6],intLimUV[7])
    L4600,L4600U,L4600L=_band(intLimUV[8],intLimUV[9])
    outPut=n.array([L2100, (L2100U-L2100L)/2., L3100, (L3100U-L3100L)/2., L3500, (L3500U-L3500L)/2., L4200, (L4200U-L4200L)/2., L4600, (L4600U-L4600L)/2.])
    return outPut,modNF,header
| |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_monitors
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class TestMonitor(debug_events_monitors.BaseMonitor):
    """Monitor that records every callback it receives, keyed by index."""

    def __init__(self, debug_data_reader):
        super(TestMonitor, self).__init__(debug_data_reader)
        # Execution index -> Execution data object.
        self.executions = {}
        # Graph-execution-trace index -> GraphExecutionTrace data object.
        self.graph_execution_traces = {}

    def on_execution(self, execution_index, execution):
        """Store one execution; an index seen twice is an error."""
        if execution_index in self.executions:
            raise ValueError("Duplicate execution index: %d" % execution_index)
        self.executions[execution_index] = execution

    def on_graph_execution_trace(self, graph_execution_trace_index,
                                 graph_execution_trace):
        """Store one graph-execution trace; an index seen twice is an error."""
        if graph_execution_trace_index in self.graph_execution_traces:
            raise ValueError("Duplicate graph-execution-trace index: %d" %
                             graph_execution_trace_index)
        self.graph_execution_traces[graph_execution_trace_index] = (
            graph_execution_trace)
class DebugEventsMonitorTest(dumping_callback_test_lib.DumpingCallbackTestBase,
                             parameterized.TestCase):
    """End-to-end tests: dumped debug events reach BaseMonitor callbacks."""

    @parameterized.named_parameters(
        ("NoTensor", "NO_TENSOR"),
        ("ConciseHealth", "CONCISE_HEALTH"),
        ("FullHealth", "FULL_HEALTH"),
        ("FullTensor", "FULL_TENSOR"),
    )
    def testOnExecutionIsCalled(self, tensor_debug_mode):
        """A single eager op execution triggers exactly one on_execution()."""
        writer = dumping_callback.enable_dump_debug_info(
            self.dump_root, tensor_debug_mode=tensor_debug_mode)
        x = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
        y = constant_op.constant([[-1], [1]], dtype=dtypes.float32)
        math_ops.matmul(x, y)
        writer.FlushNonExecutionFiles()
        writer.FlushExecutionFiles()
        with debug_events_reader.DebugDataReader(self.dump_root) as reader:
            test_monitor = TestMonitor(reader)
            reader.update()
            # Eager execution: one execution record, no graph traces.
            self.assertLen(test_monitor.executions, 1)
            self.assertEmpty(test_monitor.graph_execution_traces)
            execution = test_monitor.executions[0]
            self.assertTrue(execution.wall_time)
            self.assertEqual(execution.op_type, "MatMul")
            self.assertLen(execution.output_tensor_device_ids, 1)
            self.assertLen(execution.input_tensor_ids, 2)
            self.assertLen(execution.output_tensor_ids, 1)
            self.assertEqual(execution.num_outputs, 1)
            self.assertEqual(execution.graph_id, "")
            # The shape of debug_tensor_values depends on the debug mode.
            if tensor_debug_mode == "NO_TENSOR":
                self.assertIsNone(execution.debug_tensor_values)
            elif tensor_debug_mode == "CONCISE_HEALTH":
                self.assertLen(execution.debug_tensor_values, 1)
                # [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
                self.assertLen(execution.debug_tensor_values[0], 5)
            elif tensor_debug_mode == "FULL_HEALTH":
                self.assertLen(execution.debug_tensor_values, 1)
                # [tensor_id, device_id, dtype, rank, element_count,
                # neg_inf_count, pos_inf_count, nan_count,
                # neg_finite_count, zero_count, pos_finite_count].
                self.assertLen(execution.debug_tensor_values[0], 11)
            elif tensor_debug_mode == "FULL_TENSOR":
                # Full tensor values are not stored in the debug_tensor_values field.
                self.assertIsNone(execution.debug_tensor_values)
                self.assertAllClose(
                    reader.execution_to_tensor_values(execution), [[[1.], [1.]]])

    @parameterized.named_parameters(
        ("ConciseHealth", "CONCISE_HEALTH"),
        ("FullHealth", "FULL_HEALTH"),
        ("FullTensor", "FULL_TENSOR"),
    )
    def testOnGraphExecutionTraceIsCalled(self, tensor_debug_mode):
        """A tf.function call produces per-op graph-execution traces."""
        writer = dumping_callback.enable_dump_debug_info(
            self.dump_root, tensor_debug_mode=tensor_debug_mode)

        @def_function.function
        def unique_sum(xs):
            """Sum over the unique values, for testing."""
            unique_xs, indices = array_ops.unique(xs)
            return math_ops.reduce_sum(unique_xs), indices

        xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
        unique_sum(xs)
        writer.FlushNonExecutionFiles()
        writer.FlushExecutionFiles()
        with debug_events_reader.DebugDataReader(self.dump_root) as reader:
            test_monitor = TestMonitor(reader)
            reader.update()
            # One top-level (function) execution...
            self.assertLen(test_monitor.executions, 1)
            execution = test_monitor.executions[0]
            self.assertTrue(execution.wall_time)
            self.assertStartsWith(execution.op_type, "__inference_unique_sum")
            self.assertLen(execution.output_tensor_device_ids, 2)
            self.assertLen(execution.input_tensor_ids, 1)
            self.assertLen(execution.output_tensor_ids, 2)
            self.assertEqual(execution.num_outputs, 2)
            self.assertTrue(execution.graph_id)
            # ...plus per-op traces, whose set depends on the debug mode.
            traces = test_monitor.graph_execution_traces
            if tensor_debug_mode == "CONCISE_HEALTH":
                self.assertLen(traces, 3)  # [Placeholder:0, Unique:0 , Sum:0].
                self.assertEqual(traces[0].op_type, "Placeholder")
                self.assertEqual(traces[0].output_slot, 0)
                self.assertEqual(traces[1].op_type, "Unique")
                self.assertEqual(traces[1].output_slot, 0)
                # Unique:1 is not traced under CONCISE_HEALTH mode, as it's int-dtype.
                self.assertEqual(traces[2].op_type, "Sum")
                self.assertEqual(traces[2].output_slot, 0)
                # [tensor_id, element_count, neg_inf_count, pos_inf_count, nan_count].
                self.assertLen(traces[0].debug_tensor_value, 5)
                self.assertLen(traces[1].debug_tensor_value, 5)
                self.assertLen(traces[2].debug_tensor_value, 5)
            elif tensor_debug_mode == "FULL_HEALTH":
                self.assertLen(traces, 3)  # [Placeholder:0, Unique:0 , Sum:0].
                self.assertEqual(traces[0].op_type, "Placeholder")
                self.assertEqual(traces[0].output_slot, 0)
                self.assertEqual(traces[1].op_type, "Unique")
                self.assertEqual(traces[1].output_slot, 0)
                # Unique:1 is not traced under FULL_HEALTH mode, as it's int-dtype.
                self.assertEqual(traces[2].op_type, "Sum")
                self.assertEqual(traces[2].output_slot, 0)
                # [tensor_id, device_id, dtype, rank, element_count,
                # neg_inf_count, pos_inf_count, nan_count,
                # neg_finite_count, zero_count, pos_finite_count].
                self.assertLen(traces[0].debug_tensor_value, 11)
                self.assertLen(traces[1].debug_tensor_value, 11)
                self.assertLen(traces[2].debug_tensor_value, 11)
            elif tensor_debug_mode == "FULL_TENSOR":
                # [Placeholder:0, Unique:0, Unique:1, Const:0, Sum:0].
                self.assertLen(traces, 5)
                self.assertEqual(traces[0].op_type, "Placeholder")
                self.assertEqual(traces[0].output_slot, 0)
                self.assertIsNone(traces[0].debug_tensor_value)
                self.assertAllEqual(
                    reader.graph_execution_trace_to_tensor_value(traces[0]),
                    [2., 6., 8., 1., 2.])
                self.assertEqual(traces[1].op_type, "Unique")
                self.assertEqual(traces[1].output_slot, 0)
                self.assertIsNone(traces[1].debug_tensor_value)
                self.assertAllEqual(
                    reader.graph_execution_trace_to_tensor_value(traces[1]),
                    [2., 6., 8., 1.])
                self.assertEqual(traces[2].op_type, "Unique")
                self.assertEqual(traces[2].output_slot, 1)
                self.assertIsNone(traces[2].debug_tensor_value)
                self.assertAllEqual(
                    reader.graph_execution_trace_to_tensor_value(traces[2]),
                    [0, 1, 2, 3, 0])
                self.assertEqual(traces[3].op_type, "Const")
                self.assertEqual(traces[3].output_slot, 0)
                self.assertIsNone(traces[3].debug_tensor_value)
                self.assertAllClose(
                    reader.graph_execution_trace_to_tensor_value(traces[3]), [0])
                self.assertEqual(traces[4].op_type, "Sum")
                self.assertEqual(traces[4].output_slot, 0)
                self.assertIsNone(traces[4].debug_tensor_value)
                self.assertAllClose(
                    reader.graph_execution_trace_to_tensor_value(traces[4]), 17.)
class AlertDataObjectsTest(test_util.TensorFlowTestCase):
    """Unit tests for alert-class objects."""

    def testInfNanMonitor(self):
        """InfNanAlert stores each constructor argument on the right field."""
        alert = debug_events_monitors.InfNanAlert(
            1234,
            "FooOp",
            1,
            size=1000,
            num_neg_inf=5,
            num_pos_inf=10,
            num_nan=20,
            execution_index=777,
            graph_execution_trace_index=888)
        self.assertEqual(alert.wall_time, 1234)
        self.assertEqual(alert.op_type, "FooOp")
        self.assertEqual(alert.output_slot, 1)
        self.assertEqual(alert.size, 1000)
        self.assertEqual(alert.num_neg_inf, 5)
        self.assertEqual(alert.num_pos_inf, 10)
        self.assertEqual(alert.num_nan, 20)
        self.assertEqual(alert.execution_index, 777)
        self.assertEqual(alert.graph_execution_trace_index, 888)
class InfNanMonitorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
    """Unit tests for InfNanMonitor across the tensor-debug modes."""

    def testInfNanMonitorStartsWithEmptyAlerts(self):
        """A fresh monitor has no alerts."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        self.assertEmpty(monitor.alerts())

    def testInfNanMonitorOnExecutionUnderCurtHealthMode(self):
        """CURT_HEALTH: an any_inf_nan flag yields an alert with no counts."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        execution_digest = debug_events_reader.ExecutionDigest(
            1234, 1, "FooOp", output_tensor_device_ids=[0, 1])
        execution = debug_events_reader.Execution(
            execution_digest,
            "worker01", ["a1", "b2", "e3"],
            debug_event_pb2.TensorDebugMode.CURT_HEALTH,
            graph_id=None,
            input_tensor_ids=[12, 34],
            output_tensor_ids=[56, 78],
            debug_tensor_values=[[-1, 0], [-1, 1]])  # [tensor_id, any_inf_nan].
        monitor.on_execution(50, execution)

        self.assertLen(monitor.alerts(), 1)
        alert = monitor.alerts()[0]
        self.assertEqual(alert.wall_time, 1234)
        self.assertEqual(alert.op_type, "FooOp")
        self.assertEqual(alert.output_slot, 1)
        # The four fields below are unavailable under CURT_HEALTH mode by design.
        self.assertIsNone(alert.size)
        self.assertIsNone(alert.num_neg_inf)
        self.assertIsNone(alert.num_pos_inf)
        self.assertIsNone(alert.num_nan)
        self.assertEqual(alert.execution_index, 50)
        self.assertIsNone(alert.graph_execution_trace_index)

    @parameterized.named_parameters(
        ("ConciseHealth",
         debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
         # [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
         [[-1, 10, 1, 2, 3],
          [-1, 100, 0, 0, 0]]),
        ("FullHealth",
         debug_event_pb2.TensorDebugMode.FULL_HEALTH,
         # [tensor_id, device_id, dtype, rank, element_count,
         # neg_inf_count, pos_inf_count, nan_count,
         # neg_finite_count, zero_count, pos_finite_count].
         [[-1, -1, 1, 1, 10, 1, 2, 3, 0, 0, 0],
          [-1, -1, 1, 1, 100, 0, 0, 0, 10, 30, 60]]),
    )
    def testInfNanMonitorOnExecutionUnderHealthMode(self,
                                                    tensor_debug_mode,
                                                    debug_tensor_values):
        """Health modes: inf/nan counts are lifted straight into the alert."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        execution_digest = debug_events_reader.ExecutionDigest(
            1234, 1, "BarOp", output_tensor_device_ids=[0, 1])
        execution = debug_events_reader.Execution(
            execution_digest,
            "worker01",
            ["a1", "b2", "e3"],
            tensor_debug_mode,
            graph_id=None,
            input_tensor_ids=[12, 34],
            output_tensor_ids=[56, 78],
            debug_tensor_values=debug_tensor_values)
        monitor.on_execution(60, execution)

        # Only the first output has inf/nan, so exactly one alert.
        self.assertLen(monitor.alerts(), 1)
        alert = monitor.alerts()[0]
        self.assertEqual(alert.wall_time, 1234)
        self.assertEqual(alert.op_type, "BarOp")
        self.assertEqual(alert.output_slot, 0)
        self.assertEqual(alert.size, 10)
        self.assertEqual(alert.num_neg_inf, 1)
        self.assertEqual(alert.num_pos_inf, 2)
        self.assertEqual(alert.num_nan, 3)
        self.assertEqual(alert.execution_index, 60)
        self.assertIsNone(alert.graph_execution_trace_index)

    @parameterized.named_parameters(
        ("Shape",
         debug_event_pb2.TensorDebugMode.SHAPE,
         # [tensor_id, dtype, rank, element_cont, ...shape_truncate_6]
         [[-1, 1, 2, 6, 3, 2, 0, 0, 0, 0],
          [-1, 10, 1, 7, 7, 0, 0, 0, 0, 0]]),
    )
    def testInfNanMonitorOnExecutionUnderModeWithNoInfNanInfo(
        self,
        tensor_debug_mode,
        debug_tensor_values):
        """Modes carrying no inf/nan info must produce no alerts."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        execution_digest = debug_events_reader.ExecutionDigest(
            1234, 1, "BarOp", output_tensor_device_ids=[0, 1])
        execution = debug_events_reader.Execution(
            execution_digest,
            "worker01",
            ["a1", "b2", "e3"],
            tensor_debug_mode,
            graph_id=None,
            input_tensor_ids=[12, 34],
            output_tensor_ids=[56, 78],
            debug_tensor_values=debug_tensor_values)
        monitor.on_execution(60, execution)
        self.assertEmpty(monitor.alerts())

    @parameterized.named_parameters(
        ("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
        ("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
                                  ], np.float32, 4, 1, 0, 2),
        ("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
        ("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
        ("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
    )
    def testInfNanMonitorOnExecutionUnderFullTensorModeWorks(
        self, tensor_value, dtype, expected_size, expected_num_neg_inf,
        expected_num_pos_inf, expected_num_nan):
        """FULL_TENSOR: the monitor scans the stored tensor values itself."""
        mock_reader = test.mock.MagicMock()
        mock_reader.execution_to_tensor_values.return_value = [
            np.array([[0.0, -1.0, 1.0]]),
            np.array(tensor_value, dtype=dtype)
        ]
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        execution_digest = debug_events_reader.ExecutionDigest(
            1234,
            1,
            "__inference_bar_function_1234",
            output_tensor_device_ids=[0, 1])
        execution = debug_events_reader.Execution(
            execution_digest,
            "worker01", ["a1", "b2", "e3"],
            debug_event_pb2.TensorDebugMode.FULL_TENSOR,
            graph_id=None,
            input_tensor_ids=[12, 34],
            output_tensor_ids=[56, 78])
        monitor.on_execution(70, execution)

        if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
            self.assertLen(monitor.alerts(), 1)
            alert = monitor.alerts()[0]
            self.assertEqual(alert.wall_time, 1234)
            self.assertEqual(alert.op_type, "__inference_bar_function_1234")
            self.assertEqual(alert.output_slot, 1)
            self.assertEqual(alert.size, expected_size)
            self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
            self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
            self.assertEqual(alert.num_nan, expected_num_nan)
            self.assertEqual(alert.execution_index, 70)
            # FIX: a stray `70` was being passed as the `msg` argument of
            # assertIsNone; the field is simply expected to be None here.
            self.assertIsNone(alert.graph_execution_trace_index)
        else:
            self.assertEmpty(monitor.alerts())

    def testInfNaNMonitorOnGraphExecutionTraceCurtHealthMode(self):
        """CURT_HEALTH graph trace: alert carries the slot but no counts."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        trace_digest = debug_events_reader.GraphExecutionTraceDigest(
            1234, 1, "FooOp", "FooOp_1", 2, "g1")
        trace = debug_events_reader.GraphExecutionTrace(
            trace_digest, ["g0", "g1"],
            debug_event_pb2.TensorDebugMode.CURT_HEALTH,
            debug_tensor_value=[9, 1])  # [tensor_id, any_inf_nan].
        monitor.on_graph_execution_trace(55, trace)
        self.assertLen(monitor.alerts(), 1)
        alert = monitor.alerts()[0]
        self.assertEqual(alert.wall_time, 1234)
        self.assertEqual(alert.op_type, "FooOp")
        self.assertEqual(alert.output_slot, 2)
        # The four fields below are unavailable under CURT_HEALTH mode by design.
        self.assertIsNone(alert.size)
        self.assertIsNone(alert.num_neg_inf)
        self.assertIsNone(alert.num_pos_inf)
        self.assertIsNone(alert.num_nan)
        self.assertIsNone(alert.execution_index)
        self.assertEqual(alert.graph_execution_trace_index, 55)

    def testInfNaNMonitorOnGraphExecutionTraceConciseHealthMode(self):
        """CONCISE_HEALTH graph trace: counts are copied into the alert."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        trace_digest = debug_events_reader.GraphExecutionTraceDigest(
            1234, 1, "FooOp", "FooOp_1", 2, "g1")
        trace = debug_events_reader.GraphExecutionTrace(
            trace_digest,
            ["g0", "g1"],
            debug_event_pb2.TensorDebugMode.CONCISE_HEALTH,
            # [tensor_id, size, num_neg_inf, num_pos_inf, num_nan].
            debug_tensor_value=[9, 100, 3, 2, 1])
        monitor.on_graph_execution_trace(55, trace)

        self.assertLen(monitor.alerts(), 1)
        alert = monitor.alerts()[0]
        self.assertEqual(alert.wall_time, 1234)
        self.assertEqual(alert.op_type, "FooOp")
        self.assertEqual(alert.output_slot, 2)
        self.assertEqual(alert.size, 100)
        self.assertEqual(alert.num_neg_inf, 3)
        self.assertEqual(alert.num_pos_inf, 2)
        self.assertEqual(alert.num_nan, 1)
        self.assertEqual(alert.graph_execution_trace_index, 55)

    @parameterized.named_parameters(
        ("FloatsScalarWithInfAndNan", np.inf, np.float32, 1, 0, 1, 0),
        ("Floats2DWithInfAndNan", [[0, np.nan, np.nan, -np.inf]
                                  ], np.float32, 4, 1, 0, 2),
        ("Floats1DWithoutInfOrNan", [0, -1e6, 1e6, 9e5], np.float32, 4, 0, 0, 0),
        ("Integers", [[0, 1000, -200, -300]], np.int32, 4, 0, 0, 0),
        ("Booleans", [False, True, False, False], np.int32, 4, 0, 0, 0),
    )
    def testInfNanMonitorOnGraphExecutionTraceUnderFullTensorModeWorks(
        self, tensor_value, dtype, expected_size, expected_num_neg_inf,
        expected_num_pos_inf, expected_num_nan):
        """FULL_TENSOR graph trace: the monitor scans the tensor value."""
        mock_reader = test.mock.MagicMock()
        mock_reader.graph_execution_trace_to_tensor_value.return_value = np.array(
            tensor_value, dtype=dtype)
        monitor = debug_events_monitors.InfNanMonitor(mock_reader)
        trace_digest = debug_events_reader.GraphExecutionTraceDigest(
            1234, 1, "BazOp", "name_scope_3/BazOp_1", 2, "g1")
        trace = debug_events_reader.GraphExecutionTrace(
            trace_digest, ["g0", "g1"], debug_event_pb2.TensorDebugMode.FULL_TENSOR)
        monitor.on_graph_execution_trace(80, trace)

        if expected_num_neg_inf or expected_num_pos_inf or expected_num_nan:
            self.assertLen(monitor.alerts(), 1)
            alert = monitor.alerts()[0]
            self.assertEqual(alert.wall_time, 1234)
            self.assertEqual(alert.op_type, "BazOp")
            self.assertEqual(alert.output_slot, 2)
            self.assertEqual(alert.size, expected_size)
            self.assertEqual(alert.num_neg_inf, expected_num_neg_inf)
            self.assertEqual(alert.num_pos_inf, expected_num_pos_inf)
            self.assertEqual(alert.num_nan, expected_num_nan)
            self.assertIsNone(alert.execution_index)
            self.assertEqual(alert.graph_execution_trace_index, 80)
        else:
            self.assertEmpty(monitor.alerts())

    def testLimitingInfNanMonitorAlertCountWorks(self):
        """With limit=3, only the first three alerts are retained."""
        mock_reader = test.mock.MagicMock()
        monitor = debug_events_monitors.InfNanMonitor(mock_reader, limit=3)
        for i in range(10):
            execution_digest = debug_events_reader.ExecutionDigest(
                i * 1000, 1, "FooOp", output_tensor_device_ids=[0, 1])
            execution = debug_events_reader.Execution(
                execution_digest,
                "worker01", ["a1", "b2", "e3"],
                debug_event_pb2.TensorDebugMode.CURT_HEALTH,
                graph_id=None,
                input_tensor_ids=[12, 34],
                output_tensor_ids=[56, 78],
                debug_tensor_values=[[-1, 0], [-1, 1]])  # [tensor_id, any_inf_nan].
            monitor.on_execution(i, execution)

        alerts = monitor.alerts()
        self.assertLen(alerts, 3)
        for i, alert in enumerate(alerts):
            self.assertEqual(alert.wall_time, i * 1000)
            self.assertEqual(alert.op_type, "FooOp")
            self.assertEqual(alert.output_slot, 1)
            # The four fields below are unavailable under CURT_HEALTH mode by design.
            self.assertIsNone(alert.size)
            self.assertIsNone(alert.num_neg_inf)
            self.assertIsNone(alert.num_pos_inf)
            self.assertIsNone(alert.num_nan)
            self.assertEqual(alert.execution_index, i)
            self.assertIsNone(alert.graph_execution_trace_index)
if __name__ == "__main__":
    # Switch on eager execution before handing control to the test runner.
    ops.enable_eager_execution()
    googletest.main()
| |
#!/usr/bin/env python3
import os
import sys
import fnmatch
import subprocess
import argparse
from datetime import date
from collections import defaultdict
from configparser import SafeConfigParser
from fanfic_scraper import current_fanfic
from pony.orm import db_session, select, count
from fanfic_scraper.db_pony import DataBaseLogic, Category, Fanfic
from fanfic_scraper import cui
import textwrap
from time import sleep
# Default archive layout; config_load() may override each of these from the
# user's ~/.ff/config.ini.
basePath = '/home/ubuntu/OneDrive/'
arcRoot = 'FF_Archive'
# Module-level DB facade; presumably assigned a FanficDB instance at startup
# (the assignment is not visible in this chunk) -- confirm.
db = None
db_name = 'FanfictionDB.db'
db_folder = 'Read'
dl_folder = 'htm'
def ensure_dir(path):
    """Create *path* (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def get_config():
    """Return the path to the user's config file, creating ~/.ff if needed."""
    cfg_dir = os.path.join(os.path.expanduser("~"), ".ff")
    # Inlined directory creation (same effect as the ensure_dir helper).
    if not os.path.exists(cfg_dir):
        os.makedirs(cfg_dir)
    return os.path.join(cfg_dir, "config.ini")
def config_load():
    """Load path settings from the user config file into module globals.

    A missing file, section or option leaves the corresponding module
    default untouched; any values successfully read before the failure are
    still applied.
    """
    global basePath
    global arcRoot
    global db_folder
    global db_name
    global dl_folder
    # Local import keeps this edit self-contained; configparser.Error covers
    # NoSectionError / NoOptionError raised by .get().
    from configparser import Error as ConfigError

    t_path = ''
    t_root = ''
    t_dbname = ''
    t_dbfolder = ''
    t_dlfolder = ''
    config = SafeConfigParser()
    cfg = get_config()
    if os.path.isfile(cfg):
        config.read(cfg)
        try:
            t_path = config.get('path', 'path')
            t_root = config.get('path', 'rootfolder')
            t_dbfolder = config.get('path', 'dbfolder')
            t_dbname = config.get('path', 'dbname')
            t_dlfolder = config.get('path', 'downloadfolder')
        except ConfigError:
            # Partial/malformed config: keep whatever was read so far and
            # fall back to the defaults for the rest. (Was a bare `except:`
            # with a dummy statement.)
            pass
    if t_path:
        basePath = t_path
    if t_root:
        arcRoot = t_root
    if t_dbfolder:
        db_folder = t_dbfolder
    if t_dbname:
        db_name = t_dbname
    if t_dlfolder:
        dl_folder = t_dlfolder
def config_save():
    """Persist the current path settings to the user config file."""
    cfg = get_config()
    parser = SafeConfigParser()
    parser.read(cfg)
    if not parser.has_section('path'):
        parser.add_section('path')
    # Write every setting under the single [path] section.
    for key, value in (('path', basePath),
                       ('rootfolder', arcRoot),
                       ('dbfolder', db_folder),
                       ('dbname', db_name),
                       ('downloadfolder', dl_folder)):
        parser.set('path', key, value)
    with open(cfg, 'w') as fh:
        parser.write(fh)
def IsNotStale(update_date, last_checked):
    """Return True when a story is due for a re-check.

    The longer a story went without an update (measured at the last check),
    the longer we wait between checks. A story with no recorded update or
    check date is always due.

    FIX: the original descending `if` chain was not `elif`, so every branch
    above 30 days fell through and `wait` always ended up 7 — the 180/90/60/30
    day back-offs were dead assignments. The chain is now `elif`, so the
    largest matching threshold wins.
    """
    if not last_checked:
        return True
    if not update_date:
        return True
    # How stale the story already was when we last checked it.
    idle = last_checked - update_date
    if idle.days > 360:
        wait = 180
    elif idle.days > 180:
        wait = 90
    elif idle.days > 90:
        wait = 60
    elif idle.days > 60:
        wait = 30
    elif idle.days > 30:
        wait = 7
    else:
        # Recently updated stories are always eligible for a check.
        wait = 0
    since_check = date.today() - last_checked
    if wait > 0 and since_check.days > wait:
        return False
    return True
def check_url(url):
    """Return True when *url* belongs to one of the supported fanfic hosts."""
    supported_hosts = ('fanfiction.net',
                       'fanfictionproxy.net',
                       'hpfanficarchive.com')
    return any(host in url for host in supported_hosts)
def set_ffargs(location, folder):
    """Build the argparse namespace expected by the fanfic_scraper classes."""
    parser = argparse.ArgumentParser(description=('fanfic_scraper args'))
    parser.add_argument("-f", "--folder", required=True,
                        help="Name of folder to place fanfic in.")
    parser.add_argument("-l", "--location", default=os.getcwd(),
                        help="set download location")
    parser.add_argument("-ct", "--chapterthreads", default=1,
                        help="Number of parallel chapters downloads.")
    parser.add_argument("-wt", "--waittime", default=15,
                        help="Wait time before retry if encountered with an error")
    parser.add_argument("-rt", "--retries", default=10,
                        help="Number of retries before giving up")
    # Only location/folder are supplied; the rest keep their defaults.
    return parser.parse_args(['--location=' + location, '--folder=' + folder])
def print_list(matches):
    """Print *matches* in rows of six, each entry followed by a tab.

    A final row with the remainder is always printed, even when empty
    (matching the historical output format exactly).
    """
    per_row = 6
    full_rows = len(matches) // per_row
    for row in range(full_rows):
        start = row * per_row
        line = ''.join(entry + '\t'
                       for entry in matches[start:start + per_row])
        print(line)
    remainder = ''.join(entry + '\t'
                        for entry in matches[full_rows * per_row:])
    print(remainder)
def get_subfolders(path):
    """Return the names of *path*'s immediate subdirectories, sorted."""
    return sorted(entry.name for entry in os.scandir(path) if entry.is_dir())
def get_files(path, value):
    """Recursively collect file names under *path* matching glob *value*.

    Returns the bare file names (no directories), sorted alphabetically.
    """
    found = []
    for _root, _dirs, names in os.walk(path):
        found.extend(fnmatch.filter(names, value))
    return sorted(found)
def convert_file(source, htm):
    """Convert one HTML chapter to plain text, then delete the HTML file.

    Shells out to the external `html2text` tool (emphasis stripped, wrapped
    at 80 columns) and writes its stdout to a .txt file alongside the source.
    """
    txt_name = htm.split('.')[0] + '.txt'
    html_path = os.path.abspath(os.path.join(source, htm))
    txt_path = os.path.abspath(os.path.join(source, txt_name))
    print('Converting file: {0} to text file: {1}'.format(htm, txt_name))
    with open(txt_path, "w") as outfile:
        subprocess.call(['html2text', '--ignore-emphasis', '-b 80', html_path],
                        stdout=outfile)
    os.remove(html_path)
class FanficDB:
    """Thin persistence layer over the pony ORM entities Category and Fanfic."""

    def __init__(self):
        # Database file lives under the configured archive layout.
        db_path = os.path.join(basePath, arcRoot, db_folder, db_name)
        # NOTE(review): the handle is not stored on self; DataBaseLogic
        # presumably binds the pony database globally on construction --
        # confirm against fanfic_scraper.db_pony.
        dbhandle = DataBaseLogic(db_path)

    def add_story(self, folder, cat_id, url, info):
        """Insert a new Fanfic row from the scraped `info` dict."""
        with db_session:
            Fanfic(Title=info['Title'],
                   Author=info['Author'],
                   Folder=folder,
                   Count=0,
                   Description=info['Description'],
                   Internet=url,
                   StoryId=info['StoryId'],
                   Publish_Date=info['Publish_Date'],
                   Update_Date=info['Update_Date'],
                   # New stories start as "checked at publish time".
                   Last_Checked=info['Publish_Date'],
                   Category_Id=cat_id)

    def add_cat(self, category):
        """Insert a new Category row."""
        with db_session:
            Category(Name=category)

    def get_categories(self):
        """Return all category names (Id > 0), ordered by name."""
        d = defaultdict(list)
        with db_session:
            categories = select(c for c in Category
                                if c.Id > 0).order_by(Category.Name)
            for c in categories:
                for k, v in c.to_dict().items():
                    d[k].append(v)
        return d['Name']

    def get_cat_count(self, categoryName):
        """Return how many categories (Id > 0) match `categoryName`."""
        with db_session:
            category = count(c for c in Category
                             if c.Name == categoryName
                             and c.Id > 0)
        return category

    def get_folder(self, cat_id, folderName):
        """Return how many fanfics in a category use `folderName`."""
        with db_session:
            folders = count(f.Folder for f in Fanfic
                            if f.Category_Id == cat_id
                            and f.Folder == folderName)
        return folders

    def get_cat_id(self, categoryName):
        """Return the Id of the category named `categoryName`."""
        with db_session:
            cat = Category.get(Name=categoryName)
            ret = cat.Id
        return ret

    def get_fanfics(self, cat_id):
        """Return active fanfics of a category as a list of column->[value] dicts.

        Only stories that are neither abandoned nor complete are returned,
        ordered by folder name.
        """
        d = defaultdict(list)
        ret = []
        with db_session:
            ff = select(f for f in Fanfic
                        if f.Abandoned == 0
                        and f.Complete == 0
                        and f.Category_Id == cat_id)
            ff = ff.order_by(Fanfic.Folder)
            for f in ff:
                # One dict per row; each column maps to a one-element list.
                d = defaultdict(list)
                for k, v in f.to_dict().items():
                    d[k].append(v)
                ret.append(d)
        return ret

    def update_chapter_count(self, fic_id, chapters):
        """Set the stored chapter count of a fanfic."""
        with db_session:
            ff = Fanfic.get(Id=fic_id)
            ff.Count = chapters

    def update_last_checked(self, fic_id, last):
        """Set the date a fanfic was last checked for updates."""
        with db_session:
            ff = Fanfic.get(Id=fic_id)
            ff.Last_Checked = last

    def update_last_updated(self, fic_id, update):
        """Set the date a fanfic was last updated by its author."""
        with db_session:
            ff = Fanfic.get(Id=fic_id)
            ff.Update_Date = update
def download_stories(args):
    """Download and convert new chapters for every category in the database.

    `args` is accepted for interface compatibility but not used here.
    """
    # Ensure the shared download folder exists before any category runs.
    ensure_dir(os.path.join(basePath, arcRoot, dl_folder))
    for category_name in db.get_categories():
        download_by_category(category_name)
        convert_by_category(category_name)
def download_by_category(category):
    """Download any new chapters for every active fanfic in *category*.

    For each story that is due for a check (per IsNotStale) and has a
    usable URL, the scraper front-end is queried for the current chapter
    count; chapters beyond what the database has seen are downloaded and
    the bookkeeping columns (Count, Update_Date, Last_Checked) updated.
    """
    cat_folder = category.replace(' ', '_')
    location = os.path.join(basePath, arcRoot, dl_folder, cat_folder)
    # Ensure category folder exists prior to starting download.
    ensure_dir(location)
    cat_id = db.get_cat_id(category)
    fics = db.get_fanfics(cat_id)
    fic_count = len(fics)
    msg = "Processing: {0} - {1} of {2}."
    for fic_idx, fic in enumerate(fics, start=1):
        # '\r' keeps the progress line in place.
        print(msg.format(category, fic_idx, fic_count), end='\r')
        title = fic['Title'][0]
        folder = fic['Folder'][0]
        chapters = fic['Count'][0]
        update = fic['Update_Date'][0]
        last = fic['Last_Checked'][0]
        internet = fic['Internet'][0]
        # Stored links may be "label#url"; keep the part after the
        # first '#' when present.
        if internet:
            parts = internet.split('#')
            url = parts[1] if len(parts) > 1 else internet
        else:
            url = ''
        storyid = fic['StoryId'][0]
        fic_id = fic['Id'][0]
        date_check = IsNotStale(update, last)
        url_check = check_url(url)
        # Strict `is True` comparisons kept from the original contract.
        if url_check is True and date_check is True:
            ffargs = set_ffargs(location, folder)
            next_chapter = chapters + 1
            # First instantiation only resolves the canonical story URL.
            fanfic = current_fanfic.fanfic(url, ffargs)
            url = fanfic.get_story_url(storyid)
            # Re-initialize with the canonical URL before scraping.
            fanfic = current_fanfic.fanfic(url, ffargs)
            # Download list of chapters, then read the site's count.
            fanfic.get_chapters()
            cc = fanfic.chapter_count
            if cc >= next_chapter:
                # Chapters not yet downloaded.  (Replaces the original
                # `[i * 1 for i in range(...)]` plus a redundant
                # cc == next_chapter special case — range() already
                # yields [next_chapter] in that case.)
                potential_keys = list(range(next_chapter, cc + 1))
                # Show story about to be updated.
                update = fanfic.get_update_date()
                print('', end='\n')
                print(title, '/', folder, '/', update)
                fanfic.set_download_chapters(potential_keys)
                fanfic.download_fanfic()
                print('Downloaded fanfic: ', category, '/', title, '/', folder)
                db.update_chapter_count(fic_id, cc)
                db.update_last_updated(fic_id, update)
                db.update_last_checked(fic_id, date.today())
            else:
                db.update_last_checked(fic_id, date.today())
            # Be polite to the site between story downloads.
            sleep(4)
        else:
            sleep(0.2)
    # Terminate the '\r' progress line.
    print('', end='\n')
def convert_by_category(category):
    """Convert every downloaded .htm file under *category*'s folder tree."""
    cat_folder = category.replace(' ', '_')
    base = os.path.join(basePath, arcRoot, dl_folder, cat_folder)
    for sub in get_subfolders(base):
        target = os.path.realpath(os.path.join(base, sub))
        for htm in get_files(target, "*.htm"):
            convert_file(target, htm)
def add_story(category, cat_id, folder):
    """Interactively add a single story to the database.

    Prompts for a URL, shows the scraped metadata for confirmation, and
    inserts the story into *folder* under category *cat_id* on "Yes".
    """
    _ = os.system("clear")
    url = input("Enter Story URL: ")
    if not check_url(url):
        # Guard clause: bail out early on an unsupported site.
        print("Site not supported!!!")
        cui.pause()
        return
    # Scrape the story metadata so the user can confirm it.
    fanfic = current_fanfic.fanfic(url, set_ffargs('/tmp', folder))
    info = fanfic.story_info()
    print("ID: {0}".format(info['StoryId']))
    print("Title: {0}".format(info['Title']))
    print("Author: {0}".format(info['Author']))
    print("Summary: ", textwrap.fill(info['Description']))
    print("Published: {0}".format(info['Publish_Date']))
    print("Updated: {0}".format(info['Update_Date']))
    print("Chapter Count: {0}".format(info['Count']))
    cui.pause()
    if cui.menu_yesno("Proceed?") == "Yes":
        db.add_story(folder, cat_id, url, info)
        print("Story added to database.")
        cui.pause()
def add_cat():
    """Prompt for a new category name and insert it if it does not exist.

    Returns the new category's name, or the sentinel "None" when a
    category with that name already exists.
    """
    _ = os.system("clear")
    category = input("Enter Category Name: ")
    if db.get_cat_count(category) > 0:
        print("Category: {0} already exists!".format(category))
        category = "None"
        cui.pause()
    else:
        db.add_cat(category)
    return category
def config_menu():
    """Interactive configuration loop: pick category/folder, add stories.

    Bug fix: `cat_id` used to be assigned only inside the "Category:"
    branch, so choosing "Add Category" and then "Folder:" or "Add Story"
    raised NameError.  It is now initialised up front and refreshed
    whenever the current category changes.
    """
    category = "None"
    cat_id = None
    folder = "None"
    while True:
        menu = []
        menu.append(cui.get_entry("Category: {0}", category))
        menu.append(cui.get_entry("Folder: {0}", folder))
        menu.append("Add Category")
        menu.append("Add Story")
        menu.append("Exit")
        _ = os.system("clear")
        print("Fanfic Downloader - Config")
        print("")
        ret = cui.submenu(menu, "Enter Selection")
        if "Category:" in ret:
            cat = db.get_categories()
            _ = os.system("clear")
            category = cui.submenu(cat, "Choose Category: ")
            cat_id = db.get_cat_id(category)
        if "Folder:" in ret:
            if category != "None":
                folder = input("Enter Folder Name: ")
                folder_count = db.get_folder(cat_id, folder)
                if folder_count > 0:
                    print("Folder: {0} already exists!".format(folder))
                    folder = "None"
                    cui.pause()
            else:
                print("Please select category first!")
                cui.pause()
        if ret == "Add Category":
            category = add_cat()
            if category != "None":
                # Keep cat_id in sync with the newly created category.
                cat_id = db.get_cat_id(category)
        if ret == "Add Story":
            if category != "None" and folder != "None":
                add_story(category, cat_id, folder)
                # Force a fresh folder name for the next story.
                folder = "None"
            else:
                print("Please ensure Category and Folder are not set to None!")
                cui.pause()
        if ret == "Exit":
            sys.exit(0)
def main():
    """Entry point: parse CLI flags, then configure or download."""
    global db
    description = (
        'Automates fanfic downloads latest chapters using fanfic_scraper. '
        '(Currently works with fanfiction.net)')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "-c", '--config', required=False, action='store_true',
        help="Configures Fanfic Downloader.")
    # Load configuration and open the database before dispatching.
    config_load()
    db = FanficDB()
    args = parser.parse_args()
    if args.config:
        config_menu()
    else:
        download_stories(args)
# Run as a script: exit with main()'s return value (None -> exit status 0).
if __name__ == "__main__":
    sys.exit(main())
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a lower triangular matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowerTriangular",
]
@tf_export("linalg.LinearOperatorLowerTriangular")
class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] square lower triangular matrix.

  This operator acts like a [batch] lower triangular matrix `A` with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`.  The first `b` indices index a
  batch member.  For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix.

  `LinearOperatorLowerTriangular` is initialized with a `Tensor` having
  dimensions `[B1,...,Bb, N, N]`.  The upper triangle of the last two
  dimensions is ignored.

  ```python
  # Create a 2 x 2 lower-triangular linear operator.
  tril = [[1., 2.], [3., 4.]]
  operator = LinearOperatorLowerTriangular(tril)

  # The upper triangle is ignored.
  operator.to_dense()
  ==> [[1., 0.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  tril = tf.random.normal(shape=[2, 3, 4, 4])
  operator = LinearOperatorLowerTriangular(tril)
  ```

  #### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N],  with b >= 0
  x.shape =        [B1,...,Bb] + [N, R],  with R >= 0.
  ```

  #### Performance

  Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`,
  and `x.shape = [N, R]`.  Then

  * `operator.matmul(x)` involves `N^2 * R` multiplications.
  * `operator.solve(x)` involves `N * R` size `N` back-substitutions.
  * `operator.determinant()` involves a size `N` `reduce_prod`.

  If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               tril,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorLowerTriangular"):
    r"""Initialize a `LinearOperatorLowerTriangular`.

    Args:
      tril:  Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`.
        The lower triangular part of `tril` defines this operator.  The
        strictly upper triangle is ignored.
      is_non_singular:  Expect that this operator is non-singular.
        This operator is non-singular if and only if its diagonal elements
        are all non-zero.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  This operator is self-adjoint only if it is diagonal with
        real-valued diagonal entries.  In this case it is advised to use
        `LinearOperatorDiag`.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError:  If `is_square` is `False`.
    """
    # A triangular operator is necessarily square: reject an explicit
    # is_square=False and force the hint to True.
    if is_square is False:
      raise ValueError(
          "Only square lower triangular operators supported at this time.")
    is_square = True

    with ops.name_scope(name, values=[tril]):
      # convert_nonref_to_tensor keeps variable-like inputs by reference so
      # later reads observe updated values.
      self._tril = linear_operator_util.convert_nonref_to_tensor(tril,
                                                                 name="tril")
      self._check_tril(self._tril)

      super(LinearOperatorLowerTriangular, self).__init__(
          dtype=self._tril.dtype,
          graph_parents=None,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)
      # Registered after super().__init__ per the base class's
      # graph-parents plumbing.
      self._set_graph_parents([self._tril])

  def _check_tril(self, tril):
    """Static check of the `tril` argument."""
    # Only the rank is checked statically here; values (e.g. a non-singular
    # diagonal) are validated lazily via _assert_non_singular.
    if tril.shape.ndims is not None and tril.shape.ndims < 2:
      raise ValueError(
          "Argument tril must have at least 2 dimensions. Found: %s"
          % tril)

  def _get_tril(self):
    """Gets the `tril` kwarg, with upper part zero-d out."""
    # matrix_band_part(-1, 0) keeps all sub-diagonals and the main diagonal.
    return array_ops.matrix_band_part(self._tril, -1, 0)

  def _get_diag(self):
    """Gets the diagonal part of `tril` kwarg."""
    return array_ops.matrix_diag_part(self._tril)

  def _shape(self):
    # Static [B1,...,Bb, N, N] shape of the backing tensor.
    return self._tril.shape

  def _shape_tensor(self):
    # Dynamic shape, for when the static shape is not fully known.
    return array_ops.shape(self._tril)

  def _assert_non_singular(self):
    # A triangular matrix is singular iff some diagonal entry is zero.
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._get_diag(),
        message="Singular operator:  Diagonal contained zero values.")

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    return math_ops.matmul(
        self._get_tril(), x, adjoint_a=adjoint, adjoint_b=adjoint_arg)

  def _determinant(self):
    # det of a triangular matrix is the product of its diagonal.
    return math_ops.reduce_prod(self._get_diag(), axis=[-1])

  def _log_abs_determinant(self):
    # Sum of log|diag| — numerically preferable to log(prod(diag)).
    return math_ops.reduce_sum(
        math_ops.log(math_ops.abs(self._get_diag())), axis=[-1])

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
    # Lower-triangular systems solve by back-substitution.
    return linalg.triangular_solve(
        self._get_tril(), rhs, lower=True, adjoint=adjoint)

  def _to_dense(self):
    return self._get_tril()

  def _eigvals(self):
    # Eigenvalues of a triangular matrix are its diagonal entries.
    return self._get_diag()
| |
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # `a` only influences the constant score; `allow_nd` lets
        # fit/predict accept >2d inputs by flattening trailing dimensions.
        self.a = a
        self.allow_nd = allow_nd

    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)

        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            # Each fit_param must have been sliced to the fold's size.
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level `y`, not the `Y`
            # parameter — presumably intentional for these tests; confirm.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # NOTE(review): compares against the module-level P_sparse
            # fixture, so this mock is coupled to this test module.
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self

    def predict(self, T):
        # "Prediction" is just the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        # Constant score depending only on `a`, never on the data.
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures for the tests below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Sparse sample-weight / parameter fixtures passed via fit_params tests.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))

# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert a train/test split is disjoint and, optionally, complete."""
    # Sets give more informative assertion failure messages than arrays.
    train_set = set(train)
    test_set = set(test)
    # No index may appear on both sides of the split.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Together, the two sides must cover every sample index.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check every split of *cv* is valid and the test folds cover the data."""
    if expected_n_iter is None:
        expected_n_iter = len(cv)
    else:
        assert_equal(len(cv), expected_n_iter)

    seen_test = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test.update(test)

    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        # The accumulated test folds must cover the whole dataset.
        assert_equal(seen_test, set(range(n_samples)))
def test_kfold_valueerrors():
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 3]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Check that errors are raised if all n_labels for individual
    # classes are less than n_folds.
    y = [3, 3, -1, -1, 2]
    assert_raises(ValueError, cval.StratifiedKFold, y, 3)

    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    error_string = ("k-fold cross validation requires at least one"
                    " train / test split")
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 0)
    assert_raise_message(ValueError, error_string,
                         cval.StratifiedKFold, y, 1)

    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)

    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    # The test folds of KFold must jointly cover all indices.
    # Evenly divisible case.
    check_cv_coverage(cval.KFold(300, 3), expected_n_iter=3, n_samples=300)
    # Indices must still all be returned even when equal-sized folds are
    # not possible.
    check_cv_coverage(cval.KFold(17, 3), expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets.
    # Each case: (n_samples, [(expected_train, expected_test), ...]).
    cases = [
        (4, [([2, 3], [0, 1]), ([0, 1], [2, 3])]),
        (5, [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])]),
    ]
    for n, expected in cases:
        for (train, test), (exp_train, exp_test) in zip(cval.KFold(n, 2),
                                                        expected):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample
    # dependencies when possible.
    # Each case: (labels, [(expected_train, expected_test), ...]).
    cases = [
        ([1, 1, 0, 0],
         [([1, 3], [0, 2]), ([0, 2], [1, 3])]),
        ([1, 1, 1, 0, 0, 0, 0],
         [([2, 5, 6], [0, 1, 3, 4]), ([0, 1, 3, 4], [2, 5, 6])]),
    ]
    for labels, expected in cases:
        for (train, test), (exp_train, exp_test) in zip(
                cval.StratifiedKFold(labels, 2), expected):
            assert_array_equal(train, exp_train)
            assert_array_equal(test, exp_test)
def test_stratified_kfold_ratios():
    # Check that stratified kfold preserves label ratios in individual
    # splits; repeat with shuffling turned off and on.
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    expected_ratios = {4: 0.10, 0: 0.89, 1: 0.01}
    for shuffle in (False, True):
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            for label, ratio in expected_ratios.items():
                assert_almost_equal(
                    np.sum(labels[train] == label) / len(train), ratio, 2)
                assert_almost_equal(
                    np.sum(labels[test] == label) / len(test), ratio, 2)
def test_kfold_balance():
    # Check that KFold returns folds with balanced sizes.
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        sizes = [len(test) for _, test in kf]
        # Fold sizes differ by at most one and sum to n.
        assert_true((np.max(sizes) - np.min(sizes)) <= 1)
        assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only
    # when stratification is possible); repeat with shuffling off and on.
    labels = [0] * 3 + [1] * 14
    for shuffle in (False, True):
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            sizes = [len(test) for _, test in skf]
            # Fold sizes differ by at most one and sum to the sample count.
            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds.
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    collected = []
    for train, test in kf:
        # No test fold may equal one of the contiguous thirds of the data.
        for start in (0, 100, 200):
            assert_true(np.any(np.arange(start, start + 100) != ind[test]))
        collected.append(ind[test])

    all_folds = np.concatenate(collected)
    all_folds.sort()
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    # Shuffling must actually happen when requested, while the folds still
    # cover all samples.
    labels = [0] * 20 + [1] * 20
    seeded = [list(cval.StratifiedKFold(labels, 5, shuffle=True,
                                        random_state=seed))
              for seed in (0, 1)]
    for (_, test0), (_, test1) in zip(*seeded):
        # Different seeds must yield different test folds.
        assert_true(set(test0) != set(test1))
    check_cv_coverage(seeded[0], expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling
    # case wrongly makes the IID assumption and is therefore too optimistic:
    # it estimates a much higher accuracy (around 0.96) than the non
    # shuffling variant (around 0.86).

    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the
    # authors by yielding a seriously overestimated score:

    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_label_kfold():
    rng = np.random.RandomState(0)

    # Parameters of the test
    n_labels = 15
    n_samples = 1000
    n_folds = 5

    # First case: a large random integer label array.
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    labels = rng.randint(0, n_labels, n_samples)
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))

    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)

    # Check that no label is on both sides of the split
    labels = np.asarray(labels, dtype=object)
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)

    # Second case: string labels with unequal group sizes.
    labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
              'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
              'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
              'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
              'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
              'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
              'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
    labels = np.asarray(labels, dtype=object)

    n_labels = len(np.unique(labels))
    n_samples = len(labels)
    n_folds = 5
    tolerance = 0.05 * n_samples  # 5 percent error allowed
    folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
    ideal_n_labels_per_fold = n_samples // n_folds

    # Check that folds have approximately the same size
    assert_equal(len(folds), len(labels))
    for i in np.unique(folds):
        assert_greater_equal(tolerance,
                             abs(sum(folds == i) - ideal_n_labels_per_fold))

    # Check that each label appears only in 1 fold
    for label in np.unique(labels):
        assert_equal(len(np.unique(folds[labels == label])), 1)

    # Check that no label is on both sides of the split
    for train, test in cval.LabelKFold(labels, n_folds=n_folds):
        assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)

    # Should fail if there are more folds than labels
    labels = np.array([1, 1, 1, 2, 2])
    assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
    # Float, int and numpy-int test_size must all give identical splits for
    # a fixed random_state.
    splitters = [
        cval.ShuffleSplit(10, test_size=0.2, random_state=0),
        cval.ShuffleSplit(10, test_size=2, random_state=0),
        cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0),
    ]
    # Build one splitter per native integer type; as in the original, only
    # the last one constructed takes part in the comparison.
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
    splitters.append(ss4)
    for t1, t2, t3, t4 in zip(*splitters):
        for part in (0, 1):
            assert_array_equal(t1[part], t2[part])
            assert_array_equal(t2[part], t3[part])
            assert_array_equal(t3[part], t4[part])
def test_stratified_shuffle_split_init():
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)

    # Check that error is raised if the test set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            # Every class must appear on both sides of the split.
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train],
                                             return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                            return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            # Train and test partition the data without overlap.
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: `n_splits` is resolved from the enclosing scope at call
        # time (late binding); it is assigned in the loop below before
        # this helper is first invoked.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)

        # Per-split invariants, checked on the last emitted split.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)

        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
    # Non-regression test; original bug report:
    # https://github.com/scikit-learn/scikit-learn/issues/6121
    labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
    splitter = cval.StratifiedShuffleSplit(labels, n_iter=1,
                                           test_size=0.5, random_state=0)
    train, test = next(iter(splitter))
    # Train and test indices must never overlap.
    assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
    folds = -1 * np.ones(10)
    kf_train, kf_test = [], []
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5,
                                                         shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        # Record which fold each sample's test membership belongs to.
        folds[test_ind] = i

    ps_splits = list(cval.PredefinedSplit(folds))
    ps_train = [tr for tr, _ in ps_splits]
    ps_test = [te for _, te in ps_splits]
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          ]

    for y in ys:
        n_iter = 6
        test_size = 1. / 3
        slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
                                     random_state=0)

        # Make sure the repr works
        repr(slo)

        # Test that the length is correct
        assert_equal(len(slo), n_iter)

        y_unique = np.unique(y)
        for train, test in slo:
            # First test: no train label is in the test set and vice versa
            y_train_unique = np.unique(y[train])
            y_test_unique = np.unique(y[test])
            assert_false(np.any(np.in1d(y[train], y_test_unique)))
            assert_false(np.any(np.in1d(y[test], y_train_unique)))

            # Second test: train and test add up to all the data
            assert_equal(y[train].size + y[test].size, y.size)

            # Third test: train and test are disjoint
            assert_array_equal(np.intersect1d(train, test), [])

            # Fourth test: # unique train and test labels are correct,
            # +- 1 for rounding error
            assert_true(abs(len(y_test_unique) -
                            round(test_size * len(y_unique))) <= 1)
            assert_true(abs(len(y_train_unique) -
                            round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
    # LeaveOneLabelOut / LeavePLabelOut splits must not change when the
    # labels array passed at construction is mutated before iteration.
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    labels_changing[:] = 0
    cv_pairs = [(lolo, lolo_changing), (lplo, lplo_changing)]
    for cv_ref, cv_mutated in cv_pairs:
        for (tr_ref, te_ref), (tr_mut, te_mut) in zip(cv_ref, cv_mutated):
            assert_array_equal(tr_ref, tr_mut)
            assert_array_equal(te_ref, te_mut)
def test_cross_val_score():
    """Smoke-test cross_val_score with dense, sparse, list and 3-d inputs,
    and check invalid `scoring` strings / disallowed n-d input raise."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y
        # NOTE(review): this repeats the sparse-X/multioutput check from a
        # few lines above — confirm whether the duplication is intentional.
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # "sklearn" is not a valid scoring string
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    # cross_val_score must forward pandas (or pandas-like) containers to
    # the estimator unchanged, neither copying them into plain arrays nor
    # destroying them.
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_types.append((Series, DataFrame))
    for target_type, feature_type in container_types:
        X_df = feature_type(X)
        y_ser = target_type(y)
        clf = CheckingClassifier(
            check_X=lambda df, t=feature_type: isinstance(df, t),
            check_y=lambda ser, t=target_type: isinstance(ser, t))
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must accept boolean-mask CV splits and produce the
    same scores as the equivalent index-based splits."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # BUG FIX: previously appended the index arrays (train, test), so
        # the boolean-mask code path was never actually exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    # SVC(kernel='precomputed') scored on a precomputed linear Gram matrix
    # must match SVC(kernel='linear') on the raw data; non-square or
    # non-array kernels must be rejected.
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    gram = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, gram, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # A precomputed kernel must be square; raw X is not.
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # ...and must be array-like or sparse, not a plain list of lists.
    assert_raises(ValueError, cval.cross_val_score, svm,
                  gram.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params values — arrays, sparse matrices and arbitrary Python
    objects — must be forwarded unchanged to the estimator's fit method."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    # A custom score function must be invoked once per fold (3 folds by
    # default) and its return values collected verbatim.
    clf = MockClassifier()
    recorded_calls = []
    def score_func(y_test, y_predict):
        recorded_calls.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(recorded_calls) == 3
def test_cross_val_score_errors():
    # An object without the estimator interface must be rejected with a
    # TypeError.
    class BrokenEstimator:
        pass
    broken = BrokenEstimator()
    assert_raises(TypeError, cval.cross_val_score, broken, X)
def test_train_test_split_errors():
    split = cval.train_test_split
    # No arrays at all.
    assert_raises(ValueError, split)
    # Fractions out of range or summing past 1.0.
    assert_raises(ValueError, split, range(3), train_size=1.1)
    assert_raises(ValueError, split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    # Wrong type for test_size.
    assert_raises(ValueError, split, range(3),
                  test_size="wrong_type")
    # Absolute sizes exceeding the number of samples.
    assert_raises(ValueError, split, range(3), test_size=2,
                  train_size=4)
    # Unknown keyword argument.
    assert_raises(TypeError, split, range(3),
                  some_argument=1.1)
    # Arrays of mismatched length.
    assert_raises(ValueError, split, range(3), range(42))
def test_train_test_split():
    """Exercise train_test_split: X/y row correspondence, sparse and list
    inputs, n-d arrays, and the stratify option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    # NOTE(review): the name lacks the "test_" prefix, so test collectors
    # will skip this function — confirm whether that is intentional.
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    # NOTE(review): the name lacks the "test_" prefix, so test collectors
    # will skip this function — confirm whether that is intentional.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
    # On iris with a linear SVC the default score, the explicit accuracy
    # scorer and the weighted-F1 scorer should all agree (the classes are
    # balanced, so F1 equals accuracy here).
    iris = load_iris()
    clf = SVC(kernel='linear')
    expected = [0.97, 1., 0.97, 0.97, 1.]
    default_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                          cv=5)
    assert_array_almost_equal(default_scores, expected, 2)
    accuracy_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                           scoring="accuracy", cv=5)
    assert_array_almost_equal(accuracy_scores, expected, 2)
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, expected, 2)
def test_cross_val_score_with_score_func_regression():
    """Ridge CV scores against hard-coded expectations for the default
    scorer, r2, (negated) mean squared error and explained variance."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: real labels give a high score and a tiny
    p-value; random labels give chance-level score and a large p-value.
    Sparse input and custom scorers must agree with the dense run."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    # Passing a single constant label group must not change the result.
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # Accuracy minus error rate, in [-1, 1].
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """Every CV generator must yield integer index arrays (not boolean
    masks) usable for fancy-indexing X and y."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this assertion previously re-checked `train`;
            # it must check `test`.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices, but relying on the
    generators' default (non-deprecated) index output."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            # BUG FIX: this assertion previously re-checked `train`;
            # it must check `test`.
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    # Each invalid test_size/train_size combination must raise ValueError.
    invalid_kwargs = [
        dict(test_size=2.0),                     # fraction > 1
        dict(test_size=1.0),                     # leaves no train data
        dict(test_size=0.1, train_size=0.95),    # fractions sum > 1
        dict(test_size=11),                      # more than n samples
        dict(test_size=10),                      # leaves no train data
        dict(test_size=8, train_size=3),         # counts sum > n
        dict(train_size=1j),                     # nonsensical type
        dict(test_size=None, train_size=None),   # nothing specified
    ]
    for kwargs in invalid_kwargs:
        assert_raises(ValueError, cval.ShuffleSplit, 10, **kwargs)
def test_shufflesplit_reproducible():
    # With a fixed random_state, two complete passes over the same
    # ShuffleSplit object must produce identical train index sequences.
    ss = cval.ShuffleSplit(10, random_state=21)
    first_pass = [train for train, _ in ss]
    second_pass = [train for train, _ in ss]
    assert_array_equal(first_pass, second_pass)
def test_safe_split_with_precomputed_kernel():
    """_safe_split on a precomputed Gram matrix must select the same
    sub-kernel that recomputing the kernel on index-selected raw data
    would give."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    # Test side: kernel rows are test samples, columns are train samples.
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    # cross_val_score must not reject NaN-containing input when the
    # pipeline's imputer handles the missing values.
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(pipeline, X, y, cv=5)
def test_train_test_split_allow_nans():
    # train_test_split must accept input containing NaNs (it performs no
    # finiteness validation).
    features = np.arange(200, dtype=np.float64).reshape(10, -1)
    features[2, :] = np.nan
    targets = np.repeat([0, 1], features.shape[0] / 2)
    cval.train_test_split(features, targets, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    # permutation_test_score must not reject NaN-containing input when the
    # pipeline's imputer handles the missing values.
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    pipeline = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(pipeline, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv must pick StratifiedKFold for (multi)class targets with a
    classifier, and plain KFold otherwise (no y, multilabel, multioutput)."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """cross_val_score on multilabel targets with micro-, macro- and
    samples-averaged precision scorers against hand-computed values."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict must equal a manual per-fold fit/predict loop and
    support default CV, LeaveOneOut, sparse X, and unsupervised
    estimators; overlapping folds must be rejected."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    # CONSISTENCY FIX: this previously used assert_array_almost_equal on
    # two plain ints; assert_equal matches the sibling length checks.
    assert_equal(len(preds), len(y))
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    def bad_cv():
        # Yields overlapping, non-partition folds; must be rejected.
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Smoke-test cross_val_predict with dense, sparse, multioutput, list
    and 3-d inputs, checking the returned prediction shapes."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    # NOTE(review): this repeats the sparse-X/multioutput check above —
    # confirm whether the duplication is intentional.
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # cross_val_predict must forward pandas (or pandas-like) containers to
    # the estimator unchanged.
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
    except ImportError:
        pass
    else:
        container_types.append((Series, DataFrame))
    for target_type, feature_type in container_types:
        X_df = feature_type(X)
        y_ser = target_type(y)
        clf = CheckingClassifier(
            check_X=lambda df, t=feature_type: isinstance(df, t),
            check_y=lambda ser, t=target_type: isinstance(ser, t))
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    # A sparse matrix passed through fit_params must reach the estimator
    # intact; MockClassifier then scores 1.0 on every fold.
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    sparse_weight = coo_matrix(np.eye(X.shape[0]))
    scores = cval.cross_val_score(
        clf, X, y, fit_params={'sparse_sample_weight': sparse_weight})
    assert_array_equal(scores, np.ones(3))
def test_check_is_partition():
    # _check_is_partition accepts exactly a permutation of range(n).
    perm = np.arange(100)
    assert_true(cval._check_is_partition(perm, 100))
    # A missing element is not a partition...
    assert_false(cval._check_is_partition(np.delete(perm, 23), 100))
    # ...nor is a duplicated one.
    perm[0] = 23
    assert_false(cval._check_is_partition(perm, 100))
def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    # Sparse input yields a sparse prediction matrix; densify to compare.
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
| |
"""
This module makes simple LDAP queries simple.
"""
import ldap
from simpleldap.cidict import cidict
#
# Exceptions.
#
class SimpleLDAPException(Exception):
    """Root of the simpleldap exception hierarchy."""
class ObjectNotFound(SimpleLDAPException):
    """Raised when a query expecting exactly one item matched nothing."""
class MultipleObjectsFound(SimpleLDAPException):
    """Raised when a query expecting exactly one item matched several."""
class ConnectionException(Exception):
    """Root for errors raised by the Connection object."""
class InvalidEncryptionProtocol(ConnectionException):
    """Raised when an unsupported encryption protocol name is given."""
#
# Classes.
#
class LDAPItem(cidict):
    """
    A convenience class for wrapping standard LDAPResult objects.

    ``self.dn`` holds the entry's distinguished name; each LDAP attribute
    maps (case-insensitively) to its full list of values.
    """

    def __init__(self, result):
        super(LDAPItem, self).__init__()
        self.dn, self.attributes = result
        # XXX: quick and dirty, should really proxy straight to the existing
        # self.attributes dict.
        # BUG FIX: use .items() instead of the Python-2-only .iteritems()
        # so this also works under Python 3.
        for attribute, values in self.attributes.items():
            # Make the entire list of values for each LDAP attribute
            # accessible through a dictionary mapping.
            self[attribute] = values

    def first(self, attribute):
        """
        Return the first value for the given LDAP attribute.
        """
        return self[attribute][0]

    def value_contains(self, value, attribute):
        """
        Determine if any of the items in the value list for the given
        attribute contain value.
        """
        return any(value in item for item in self[attribute])

    def __str__(self):
        """
        Print attribute names and values, one per line, in alphabetical order.
        Attribute names are displayed right-aligned to the length of the
        longest attribute name.
        """
        attributes = list(self.keys())
        longestKeyLength = max(len(attr) for attr in attributes)
        output = []
        for attr in sorted(attributes):
            values = ("\n%*s " % (longestKeyLength, ' ')).join(self[attr])
            output.append("%*s: %s" % (longestKeyLength, attr, values))
        return "\n".join(output)

    def __eq__(self, other):
        # Two items are the same entry iff their DNs match.
        return self.dn == other.dn

    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive != from __eq__; define it
        # explicitly so equality and inequality stay consistent.
        return not self.__eq__(other)
class Connection(object):
    """
    A connection to an LDAP server.
    """

    # The class to use for items returned in results. Subclasses can change
    # this to a class of their liking.
    result_item_class = LDAPItem

    # List of exceptions to treat as a failed bind operation in the
    # authenticate method.
    failed_authentication_exceptions = [
        ldap.NO_SUCH_OBJECT,  # e.g. dn matches no objects.
        ldap.UNWILLING_TO_PERFORM,  # e.g. dn with no password.
        ldap.INVALID_CREDENTIALS,  # e.g. wrong password.
    ]

    def __init__(self, hostname='localhost', port=None, dn='', password='',
                 encryption=None, require_cert=None, debug=False,
                 initialize_kwargs=None, options=None, search_defaults=None):
        """
        Bind to hostname:port using the passed distinguished name (DN), as
        ``dn``, and password.

        If ``hostname`` is not given, default to ``'localhost'``.

        If no user and password is given, try to connect anonymously with a
        blank DN and password.

        ``encryption`` should be one of ``'tls'``, ``'ssl'``, or ``None``.
        If ``'tls'``, then the standard port 389 is used by default and after
        binding, tls is started.  If ``'ssl'``, then port 636 is used by
        default.  ``port`` can optionally be given for connecting to a
        non-default port.

        ``require_cert`` is None by default.  Set this to ``True`` or
        ``False`` to set the ``OPT_X_TLS_REQUIRE_CERT`` ldap option.

        If ``debug`` is ``True``, debug options are turned on within ldap and
        statements are ouput to standard error.  Default is ``False``.

        If given, ``options`` should be a dictionary of any additional
        connection-specific ldap options to set, e.g.:
        ``{'OPT_TIMELIMIT': 3}``.

        If given, ``search_defaults`` should be a dictionary of default
        parameters to be passed to the search method.
        """
        if search_defaults is None:
            self._search_defaults = {}
        else:
            self._search_defaults = search_defaults
        # Pick protocol and default port from the encryption mode.
        if not encryption or encryption == 'tls':
            protocol = 'ldap'
            if not port:
                port = 389
        elif encryption == 'ssl':
            protocol = 'ldaps'
            if not port:
                port = 636
        else:
            raise InvalidEncryptionProtocol(
                "Invalid encryption protocol, must be one of: 'tls' or 'ssl'.")
        # These two options are global (library-wide), not per-connection.
        if require_cert is not None:
            ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, require_cert)
        if debug:
            ldap.set_option(ldap.OPT_DEBUG_LEVEL, 255)
        else:
            ldap.set_option(ldap.OPT_DEBUG_LEVEL, 0)
        uri = '%s://%s:%s' % (protocol, hostname, port)
        if initialize_kwargs:
            self.connection = ldap.initialize(uri, **initialize_kwargs)
        else:
            self.connection = ldap.initialize(uri)
        if options:
            # BUG FIX: dict.iteritems() is Python-2-only; items() behaves
            # the same here and works on both Python 2 and 3.
            for name, value in options.items():
                self.connection.set_option(getattr(ldap, name), value)
        # TLS must be started before binding.
        if encryption == 'tls':
            self.connection.start_tls_s()
        self.connection.simple_bind_s(dn, password)

    def set_search_defaults(self, **kwargs):
        """
        Set defaults for search.

        Examples::

            conn.set_search_defaults(base_dn='dc=example,dc=com', timeout=100)
            conn.set_search_defaults(attrs=['cn'], scope=ldap.SCOPE_BASE)
        """
        self._search_defaults.update(kwargs)

    def clear_search_defaults(self, args=None):
        """
        Clear all search defaults specified by the list of parameter names
        given as ``args``.  If ``args`` is not given, then clear all existing
        search defaults.

        Examples::

            conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
            conn.clear_search_defaults(['scope'])
            conn.clear_search_defaults()
        """
        if args is None:
            self._search_defaults.clear()
        else:
            for arg in args:
                if arg in self._search_defaults:
                    del self._search_defaults[arg]

    def __enter__(self):
        # Support ``with Connection(...) as conn:`` usage.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Shutdown the connection.
        """
        self.connection.unbind_s()

    def search(self, filter, base_dn=None, attrs=None, scope=None,
               timeout=None, limit=None):
        """
        Search the directory.  Parameters not given fall back to the
        values registered via ``set_search_defaults``, then to the ldap
        library defaults.
        """
        if base_dn is None:
            base_dn = self._search_defaults.get('base_dn', '')
        if attrs is None:
            attrs = self._search_defaults.get('attrs', None)
        if scope is None:
            scope = self._search_defaults.get('scope', ldap.SCOPE_SUBTREE)
        if timeout is None:
            timeout = self._search_defaults.get('timeout', -1)
        if limit is None:
            limit = self._search_defaults.get('limit', 0)
        results = self.connection.search_ext_s(
            base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit)
        return self.to_items(results)

    def get(self, *args, **kwargs):
        """
        Get a single object.

        This is a convenience wrapper for the search method that checks that
        only one object was returned, and returns that single object instead
        of a list.  This method takes the exact same arguments as search.
        """
        results = self.search(*args, **kwargs)
        num_results = len(results)
        if num_results == 1:
            return results[0]
        if num_results > 1:
            raise MultipleObjectsFound()
        raise ObjectNotFound()

    def to_items(self, results):
        """
        Turn LDAPResult objects returned from the ldap library into more
        convenient objects.
        """
        return [self.result_item_class(item) for item in results]

    def authenticate(self, dn='', password=''):
        """
        Attempt to authenticate given dn and password using a bind operation.
        Return True if the bind is successful, and return False there was an
        exception raised that is contained in
        self.failed_authentication_exceptions.
        """
        try:
            self.connection.simple_bind_s(dn, password)
        except tuple(self.failed_authentication_exceptions):
            return False
        else:
            return True

    def compare(self, dn, attr, value):
        """
        Compare the ``attr`` of the entry ``dn`` with given ``value``.

        This is a convenience wrapper for the ldap library's ``compare``
        function that returns a boolean value instead of 1 or 0.
        """
        return self.connection.compare_s(dn, attr, value) == 1
| |
# Copyright (c) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for EMC XtremIO Storage.
supported XtremIO version 2.4 and up
1.0.0 - initial release
1.0.1 - enable volume extend
1.0.2 - added FC support, improved error handling
1.0.3 - update logging level, add translation
1.0.4 - support for FC zones
1.0.5 - add support for XtremIO 4.0
"""
import base64
import json
import math
import random
import string
import urllib
import urllib2
from oslo_config import cfg
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Default over-subscription factor — presumably used when reporting
# thin-provisioned capacity; its use is not visible in this chunk (verify).
DEFAULT_PROVISIONING_FACTOR = 20.0
XTREMIO_OPTS = [
    cfg.StrOpt('xtremio_cluster_name',
               default='',
               help='XMS cluster id in multi-cluster environment')]
CONF.register_opts(XTREMIO_OPTS)
# Module-level PRNG, used e.g. to pick a random iSCSI portal.
RANDOM = random.Random()
# Error-message fragments returned by the XMS REST API.
OBJ_NOT_FOUND_ERR = 'obj_not_found'
VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique'
VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found'
ALREADY_MAPPED_ERR = 'already_mapped'
class XtremIOClient(object):
def __init__(self, configuration, cluster_id):
self.configuration = configuration
self.cluster_id = cluster_id
self.base64_auth = (base64
.encodestring('%s:%s' %
(self.configuration.san_login,
self.configuration.san_password))
.replace('\n', ''))
self.base_url = ('https://%s/api/json/types' %
self.configuration.san_ip)
def _create_request(self, request_typ, data, url, url_data):
if request_typ in ('GET', 'DELETE'):
data.update(url_data)
self.update_url(data, self.cluster_id)
url = '%(url)s?%(query)s' % {'query': urllib.urlencode(data,
doseq=True),
'url': url}
request = urllib2.Request(url)
else:
if url_data:
url = ('%(url)s?%(query)s' %
{'query': urllib.urlencode(url_data, doseq=True),
'url': url})
self.update_data(data, self.cluster_id)
LOG.debug('data: %s', data)
request = urllib2.Request(url, json.dumps(data))
LOG.debug('%(type)s %(url)s', {'type': request_typ, 'url': url})
def get_request_type():
return request_typ
request.get_method = get_request_type
request.add_header("Authorization", "Basic %s" % (self.base64_auth, ))
return request
def _send_request(self, object_type, key, request):
try:
response = urllib2.urlopen(request)
except (urllib2.HTTPError, ) as exc:
if exc.code == 400 and hasattr(exc, 'read'):
error = json.load(exc)
err_msg = error['message']
if err_msg.endswith(OBJ_NOT_FOUND_ERR):
LOG.warning(_LW("object %(key)s of "
"type %(typ)s not found"),
{'key': key, 'typ': object_type})
raise exception.NotFound()
elif err_msg == VOL_NOT_UNIQUE_ERR:
LOG.error(_LE("can't create 2 volumes with the same name"))
msg = (_('Volume by this name already exists'))
raise exception.VolumeBackendAPIException(data=msg)
elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
LOG.error(_LE("Can't find volume to map %s"), key)
raise exception.VolumeNotFound(volume_id=key)
elif ALREADY_MAPPED_ERR in err_msg:
raise exception.XtremIOAlreadyMappedError()
LOG.error(_LE('Bad response from XMS, %s'), exc.read())
msg = (_('Exception: %s') % six.text_type(exc))
raise exception.VolumeDriverException(message=msg)
if response.code >= 300:
LOG.error(_LE('bad API response, %s'), response.msg)
msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
{'code': response.code, 'msg': response.msg})
raise exception.VolumeBackendAPIException(data=msg)
return response
    def req(self, object_type='volumes', request_typ='GET', data=None,
            name=None, idx=None):
        """Issue a REST request for *object_type*, addressed by name or index.

        *name* and *idx* are mutually exclusive.  Returns the parsed JSON
        response, or None when the response body is empty or cannot be
        parsed (callers treat both the same).
        """
        if not data:
            data = {}
        if name and idx:
            msg = _("can't handle both name and index in req")
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        url = '%s/%s' % (self.base_url, object_type)
        url_data = {}
        key = None
        if name:
            # Name lookups go through the query string.
            url_data['name'] = name
            key = name
        elif idx:
            # Index lookups address the object directly in the path.
            url = '%s/%d' % (url, idx)
            key = str(idx)
        request = self._create_request(request_typ, data, url, url_data)
        response = self._send_request(object_type, key, request)
        str_result = response.read()
        if str_result:
            try:
                return json.loads(str_result)
            except Exception:
                # Log and fall through to an implicit None.
                LOG.exception(_LE('querying %(typ)s, %(req)s failed to '
                                  'parse result, return value = %(res)s'),
                              {'typ': object_type,
                               'req': request_typ,
                               'res': str_result})
def update_url(self, data, cluster_id):
return
def update_data(self, data, cluster_id):
return
def get_cluster(self):
return self.req('clusters', idx=1)['content']
class XtremIOClient3(XtremIOClient):
    """Client for the XMS v3.x REST API.

    v3 has no server-side filtering, so lun-map lookups walk the whole
    collection, one extra request per entry.
    """

    def find_lunmap(self, ig_name, vol_name):
        """Return the lun-map joining *ig_name* and *vol_name*, or None."""
        try:
            for lm_link in self.req('lun-maps')['lun-maps']:
                # Each listing entry only carries an href; fetch the full
                # object to inspect its ig/vol names.
                idx = lm_link['href'].split('/')[-1]
                lm = self.req('lun-maps', idx=int(idx))['content']
                if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name:
                    return lm
        except exception.NotFound:
            raise (exception.VolumeDriverException
                   (_("can't find lunmap, ig:%(ig)s vol:%(vol)s") %
                    {'ig': ig_name, 'vol': vol_name}))

    def num_of_mapped_volumes(self, initiator):
        """Count lun-maps that belong to *initiator*'s initiator group."""
        cnt = 0
        for lm_link in self.req('lun-maps')['lun-maps']:
            idx = lm_link['href'].split('/')[-1]
            lm = self.req('lun-maps', idx=int(idx))['content']
            if lm['ig-name'] == initiator:
                cnt += 1
        return cnt

    def get_iscsi_portal(self):
        """Return the full content of one randomly chosen iSCSI portal."""
        iscsi_portals = [t['name'] for t in self.req('iscsi-portals')
                         ['iscsi-portals']]
        # Get a random portal
        portal_name = RANDOM.choice(iscsi_portals)
        try:
            portal = self.req('iscsi-portals',
                              name=portal_name)['content']
        except exception.NotFound:
            raise (exception.VolumeBackendAPIException
                   (data=_("iscsi portal, %s, not found") % portal_name))
        return portal
class XtremIOClient4(XtremIOClient):
    """Client for the XMS v4.x REST API.

    v4 supports server-side filtering ('filter'/'full' query args) and
    multi-cluster XMS, hence the cluster-name/cluster-id injection hooks.
    """

    def find_lunmap(self, ig_name, vol_name):
        """Return the lun-map joining *ig_name* and *vol_name*."""
        try:
            return (self.req('lun-maps',
                             data={'full': 1,
                                   'filter': ['vol-name:eq:%s' % vol_name,
                                              'ig-name:eq:%s' % ig_name]})
                    ['lun-maps'][0])
        except (KeyError, IndexError):
            raise exception.VolumeNotFound(volume_id=vol_name)

    def num_of_mapped_volumes(self, initiator):
        """Count lun-maps that belong to *initiator*'s initiator group."""
        return len(self.req('lun-maps',
                            data={'filter': 'ig-name:eq:%s' % initiator})
                   ['lun-maps'])

    def update_url(self, data, cluster_id):
        # Scope GET/DELETE query strings to the managed cluster.
        if cluster_id:
            data['cluster-name'] = cluster_id

    def update_data(self, data, cluster_id):
        # Scope JSON request bodies to the managed cluster.
        if cluster_id:
            data['cluster-id'] = cluster_id

    def get_iscsi_portal(self):
        """Return one randomly chosen iSCSI portal (full object)."""
        iscsi_portals = self.req('iscsi-portals',
                                 data={'full': 1})['iscsi-portals']
        return RANDOM.choice(iscsi_portals)

    def get_cluster(self):
        """Return the managed cluster's content (first cluster if unset)."""
        if self.cluster_id:
            return self.req('clusters', name=self.cluster_id)['content']
        else:
            name = self.req('clusters')['clusters'][0]['name']
            return self.req('clusters', name=name)['content']
class XtremIOVolumeDriver(san.SanDriver):
    """Executes commands relating to Volumes.

    Protocol-agnostic base for the iSCSI and FC XtremIO drivers; all
    array access goes through ``self.client`` (v3 or v4 REST client).
    """
    VERSION = '1.0.5'
    driver_name = 'XtremIO'
    MIN_XMS_VERSION = [3, 0, 0]

    def __init__(self, *args, **kwargs):
        super(XtremIOVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(XTREMIO_OPTS)
        self.protocol = None
        self.backend_name = (self.configuration.safe_get('volume_backend_name')
                             or self.driver_name)
        self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name')
                           or '')
        self.provisioning_factor = (self.configuration.
                                    safe_get('max_over_subscription_ratio')
                                    or DEFAULT_PROVISIONING_FACTOR)
        self._stats = {}
        # Start with the v3 client; check_for_setup_error swaps in the v4
        # client once the XMS version is known.
        self.client = XtremIOClient3(self.configuration, self.cluster_id)

    def _obj_from_result(self, res):
        """Follow the first link in a creation result to the full object."""
        typ, idx = res['links'][0]['href'].split('/')[-2:]
        return self.client.req(typ, idx=int(idx))['content']

    def check_for_setup_error(self):
        """Verify XMS reachability and minimum version; pick the client."""
        try:
            try:
                # 'xms' objects only exist on v4; fall back to the cluster
                # software version on older arrays.
                xms = self.client.req('xms', idx=1)['content']
                version_text = xms['version']
            except exception.VolumeDriverException:
                cluster = self.client.req('clusters', idx=1)['content']
                version_text = cluster['sys-sw-version']
        except exception.NotFound:
            msg = _("XtremIO not initialized correctly, no clusters found")
            raise (exception.VolumeBackendAPIException
                   (data=msg))
        ver = [int(n) for n in version_text.split('-')[0].split('.')]
        if ver < self.MIN_XMS_VERSION:
            msg = (_('Invalid XtremIO version %(cur)s,'
                     ' version %(min)s or up is required') %
                   {'min': self.MIN_XMS_VERSION,
                    'cur': ver})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        else:
            LOG.info(_LI('XtremIO SW version %s'), version_text)
        if ver[0] >= 4:
            self.client = XtremIOClient4(self.configuration, self.cluster_id)

    def create_volume(self, volume):
        """Creates a volume."""
        data = {'vol-name': volume['id'],
                'vol-size': str(volume['size']) + 'g'
                }
        self.client.req('volumes', 'POST', data)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        data = {'snap-vol-name': volume['id'],
                'ancestor-vol-id': snapshot.id}
        self.client.req('snapshots', 'POST', data)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        data = {'snap-vol-name': volume['id'],
                'ancestor-vol-id': src_vref['id']}
        self.client.req('snapshots', 'POST', data)

    def delete_volume(self, volume):
        """Deletes a volume (missing volumes are logged, not an error)."""
        try:
            self.client.req('volumes', 'DELETE', name=volume['id'])
        except exception.NotFound:
            LOG.info(_LI("volume %s doesn't exist"), volume['id'])

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        data = {'snap-vol-name': snapshot.id,
                'ancestor-vol-id': snapshot.volume_id}
        self.client.req('snapshots', 'POST', data)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        Deletion goes through the 'volumes' endpoint -- presumably the
        array exposes snapshots as volumes; TODO confirm against the XMS
        API reference.
        """
        try:
            self.client.req('volumes', 'DELETE', name=snapshot.id)
        except exception.NotFound:
            LOG.info(_LI("snapshot %s doesn't exist"), snapshot.id)

    def _update_volume_stats(self):
        """Refresh self._stats from the array's cluster counters."""
        sys = self.client.get_cluster()
        physical_space = int(sys["ud-ssd-space"]) / units.Mi
        used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi
        free_physical = physical_space - used_physical_space
        actual_prov = int(sys["vol-size"]) / units.Mi
        self._stats = {'volume_backend_name': self.backend_name,
                       'vendor_name': 'EMC',
                       'driver_version': self.VERSION,
                       'storage_protocol': self.protocol,
                       'total_capacity_gb': physical_space,
                       'free_capacity_gb': (free_physical *
                                            self.provisioning_factor),
                       'provisioned_capacity_gb': actual_prov,
                       'max_over_subscription_ratio': self.provisioning_factor,
                       'thin_provisioning_support': True,
                       'thick_provisioning_support': False,
                       'reserved_percentage':
                       self.configuration.reserved_percentage,
                       'QoS_support': False}

    def get_volume_stats(self, refresh=False):
        """Get volume stats.
        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats

    def manage_existing(self, volume, existing_ref):
        """Manages an existing LV."""
        lv_name = existing_ref['source-name']
        # Attempt to locate the volume.
        try:
            vol_obj = self.client.req('volumes', name=lv_name)['content']
        except exception.NotFound:
            kwargs = {'existing_ref': lv_name,
                      'reason': 'Specified logical volume does not exist.'}
            raise exception.ManageExistingInvalidReference(**kwargs)
        # Attempt to rename the LV to match the OpenStack internal name.
        self.client.req('volumes', 'PUT', data={'vol-name': volume['id']},
                        idx=vol_obj['index'])

    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of an existing LV for manage_existing."""
        # Check that the reference is valid
        if 'source-name' not in existing_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=reason)
        lv_name = existing_ref['source-name']
        # Attempt to locate the volume.
        try:
            vol_obj = self.client.req('volumes', name=lv_name)['content']
        except exception.NotFound:
            kwargs = {'existing_ref': lv_name,
                      'reason': 'Specified logical volume does not exist.'}
            raise exception.ManageExistingInvalidReference(**kwargs)
        # LV size is returned in gigabytes.  Attempt to parse size as a float
        # and round up to the next integer.
        lv_size = int(math.ceil(int(vol_obj['vol-size']) / units.Mi))
        return lv_size

    def unmanage(self, volume):
        """Removes the specified volume from Cinder management."""
        # Trying to rename the volume to [cinder name]-unmanged.
        # NOTE(review): the '-unmanged' misspelling is part of the on-array
        # naming contract; do not "fix" it without a migration.
        try:
            self.client.req('volumes', 'PUT', name=volume['id'],
                            data={'vol-name': volume['name'] + '-unmanged'})
        except exception.NotFound:
            LOG.info(_LI("Volume with the name %s wasn't found,"
                         " can't unmanage"),
                     volume['id'])
            raise exception.VolumeNotFound(volume_id=volume['id'])

    def extend_volume(self, volume, new_size):
        """Extend an existing volume's size."""
        data = {'vol-size': six.text_type(new_size) + 'g'}
        try:
            self.client.req('volumes', 'PUT', data, name=volume['id'])
        except exception.NotFound:
            msg = _("can't find the volume to extend")
            raise (exception.VolumeDriverException(message=msg))

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        try:
            ig = self.client.req('initiator-groups',
                                 name=self._get_ig(connector))['content']
            tg = self.client.req('target-groups', name='Default')['content']
            vol = self.client.req('volumes', name=volume['id'])['content']
            # The lun-map name is the triplet of vol/ig/tg indexes.
            lm_name = '%s_%s_%s' % (six.text_type(vol['index']),
                                    six.text_type(ig['index'])
                                    if ig else 'any',
                                    six.text_type(tg['index']))
            LOG.debug('removing lun map %s', lm_name)
            self.client.req('lun-maps', 'DELETE', name=lm_name)
        except exception.NotFound:
            LOG.warning(_LW("terminate_connection: lun map not found"))

    def _get_password(self):
        """Generate a random 12-character alphanumeric CHAP password."""
        return ''.join(RANDOM.choice
                       (string.ascii_uppercase + string.digits)
                       for _ in range(12))

    def create_lun_map(self, volume, ig):
        """Map *volume* to initiator group *ig*, returning the lun-map."""
        try:
            res = self.client.req('lun-maps', 'POST',
                                  {'ig-id': ig['ig-id'][2],
                                   'vol-id': volume['id']})
            lunmap = self._obj_from_result(res)
            LOG.info(_LI('created lunmap\n%s'), lunmap)
        except exception.XtremIOAlreadyMappedError:
            # NOTE: %(vol)s, not %(vol)d -- volume ids are UUID strings;
            # the old %d conversion raised TypeError whenever this
            # already-mapped path was actually taken.
            LOG.info(_LI('volume already mapped,'
                         ' trying to retrieve it %(ig)s, %(vol)s'),
                     {'ig': ig['ig-id'][1], 'vol': volume['id']})
            lunmap = self.client.find_lunmap(ig['ig-id'][1], volume['id'])
        return lunmap

    def _get_ig(self, connector):
        """Protocol-specific initiator-group name; set by subclasses."""
        raise NotImplementedError()
class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
        if present, contains the iSCSI target information in the same
        format as an ietadm discovery
        i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
        if present, contains a space-separated triple:
        '<auth method> <auth username> <auth password>'.
        `CHAP` is the only auth_method in use at the moment.
    """
    driver_name = 'XtremIO_ISCSI'

    def __init__(self, *args, **kwargs):
        super(XtremIOISCSIDriver, self).__init__(*args, **kwargs)
        self.protocol = 'iSCSI'

    def initialize_connection(self, volume, connector):
        """Map the volume and return iSCSI (optionally CHAP) properties."""
        try:
            sys = self.client.get_cluster()
        except exception.NotFound:
            msg = _("XtremIO not initialized correctly, no clusters found")
            raise exception.VolumeBackendAPIException(data=msg)
        # CHAP modes are cluster-wide settings on the array.
        use_chap = (sys.get('chap-authentication-mode', 'disabled') !=
                    'disabled')
        discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
                          'disabled')
        initiator = self._get_initiator(connector)
        try:
            # check if the IG already exists
            ig = self.client.req('initiator-groups', 'GET',
                                 name=self._get_ig(connector))['content']
        except exception.NotFound:
            # create an initiator group to hold the initiator
            data = {'ig-name': self._get_ig(connector)}
            self.client.req('initiator-groups', 'POST', data)
            try:
                ig = self.client.req('initiator-groups',
                                     name=self._get_ig(connector))['content']
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException
                       (data=_("Failed to create IG, %s") %
                        self._get_ig(connector)))
        try:
            init = self.client.req('initiators', 'GET',
                                   name=initiator)['content']
            if use_chap:
                chap_passwd = init['chap-authentication-initiator-'
                                   'password']
                # delete the initiator to create a new one with password
                if not chap_passwd:
                    LOG.info(_LI('initiator has no password while using chap,'
                                 'removing it'))
                    self.client.req('initiators', 'DELETE', name=initiator)
                    # fall through to the creation path below
                    raise exception.NotFound()
        except exception.NotFound:
            # create an initiator
            data = {'initiator-name': initiator,
                    'ig-id': initiator,
                    'port-address': initiator}
            if use_chap:
                data['initiator-authentication-user-name'] = 'chap_user'
                chap_passwd = self._get_password()
                data['initiator-authentication-password'] = chap_passwd
            if discovery_chap:
                data['initiator-discovery-user-name'] = 'chap_user'
                data['initiator-discovery-'
                     'password'] = self._get_password()
            self.client.req('initiators', 'POST', data)
        # lun mapping
        lunmap = self.create_lun_map(volume, ig)
        properties = self._get_iscsi_properties(lunmap)
        if use_chap:
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = 'chap_user'
            properties['auth_password'] = chap_passwd
        LOG.debug('init conn params:\n%s', properties)
        return {
            'driver_volume_type': 'iscsi',
            'data': properties
        }

    def _get_iscsi_properties(self, lunmap):
        """Gets iscsi configuration

        :target_discovered: boolean indicating whether discovery was used
        :target_iqn: the IQN of the iSCSI target
        :target_portal: the portal of the iSCSI target
        :target_lun: the lun of the iSCSI target
        :volume_id: the id of the volume (currently used by xen)
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        :access_mode: the volume access mode allow client used
            ('rw' or 'ro' currently supported)
        """
        portal = self.client.get_iscsi_portal()
        # Portal addresses come back CIDR-style ('a.b.c.d/nn').
        ip = portal['ip-addr'].split('/')[0]
        properties = {'target_discovered': False,
                      'target_iqn': portal['port-address'],
                      'target_lun': lunmap['lun'],
                      'target_portal': '%s:%d' % (ip, portal['ip-port']),
                      'access_mode': 'rw'}
        return properties

    def _get_initiator(self, connector):
        # iSCSI identifies the host by its initiator IQN.
        return connector['initiator']

    def _get_ig(self, connector):
        # The IG is named after the initiator IQN.
        return connector['initiator']
class XtremIOFibreChannelDriver(XtremIOVolumeDriver,
                                driver.FibreChannelDriver):
    """FC flavor of the XtremIO driver; zones via fczm decorators."""

    def __init__(self, *args, **kwargs):
        super(XtremIOFibreChannelDriver, self).__init__(*args, **kwargs)
        self.protocol = 'FC'
        # Cache of up FC target WWNs, filled lazily by get_targets().
        self._targets = None

    def get_targets(self):
        """Return (and cache) the WWNs of all 'up' FC targets."""
        if not self._targets:
            try:
                target_list = self.client.req('targets')["targets"]
                targets = [self.client.req('targets',
                                           name=target['name'])['content']
                           for target in target_list
                           if '-fc' in target['name']]
                self._targets = [target['port-address'].replace(':', '')
                                 for target in targets
                                 if target['port-state'] == 'up']
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException
                       (data=_("Failed to get targets")))
        return self._targets

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Map the volume and return FC connection info + zoning map."""
        initiators = self._get_initiator(connector)
        ig_name = self._get_ig(connector)
        i_t_map = {}
        # get or create initiator group
        try:
            # check if the IG already exists
            ig = self.client.req('initiator-groups', name=ig_name)['content']
        except exception.NotFound:
            # create an initiator group to hold the initiator
            data = {'ig-name': ig_name}
            self.client.req('initiator-groups', 'POST', data)
            try:
                ig = self.client.req('initiator-groups',
                                     name=ig_name)['content']
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException
                       (data=_("Failed to create IG, %s") % ig_name))
        # get or create all initiators
        for initiator in initiators:
            try:
                self.client.req('initiators', name=initiator)['content']
            except exception.NotFound:
                # create an initiator
                data = {'initiator-name': initiator,
                        'ig-id': ig['name'],
                        'port-address': initiator}
                self.client.req('initiators', 'POST', data)
            i_t_map[initiator] = self.get_targets()
        lunmap = self.create_lun_map(volume, ig)
        return {'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': True,
                    'target_lun': lunmap['lun'],
                    'target_wwn': self.get_targets(),
                    'access_mode': 'rw',
                    'initiator_target_map': i_t_map}}

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Unmap the volume; include the zoning map only on last unmap."""
        (super(XtremIOFibreChannelDriver, self)
         .terminate_connection(volume, connector, **kwargs))
        num_vols = self.client.num_of_mapped_volumes(self._get_ig(connector))
        if num_vols > 0:
            # Other volumes still mapped: keep the zone in place.
            data = {}
        else:
            i_t_map = {}
            for initiator in self._get_initiator(connector):
                i_t_map[initiator] = self.get_targets()
            data = {'target_wwn': self.get_targets(),
                    'initiator_target_map': i_t_map}
        return {'driver_volume_type': 'fibre_channel',
                'data': data}

    def _get_initiator(self, connector):
        # FC identifies the host by its list of WWPNs.
        return connector['wwpns']

    def _get_ig(self, connector):
        # The IG is named after the host.
        return connector['host']
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import socket
import uuid
from babel import localedata
import mock
import webob
from keystone.common import environment
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common.fixture import moxstubout
from keystone.openstack.common import gettextutils
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import jsonutils
from keystone import tests
class FakeApp(wsgi.Application):
    """Minimal application whose index action returns a fixed payload."""
    def index(self, context):
        return {'a': 'b'}
class BaseWSGITest(tests.TestCase):
    """Shared fixture: a FakeApp plus a routed webob request factory."""

    def setUp(self):
        self.app = FakeApp()
        super(BaseWSGITest, self).setUp()

    def _make_request(self, url='/'):
        """Return a blank request pre-wired to route to the 'index' action."""
        request = webob.Request.blank(url)
        routing_args = {'action': 'index', 'controller': None}
        request.environ['wsgiorg.routing_args'] = [None, routing_args]
        return request
class ApplicationTest(BaseWSGITest):
    """Tests for wsgi.Application dispatch and the render_* helpers."""

    def test_response_content_type(self):
        req = self._make_request()
        resp = req.get_response(self.app)
        self.assertEqual(resp.content_type, 'application/json')

    def test_query_string_available(self):
        class FakeApp(wsgi.Application):
            def index(self, context):
                return context['query_string']
        req = self._make_request(url='/?1=2')
        resp = req.get_response(FakeApp())
        self.assertEqual(jsonutils.loads(resp.body), {'1': '2'})

    def test_headers_available(self):
        class FakeApp(wsgi.Application):
            def index(self, context):
                return context['headers']
        app = FakeApp()
        req = self._make_request(url='/?1=2')
        req.headers['X-Foo'] = "bar"
        resp = req.get_response(app)
        # Parse the JSON body instead of eval()-ing it: eval on response
        # data is unsafe, and jsonutils is already the convention here.
        self.assertIn('X-Foo', jsonutils.loads(resp.body))

    def test_render_response(self):
        data = {'attribute': 'value'}
        body = '{"attribute": "value"}'
        resp = wsgi.render_response(body=data)
        self.assertEqual(resp.status, '200 OK')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, body)
        self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')
        self.assertEqual(resp.headers.get('Content-Length'), str(len(body)))

    def test_render_response_custom_status(self):
        resp = wsgi.render_response(status=(501, 'Not Implemented'))
        self.assertEqual(resp.status, '501 Not Implemented')
        self.assertEqual(resp.status_int, 501)

    def test_render_response_custom_headers(self):
        resp = wsgi.render_response(headers=[('Custom-Header', 'Some-Value')])
        self.assertEqual(resp.headers.get('Custom-Header'), 'Some-Value')
        self.assertEqual(resp.headers.get('Vary'), 'X-Auth-Token')

    def test_render_response_no_body(self):
        resp = wsgi.render_response()
        self.assertEqual(resp.status, '204 No Content')
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.body, '')
        self.assertEqual(resp.headers.get('Content-Length'), '0')
        self.assertEqual(resp.headers.get('Content-Type'), None)

    def test_application_local_config(self):
        class FakeApp(wsgi.Application):
            def __init__(self, *args, **kwargs):
                self.kwargs = kwargs
        app = FakeApp.factory({}, testkey="test")
        self.assertIn("testkey", app.kwargs)
        self.assertEqual("test", app.kwargs["testkey"])

    def test_render_exception(self):
        e = exception.Unauthorized(message=u'\u7f51\u7edc')
        resp = wsgi.render_exception(e)
        self.assertEqual(resp.status_int, 401)

    def test_render_exception_host(self):
        e = exception.Unauthorized(message=u'\u7f51\u7edc')
        context = {'host_url': 'http://%s:5000' % uuid.uuid4().hex}
        resp = wsgi.render_exception(e, context=context)
        self.assertEqual(resp.status_int, 401)
class ExtensionRouterTest(BaseWSGITest):
    """Checks that ExtensionRouter.factory forwards local config kwargs."""

    def test_extensionrouter_local_config(self):
        class FakeRouter(wsgi.ExtensionRouter):
            def __init__(self, *args, **kwargs):
                self.kwargs = kwargs
        router_factory = FakeRouter.factory({}, testkey="test")
        router = router_factory(self.app)
        self.assertIn("testkey", router.kwargs)
        self.assertEqual("test", router.kwargs["testkey"])
class MiddlewareTest(BaseWSGITest):
    """Tests for wsgi.Middleware request/response hooks and error paths."""

    def test_middleware_request(self):
        class FakeMiddleware(wsgi.Middleware):
            def process_request(self, req):
                req.environ['fake_request'] = True
                return req
        req = self._make_request()
        resp = FakeMiddleware(None)(req)
        self.assertIn('fake_request', resp.environ)

    def test_middleware_response(self):
        class FakeMiddleware(wsgi.Middleware):
            def process_response(self, request, response):
                response.environ = {}
                response.environ['fake_response'] = True
                return response
        req = self._make_request()
        resp = FakeMiddleware(self.app)(req)
        self.assertIn('fake_response', resp.environ)

    def test_middleware_bad_request(self):
        # A keystone exception raised in process_response becomes an HTTP
        # error response with the matching status code.
        class FakeMiddleware(wsgi.Middleware):
            def process_response(self, request, response):
                raise exception.Unauthorized()
        req = self._make_request()
        req.environ['REMOTE_ADDR'] = '127.0.0.1'
        resp = FakeMiddleware(self.app)(req)
        self.assertEqual(resp.status_int, exception.Unauthorized.code)

    def test_middleware_type_error(self):
        class FakeMiddleware(wsgi.Middleware):
            def process_response(self, request, response):
                raise TypeError()
        req = self._make_request()
        req.environ['REMOTE_ADDR'] = '127.0.0.1'
        resp = FakeMiddleware(self.app)(req)
        # This is a validationerror type
        self.assertEqual(resp.status_int, exception.ValidationError.code)

    def test_middleware_exception_error(self):
        exception_str = 'EXCEPTIONERROR'

        class FakeMiddleware(wsgi.Middleware):
            def process_response(self, request, response):
                raise exception.UnexpectedError(exception_str)

        def do_request():
            req = self._make_request()
            resp = FakeMiddleware(self.app)(req)
            self.assertEqual(resp.status_int, exception.UnexpectedError.code)
            return resp

        # Exception data should not be in the message when debug is False
        self.config_fixture.config(debug=False)
        self.assertNotIn(exception_str, do_request().body)
        # Exception data should be in the message when debug is True
        self.config_fixture.config(debug=True)
        self.assertIn(exception_str, do_request().body)

    def test_middleware_local_config(self):
        class FakeMiddleware(wsgi.Middleware):
            def __init__(self, *args, **kwargs):
                self.kwargs = kwargs
        factory = FakeMiddleware.factory({}, testkey="test")
        app = factory(self.app)
        self.assertIn("testkey", app.kwargs)
        self.assertEqual("test", app.kwargs["testkey"])
class LocalizedResponseTest(tests.TestCase):
    """Tests for language negotiation and lazy-translated messages."""

    def setUp(self):
        super(LocalizedResponseTest, self).setUp()
        gettextutils._AVAILABLE_LANGUAGES.clear()
        self.addCleanup(gettextutils._AVAILABLE_LANGUAGES.clear)
        fixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = fixture.stubs

    def _set_expected_languages(self, all_locales=None, avail_locales=None):
        """Stub babel/gettext so only the given locales appear installed.

        *all_locales* defaults to an empty list.  (It was previously a
        mutable default argument -- a Python anti-pattern even though it
        was never mutated here.)
        """
        if all_locales is None:
            all_locales = []

        # Override localedata.locale_identifiers to return some locales.
        def returns_some_locales(*args, **kwargs):
            return all_locales

        self.stubs.Set(localedata, 'locale_identifiers', returns_some_locales)

        # Override gettext.find to return other than None for some languages.
        def fake_gettext_find(lang_id, *args, **kwargs):
            found_ret = '/keystone/%s/LC_MESSAGES/keystone.mo' % lang_id
            if avail_locales is None:
                # All locales are available.
                return found_ret
            languages = kwargs['languages']
            if languages[0] in avail_locales:
                return found_ret
            return None

        self.stubs.Set(gettext, 'find', fake_gettext_find)

    def test_request_match_default(self):
        # The default language if no Accept-Language is provided is None
        req = webob.Request.blank('/')
        self.assertIsNone(wsgi.best_match_language(req))

    def test_request_match_language_expected(self):
        # If Accept-Language is a supported language, best_match_language()
        # returns it.
        self._set_expected_languages(all_locales=['it'])
        req = webob.Request.blank('/', headers={'Accept-Language': 'it'})
        self.assertEqual(wsgi.best_match_language(req), 'it')

    def test_request_match_language_unexpected(self):
        # If Accept-Language is a language we do not support,
        # best_match_language() returns None.
        self._set_expected_languages(all_locales=['it'])
        req = webob.Request.blank('/', headers={'Accept-Language': 'zh'})
        self.assertIsNone(wsgi.best_match_language(req))

    def test_static_translated_string_is_Message(self):
        # Statically created message strings are Message objects so that they
        # are lazy-translated.
        self.assertIsInstance(exception.Unauthorized.message_format,
                              gettextutils.Message)

    def test_dynamic_translated_string_is_Message(self):
        # Dynamically created message strings are Message objects so that they
        # are lazy-translated.
        self.assertIsInstance(_('The resource could not be found.'),
                              gettextutils.Message)
class ServerTest(tests.TestCase):
    """Tests for TCP keepalive/keepidle socket options on the server."""

    def setUp(self):
        super(ServerTest, self).setUp()
        environment.use_eventlet()
        self.host = '127.0.0.1'
        self.port = '1234'

    @mock.patch('eventlet.listen')
    @mock.patch('socket.getaddrinfo')
    def test_keepalive_unset(self, mock_getaddrinfo, mock_listen):
        # Without keepalive, no socket options should be set.
        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
        mock_sock = mock.Mock()
        mock_sock.setsockopt = mock.Mock()
        mock_listen.return_value = mock_sock
        server = environment.Server(mock.MagicMock(), host=self.host,
                                    port=self.port)
        server.start()
        self.assertTrue(mock_listen.called)
        self.assertFalse(mock_sock.setsockopt.called)

    @mock.patch('eventlet.listen')
    @mock.patch('socket.getaddrinfo')
    def test_keepalive_set(self, mock_getaddrinfo, mock_listen):
        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
        mock_sock = mock.Mock()
        mock_sock.setsockopt = mock.Mock()
        mock_listen.return_value = mock_sock
        server = environment.Server(mock.MagicMock(), host=self.host,
                                    port=self.port, keepalive=True)
        server.start()
        mock_sock.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE,
                                                     1)
        self.assertTrue(mock_listen.called)

    @mock.patch('eventlet.listen')
    @mock.patch('socket.getaddrinfo')
    def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen):
        mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
        mock_sock = mock.Mock()
        mock_sock.setsockopt = mock.Mock()
        mock_listen.return_value = mock_sock
        server = environment.Server(mock.MagicMock(), host=self.host,
                                    port=self.port, keepalive=True,
                                    keepidle=1)
        server.start()
        # keepidle isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            self.assertEqual(mock_sock.setsockopt.call_count, 2)
            # Test the last set of call args i.e. for the keepidle
            mock_sock.setsockopt.assert_called_with(socket.IPPROTO_TCP,
                                                    socket.TCP_KEEPIDLE,
                                                    1)
        else:
            self.assertEqual(mock_sock.setsockopt.call_count, 1)
        self.assertTrue(mock_listen.called)
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Product tests for presto-admin status commands
"""
from nose.plugins.attrib import attr
from tests.no_hadoop_bare_image_provider import NoHadoopBareImageProvider
from tests.product.base_product_case import BaseProductTestCase, \
PRESTO_VERSION, PrestoError
from tests.product.cluster_types import STANDALONE_PA_CLUSTER, STANDALONE_PRESTO_CLUSTER
from tests.product.standalone.presto_installer import StandalonePrestoInstaller
class TestStatus(BaseProductTestCase):
    def setUp(self):
        """Create a fresh standalone Presto installer for each test."""
        super(TestStatus, self).setUp()
        self.installer = StandalonePrestoInstaller(self)
def test_status_uninstalled(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
self.upload_topology()
status_output = self._server_status_with_retries()
self.check_status(status_output, self.not_installed_status())
def test_status_not_started(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
status_output = self._server_status_with_retries()
self.check_status(status_output, self.not_started_status())
@attr('smoketest')
def test_status_happy_path(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
self.run_prestoadmin('server start')
status_output = self._server_status_with_retries(check_catalogs=True)
self.check_status(status_output, self.base_status())
def test_status_only_coordinator(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
self.run_prestoadmin('server start -H master')
# don't run with retries because it won't be able to query the
# coordinator because the coordinator is set to not be a worker
status_output = self.run_prestoadmin('server status')
self.check_status(
status_output,
self.single_node_up_status(self.cluster.internal_master)
)
def test_status_only_worker(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
self.run_prestoadmin('server start -H slave1')
status_output = self._server_status_with_retries()
self.check_status(
status_output,
self.single_node_up_status(self.cluster.internal_slaves[0])
)
# Check that the slave sees that it's stopped, even though the
# discovery server is not up.
self.run_prestoadmin('server stop')
status_output = self._server_status_with_retries()
self.check_status(status_output, self.not_started_status())
    def test_connection_to_coordinator_lost(self):
        """Losing the coordinator host degrades status for every node."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        # Put the coordinator on a slave so we can kill its container
        # without losing the presto-admin host.
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[0])
        topology = {"coordinator": self.cluster.get_down_hostname("slave1"),
                    "workers": ["master", "slave2", "slave3"]}
        status_output = self._server_status_with_retries()
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[0],
            coordinator_down=True)
        self.check_status(status_output, statuses)
    def test_connection_to_worker_lost(self):
        """Losing one worker degrades status for that node only."""
        self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PA_CLUSTER)
        topology = {"coordinator": "slave1", "workers":
                    ["master", "slave2", "slave3"]}
        self.upload_topology(topology=topology)
        self.installer.install(coordinator='slave1')
        self.run_prestoadmin('server start')
        self.cluster.stop_host(
            self.cluster.slaves[1])
        topology = {"coordinator": "slave1", "workers":
                    ["master", self.cluster.get_down_hostname("slave2"),
                     "slave3"]}
        status_output = self._server_status_with_retries(check_catalogs=True)
        statuses = self.node_not_available_status(
            topology, self.cluster.internal_slaves[1])
        self.check_status(status_output, statuses)
def test_status_non_root_user(self):
self.setup_cluster(NoHadoopBareImageProvider(), STANDALONE_PRESTO_CLUSTER)
self.upload_topology(
{"coordinator": "master",
"workers": ["slave1", "slave2", "slave3"],
"username": "app-admin"}
)
self.run_prestoadmin('server start -p password')
status_output = self._server_status_with_retries(check_catalogs=True, extra_arguments=' -p password')
self.check_status(status_output, self.base_status())
def base_status(self, topology=None):
    """Build the expected status dicts for a fully running cluster.

    Args:
        topology: optional dict with 'coordinator' (hostname) and
            'workers' (list of hostnames). Defaults to the standard
            master-coordinator / three-worker layout.

    Returns:
        List of dicts with keys 'host', 'role', 'ip' and 'is_running',
        coordinator first, every node marked 'Running'.
    """
    ips = self.cluster.get_ip_address_dict()
    if not topology:
        topology = {
            'coordinator': self.cluster.internal_master, 'workers':
            [self.cluster.internal_slaves[0],
             self.cluster.internal_slaves[1],
             self.cluster.internal_slaves[2]]
        }
    statuses = []
    hosts_in_status = [topology['coordinator']] + topology['workers'][:]
    for host in hosts_in_status:
        # Compare hostnames by equality, not identity: 'is' only worked
        # by accident when both places held the same string object.
        role = 'coordinator' if host == topology['coordinator']\
            else 'worker'
        status = {'host': host, 'role': role, 'ip': ips[host],
                  'is_running': 'Running'}
        statuses += [status]
    return statuses
def not_started_status(self):
    """Expected statuses when Presto is installed everywhere but no
    server process is running."""
    unreachable = ('\tNo information available: '
                   'unable to query coordinator')
    statuses = self.base_status()
    for entry in statuses:
        entry['ip'] = 'Unknown'
        entry['is_running'] = 'Not Running'
        entry['error_message'] = unreachable
    return statuses
def not_installed_status(self):
    """Expected statuses when Presto has not been installed on any node."""
    statuses = self.base_status()
    for entry in statuses:
        entry['is_running'] = 'Not Running'
        entry['ip'] = 'Unknown'
        entry['error_message'] = '\tPresto is not installed.'
    return statuses
def single_node_up_status(self, node):
    """Expected statuses when only ``node`` is running.

    Starts from the all-stopped baseline and flips just the given node
    to 'Running'.
    """
    statuses = self.not_started_status()
    for status in statuses:
        # Equality, not identity: hostname strings are not guaranteed to
        # be the same object as the caller's argument.
        if status['host'] == node:
            status['is_running'] = 'Running'
    return statuses
def node_not_available_status(self, topology, node,
                              coordinator_down=False):
    """Expected statuses when a single node's host is unreachable.

    Args:
        topology: topology dict used to build the baseline statuses.
        node: internal hostname of the unreachable node.
        coordinator_down: when True the downed node is the coordinator,
            so every *other* node also loses its information (status is
            gathered through the coordinator).
    """
    statuses = self.base_status(topology)
    for status in statuses:
        if status['host'] == node:
            status['is_running'] = 'Not Running'
            status['error_message'] = \
                self.status_node_connection_error(node)
            status['ip'] = 'Unknown'
            # Report the "down" variant of the hostname for the dead host.
            status['host'] = self.cluster.get_down_hostname(node)
        elif coordinator_down:
            status['error_message'] = '\tNo information available: ' \
                'unable to query coordinator'
            status['ip'] = 'Unknown'
    return statuses
def status_fail_msg(self, actual_output, expected_regexp):
    """Build an assertion message for a status mismatch that embeds the
    tail of the presto-admin log for debugging."""
    log_tail = self.fetch_log_tail(lines=100)
    sections = ['=== ACTUAL OUTPUT ===\n%s\n' % actual_output,
                '=== DID NOT MATCH REGEXP ===\n%s\n' % expected_regexp,
                '=== LOG FOR DEBUGGING ===\n%s' % log_tail,
                '=== END OF LOG ===']
    return ''.join(sections)
def check_status(self, cmd_output, statuses, port=7070):
    """Assert that 'server status' output matches the expected statuses.

    Args:
        cmd_output: raw output of the status command.
        statuses: list of expected per-node status dicts (see
            base_status()).
        port: HTTP port expected in each running node's URI line.
    """
    expected_output = []
    for status in statuses:
        expected_output += \
            ['Server Status:',
             '\t%s\(IP: .+, Roles: %s\): %s' %
             (status['host'], status['role'], status['is_running'])]
        if 'error_message' in status and status['error_message']:
            expected_output += [status['error_message']]
        # '==' rather than 'is': identity comparison against a string
        # literal only passes because of CPython interning.
        elif status['is_running'] == 'Running':
            expected_output += \
                ['\tNode URI\(http\): http://.+:%s' % str(port),
                 '\tPresto Version: ' + PRESTO_VERSION,
                 '\tNode status: active',
                 '\tCatalogs: system, tpch']
    expected_regex = '\n'.join(expected_output)
    # The status command is written such that there are a couple ways that
    # the presto client can fail that result in partial output from the
    # command, but errors in the logs. If we fail to match, we include the
    # log information in the assertion message to make determining exactly
    # what failed easier. Grab the logs lazily so that we don't incur the
    # cost of getting them when they aren't needed. The status tests are
    # slow enough already.
    self.assertLazyMessage(
        lambda: self.status_fail_msg(cmd_output, expected_regex),
        self.assertRegexpMatches, cmd_output, expected_regex)
def _server_status_with_retries(self, check_catalogs=False, extra_arguments=''):
    """Run 'server status', retrying (for up to 720 seconds) until the
    coordinator has discovered the whole cluster.

    Args:
        check_catalogs: also require catalog information in the output.
        extra_arguments: appended verbatim to the presto-admin command.

    Returns:
        The status output once the coordinator is caught up; fails the
        test if retries are exhausted.
    """
    try:
        return self.retry(lambda: self._get_status_until_coordinator_updated(
            check_catalogs, extra_arguments=extra_arguments), 720, 0)
    except PrestoError as e:
        # NOTE(review): e.message is a Python 2 idiom; str(e) would be
        # needed on Python 3 -- confirm the supported interpreter.
        self.assertLazyMessage(
            lambda: self.status_fail_msg(e.message, "Ran out of time retrying status"),
            self.fail,
            "PrestoError: %s" % e.message)
def _get_status_until_coordinator_updated(self, check_catalogs=False, extra_arguments=''):
    """Run 'server status' once, raising PrestoError while the
    coordinator has not yet caught up with the cluster state."""
    output = self.run_prestoadmin('server status' + extra_arguments)
    if 'the coordinator has not yet discovered this node' in output:
        raise PrestoError('Coordinator has not discovered all nodes yet: '
                          '%s' % output)
    not_ready_marker = ('Roles: coordinator): Running\n\tNo information '
                        'available: unable to query coordinator')
    if not_ready_marker in output:
        raise PrestoError('Coordinator not started up properly yet.'
                          '\nOutput: %s' % output)
    if check_catalogs and 'Catalogs:' not in output:
        raise PrestoError('Catalogs not loaded yet: %s' % output)
    return output
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:class:`DisplayData`, its classes, interfaces and methods.
The classes in this module allow users and transform developers to define
static display data to be displayed when a pipeline runs.
:class:`~apache_beam.transforms.ptransform.PTransform` s,
:class:`~apache_beam.transforms.core.DoFn` s
and other pipeline components are subclasses of the :class:`HasDisplayData`
mixin. To add static display data to a component, you can override the
:meth:`HasDisplayData.display_data()` method.
Available classes:
* :class:`HasDisplayData` - Components that inherit from this class can have
static display data shown in the UI.
* :class:`DisplayDataItem` - This class represents static display data
elements.
* :class:`DisplayData` - Internal class that is used to create display data
and communicate it to the API.
"""
from __future__ import absolute_import
import calendar
import inspect
import json
from builtins import object
from datetime import datetime
from datetime import timedelta
from past.builtins import unicode
__all__ = ['HasDisplayData', 'DisplayDataItem', 'DisplayData']
class HasDisplayData(object):
  """ Basic mixin for elements that contain display data.

  It implements only the display_data method and a _namespace method.
  """

  def display_data(self):
    """ Returns the display data associated to a pipeline component.

    Subclasses that want static display data shown in the UI reimplement
    this method.

    Returns:
      Dict[str, Any]: A dictionary containing ``key:value`` pairs.
      A value may be a plain integer, float or string; a
      :class:`DisplayDataItem` carrying extra metadata (short value,
      label, url); or a nested :class:`HasDisplayData` instance whose own
      display data is picked up. For example::

        {
          'key1': 'string_value',
          'key2': 1234,
          'key3': 3.14159265,
          'key4': DisplayDataItem('apache.org', url='http://apache.org'),
          'key5': subComponent
        }
    """
    return {}

  def _namespace(self):
    # Fully qualified "<module>.<class>" name used to namespace items.
    return '%s.%s' % (self.__module__, self.__class__.__name__)
class DisplayData(object):
  """ Static display data associated with a pipeline component.
  """

  def __init__(self, namespace, display_data_dict):
    self.namespace = namespace
    self.items = []
    self._populate_items(display_data_dict)

  def _populate_items(self, display_data_dict):
    """ Populates the list of display data items.
    """
    for key, element in display_data_dict.items():
      if isinstance(element, HasDisplayData):
        # Recurse into the subcomponent and absorb its items.
        nested = DisplayData(element._namespace(), element.display_data())
        self.items.extend(nested.items)
      elif isinstance(element, DisplayDataItem):
        if not element.should_drop():
          # Attach ownership information before keeping the item.
          element.key = key
          element.namespace = self.namespace
          self.items.append(element)
      else:
        # Neither a HasDisplayData nor a DisplayDataItem: treat it as a
        # simple value and wrap it.
        self.items.append(
            DisplayDataItem(element,
                            namespace=self.namespace,
                            key=key))

  @classmethod
  def create_from_options(cls, pipeline_options):
    """ Creates :class:`DisplayData` from a
    :class:`~apache_beam.options.pipeline_options.PipelineOptions` instance.

    When creating :class:`DisplayData`, this method will convert the value of
    any item of a non-supported type to its string representation.
    The normal :meth:`.create_from()` method rejects those items.

    Returns:
      DisplayData: A :class:`DisplayData` instance with populated items.

    Raises:
      ~exceptions.ValueError: If the **has_display_data** argument is
        not an instance of :class:`HasDisplayData`.
    """
    from apache_beam.options.pipeline_options import PipelineOptions
    if not isinstance(pipeline_options, PipelineOptions):
      raise ValueError(
          'Element of class {}.{} does not subclass PipelineOptions'
          .format(pipeline_options.__module__,
                  pipeline_options.__class__.__name__))
    items = {}
    for key, value in pipeline_options.display_data().items():
      # Stringify values whose type the display-data API cannot carry.
      if DisplayDataItem._get_value_type(value) is None:
        value = str(value)
      items[key] = value
    return cls(pipeline_options._namespace(), items)

  @classmethod
  def create_from(cls, has_display_data):
    """ Creates :class:`DisplayData` from a :class:`HasDisplayData` instance.

    Returns:
      DisplayData: A :class:`DisplayData` instance with populated items.

    Raises:
      ~exceptions.ValueError: If the **has_display_data** argument is
        not an instance of :class:`HasDisplayData`.
    """
    if not isinstance(has_display_data, HasDisplayData):
      raise ValueError('Element of class {}.{} does not subclass HasDisplayData'
                       .format(has_display_data.__module__,
                               has_display_data.__class__.__name__))
    return cls(has_display_data._namespace(), has_display_data.display_data())
class DisplayDataItem(object):
  """ A DisplayDataItem represents a unit of static display data.

  Each item is identified by a key and the namespace of the component the
  display item belongs to.
  """
  # Maps Python value types to display-data API type names. 'CLASS' is
  # not a key here; it is inferred separately in _get_value_type.
  typeDict = {str:'STRING',
              unicode:'STRING',
              int:'INTEGER',
              float:'FLOAT',
              bool: 'BOOLEAN',
              timedelta:'DURATION',
              datetime:'TIMESTAMP'}

  def __init__(self, value, url=None, label=None,
               namespace=None, key=None, shortValue=None):
    # namespace/key are typically filled in later by DisplayData when the
    # item is attached to a component.
    self.namespace = namespace
    self.key = key
    self.type = self._get_value_type(value)
    self.shortValue = (shortValue if shortValue is not None else
                       self._get_short_value(value, self.type))
    self.value = value
    self.url = url
    self.label = label
    # Drop-policy flags, toggled via drop_if_none()/drop_if_default().
    self._drop_if_none = False
    self._drop_if_default = False

  def drop_if_none(self):
    """ The item should be dropped if its value is None.

    Returns:
      Returns self.
    """
    self._drop_if_none = True
    return self

  def drop_if_default(self, default):
    """ The item should be dropped if its value is equal to its default.

    Returns:
      Returns self.
    """
    self._default = default
    self._drop_if_default = True
    return self

  def should_drop(self):
    """ Return True if the item should be dropped, or False if it should not
    be dropped. This depends on the drop_if_none, and drop_if_default calls.

    Returns:
      True or False; depending on whether the item should be dropped or kept.
    """
    if self._drop_if_none and self.value is None:
      return True
    if self._drop_if_default and self.value == self._default:
      return True
    return False

  def is_valid(self):
    """ Checks that all the necessary fields of the :class:`DisplayDataItem`
    are filled in. It checks that neither key, namespace, value or type are
    :data:`None`.

    Raises:
      ~exceptions.ValueError: If the item does not have a key, namespace,
        value or type.
    """
    if self.key is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Key must not be None.' % self)
    if self.namespace is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Namespace must not be None' % self)
    if self.value is None:
      raise ValueError(
          'Invalid DisplayDataItem %s. Value must not be None' % self)
    if self.type is None:
      raise ValueError(
          'Invalid DisplayDataItem. Value {} is of an unsupported type.'
          .format(self.value))

  def _get_dict(self):
    # Builds the raw dictionary without validating the item first;
    # get_dict() is the validating public entry point.
    res = {'key': self.key,
           'namespace': self.namespace,
           'type': self.type if self.type != 'CLASS' else 'STRING'}
    # TODO: Python Class types should not be special-cased once
    # the Fn API is in.
    if self.url is not None:
      res['url'] = self.url
    if self.shortValue is not None:
      res['shortValue'] = self.shortValue
    if self.label is not None:
      res['label'] = self.label
    res['value'] = self._format_value(self.value, self.type)
    return res

  def get_dict(self):
    """ Returns the internal-API dictionary representing the
    :class:`DisplayDataItem`.

    Returns:
      Dict[str, Any]: A dictionary. The internal-API dictionary representing
      the :class:`DisplayDataItem`.

    Raises:
      ~exceptions.ValueError: if the item is not valid.
    """
    self.is_valid()
    return self._get_dict()

  def __repr__(self):
    return 'DisplayDataItem({})'.format(json.dumps(self._get_dict()))

  def __eq__(self, other):
    # Items compare equal when their API dictionaries match.
    if isinstance(other, self.__class__):
      return self._get_dict() == other._get_dict()
    return False

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # Consistent with __eq__: hash the sorted API dictionary entries.
    return hash(tuple(sorted(self._get_dict().items())))

  @classmethod
  def _format_value(cls, value, type_):
    """ Returns the API representation of a value given its type.

    Args:
      value: The value of the item that needs to be shortened.
      type_(string): The type of the value.

    Returns:
      A formatted value in the form of a float, int, or string.
    """
    res = value
    if type_ == 'CLASS':
      res = '{}.{}'.format(value.__module__, value.__name__)
    elif type_ == 'DURATION':
      # Durations are reported in milliseconds.
      res = value.total_seconds()*1000
    elif type_ == 'TIMESTAMP':
      # Timestamps are reported as milliseconds since the epoch (UTC).
      res = calendar.timegm(value.timetuple())*1000 + value.microsecond//1000
    return res

  @classmethod
  def _get_short_value(cls, value, type_):
    """ Calculates the short value for an item.

    Args:
      value: The value of the item that needs to be shortened.
      type_(string): The type of the value.

    Returns:
      The unqualified name of a class if type_ is 'CLASS'. None otherwise.
    """
    if type_ == 'CLASS':
      return value.__name__
    return None

  @classmethod
  def _get_value_type(cls, value):
    """ Infers the type of a given value.

    Args:
      value: The value whose type needs to be inferred. For 'DURATION' and
        'TIMESTAMP', the corresponding Python type is datetime.timedelta and
        datetime.datetime respectively. For Python classes, the API type is
        just 'STRING' at the moment.

    Returns:
      One of 'STRING', 'INTEGER', 'FLOAT', 'CLASS', 'DURATION', or
      'TIMESTAMP', depending on the type of the value.
    """
    #TODO: Fix Args: documentation once the Python classes handling has changed
    type_ = cls.typeDict.get(type(value))
    if type_ is None:
      type_ = 'CLASS' if inspect.isclass(value) else None
    # A literal None value is carried as a STRING.
    if type_ is None and value is None:
      type_ = 'STRING'
    return type_
| |
# Mount RPC client -- RFC 1094 (NFS), Appendix A
# This module demonstrates how to write your own RPC client in Python.
# When this example was written, there was no RPC compiler for
# Python. Without such a compiler, you must first create classes
# derived from Packer and Unpacker to handle the data types for the
# server you want to interface to. You then write the client class.
# If you want to support both the TCP and the UDP version of a
# protocol, use multiple inheritance as shown below.
import rpc
from rpc import Packer, Unpacker, TCPClient, UDPClient
# Program number and version for the mount protocol
MOUNTPROG = 100005
MOUNTVERS = 1
# Size of the 'fhandle' opaque structure
FHSIZE = 32
# Packer derived class for Mount protocol clients.
# The only thing we need to pack beyond basic types is an 'fhandle'
class MountPacker(Packer):
    """Packer for Mount protocol requests; adds fhandle packing."""

    def pack_fhandle(self, fhandle):
        # An fhandle is a fixed-size (FHSIZE bytes) opaque value.
        self.pack_fopaque(FHSIZE, fhandle)
# Unpacker derived class for Mount protocol clients.
# The important types we need to unpack are fhandle, fhstatus,
# mountlist and exportlist; mountstruct, exportstruct and groups are
# used to unpack components of mountlist and exportlist and the
# corresponding functions are passed as function argument to the
# generic unpack_list function.
class MountUnpacker(Unpacker):
    """Unpacker for Mount protocol replies (fhandle, fhstatus,
    mountlist, exportlist)."""

    def unpack_fhandle(self):
        # An fhandle is FHSIZE opaque bytes.
        return self.unpack_fopaque(FHSIZE)

    def unpack_fhstatus(self):
        # fhstatus: an integer status, followed by an fhandle only when
        # the status is 0 (success).
        status = self.unpack_uint()
        if status == 0:
            fh = self.unpack_fhandle()
        else:
            fh = None
        return status, fh

    def unpack_mountlist(self):
        return self.unpack_list(self.unpack_mountstruct)

    def unpack_mountstruct(self):
        # One mount entry: (client hostname, mounted directory).
        hostname = self.unpack_string()
        directory = self.unpack_string()
        return (hostname, directory)

    def unpack_exportlist(self):
        return self.unpack_list(self.unpack_exportstruct)

    def unpack_exportstruct(self):
        # One export entry: (file system path, list of allowed groups).
        filesys = self.unpack_string()
        groups = self.unpack_groups()
        return (filesys, groups)

    def unpack_groups(self):
        return self.unpack_list(self.unpack_string)
# These are the procedures specific to the Mount client class.
# Think of this as a derived class of either TCPClient or UDPClient.
class PartialMountClient:
    """Mount-protocol procedures, independent of transport.

    Combined with TCPClient or UDPClient via multiple inheritance to
    form a complete client class.
    """

    # This method is called by Client.__init__ to initialize
    # self.packer and self.unpacker
    def addpackers(self):
        self.packer = MountPacker()
        self.unpacker = MountUnpacker('')

    # This method is called by Client.__init__ to bind the socket
    # to a particular network interface and port.  We use the
    # default network interface, but if we're running as root,
    # we want to bind to a reserved port
    def bindsocket(self):
        import os
        try:
            uid = os.getuid()
        except AttributeError:
            # Platform without getuid() (e.g. Windows): treat as non-root.
            uid = 1
        if uid == 0:
            port = rpc.bindresvport(self.sock, '')
            # 'port' is not used
        else:
            self.sock.bind(('', 0))

    # This function is called to cough up a suitable
    # authentication object for a call to procedure 'proc'.
    def mkcred(self):
        # 'is None' rather than '== None': identity is the correct and
        # robust way to test for an unset credential.
        if self.cred is None:
            self.cred = rpc.AUTH_UNIX, rpc.make_auth_unix_default()
        return self.cred

    # The methods Mnt, Dump etc. each implement one Remote
    # Procedure Call.  This is done by calling self.make_call()
    # with as arguments:
    #
    # - the procedure number
    # - the arguments (or None)
    # - the "packer" function for the arguments (or None)
    # - the "unpacker" function for the return value (or None)
    #
    # The packer and unpacker function, if not None, *must* be
    # methods of self.packer and self.unpacker, respectively.
    # A value of None means that there are no arguments or is no
    # return value, respectively.
    #
    # The return value from make_call() is the return value from
    # the remote procedure call, as unpacked by the "unpacker"
    # function, or None if the unpacker function is None.
    #
    # (Even if you expect a result of None, you should still
    # return the return value from make_call(), since this may be
    # needed by a broadcasting version of the class.)
    #
    # If the call fails, make_call() raises an exception
    # (this includes time-outs and invalid results).
    #
    # Note that (at least with the UDP protocol) there is no
    # guarantee that a call is executed at most once.  When you do
    # get a reply, you know it has been executed at least once;
    # when you don't get a reply, you know nothing.

    def Mnt(self, directory):
        return self.make_call(1, directory,
                              self.packer.pack_string,
                              self.unpacker.unpack_fhstatus)

    def Dump(self):
        return self.make_call(2, None,
                              None, self.unpacker.unpack_mountlist)

    def Umnt(self, directory):
        return self.make_call(3, directory,
                              self.packer.pack_string, None)

    def Umntall(self):
        return self.make_call(4, None, None, None)

    def Export(self):
        return self.make_call(5, None,
                              None, self.unpacker.unpack_exportlist)
# We turn the partial Mount client into a full one for either protocol
# by use of multiple inheritance. (In general, when class C has base
# classes B1...Bn, if x is an instance of class C, methods of x are
# searched first in C, then in B1, then in B2, ..., finally in Bn.)
class TCPMountClient(PartialMountClient, TCPClient):
    """Complete Mount client over TCP transport."""

    def __init__(self, host):
        TCPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)
class UDPMountClient(PartialMountClient, UDPClient):
    """Complete Mount client over UDP transport."""

    def __init__(self, host):
        UDPClient.__init__(self, host, MOUNTPROG, MOUNTVERS)
# A little test program for the Mount client. This takes a host as
# command line argument (default the local machine), prints its export
# list, and attempts to mount and unmount each exported files system.
# An optional first argument of -t or -u specifies the protocol to use
# (TCP or UDP), default is UDP.
def test():
    """Exercise the Mount client: print a host's export list and try to
    mount and unmount each exported file system.

    Usage: [-t | -u] [host] -- TCP or UDP transport (default UDP),
    default host is the local machine.
    """
    import sys
    if sys.argv[1:] and sys.argv[1] == '-t':
        C = TCPMountClient
        del sys.argv[1]
    elif sys.argv[1:] and sys.argv[1] == '-u':
        C = UDPMountClient
        del sys.argv[1]
    else:
        C = UDPMountClient
    if sys.argv[1:]: host = sys.argv[1]
    else: host = ''
    mcl = C(host)
    # Renamed from 'list' so the builtin is not shadowed.
    exports = mcl.Export()
    for item in exports:
        print item
        try:
            mcl.Mnt(item[0])
        except:
            # NOTE(review): bare except kept deliberately -- the ancient
            # rpc module may raise string exceptions, and a failure to
            # mount one export should not stop the others. Confirm before
            # narrowing to 'except Exception:'.
            print 'Sorry'
            continue
        mcl.Umnt(item[0])
| |
# Copyright (c) 2008,2015,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `basic` module."""
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from metpy.calc import (add_height_to_pressure, add_pressure_to_height,
altimeter_to_sea_level_pressure, altimeter_to_station_pressure,
apparent_temperature, coriolis_parameter, geopotential_to_height,
heat_index, height_to_geopotential, height_to_pressure_std,
pressure_to_height_std, sigma_to_pressure, smooth_circular,
smooth_gaussian, smooth_n_point, smooth_rectangular, smooth_window,
wind_components, wind_direction, wind_speed, windchill)
from metpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_array_equal)
from metpy.units import units
def test_wind_comps_basic():
    """Check u/v components for winds from each principal direction."""
    root2 = np.sqrt(2.)
    spd = np.array([4, 4, 4, 4, 25, 25, 25, 25, 10.]) * units.mph
    wdir = np.array([0, 45, 90, 135, 180, 225, 270, 315, 360]) * units.deg
    u, v = wind_components(spd, wdir)
    expected_u = np.array([0, -4 / root2, -4, -4 / root2, 0,
                           25 / root2, 25, 25 / root2, 0]) * units.mph
    expected_v = np.array([-4, -4 / root2, 0, 4 / root2, 25,
                           25 / root2, 0, -25 / root2, -10]) * units.mph
    assert_array_almost_equal(expected_u, u, 4)
    assert_array_almost_equal(expected_v, v, 4)
def test_wind_comps_with_north_and_calm():
    """Northerly (360 deg) and calm winds must yield sensible components."""
    spd = np.array([0, 5, 5]) * units.mph
    wdir = np.array([0, 360, 0]) * units.deg
    u, v = wind_components(spd, wdir)
    expected_u = np.array([0, 0, 0]) * units.mph
    expected_v = np.array([0, -5, -5]) * units.mph
    assert_array_almost_equal(expected_u, u, 4)
    assert_array_almost_equal(expected_v, v, 4)
def test_wind_comps_scalar():
    """Scalar speed/direction inputs should give scalar components."""
    u, v = wind_components(8 * units('m/s'), 150 * units.deg)
    expected_u = -4 * units('m/s')
    expected_v = 6.9282 * units('m/s')
    assert_almost_equal(u, expected_u, 3)
    assert_almost_equal(v, expected_v, 3)
def test_speed():
    """Wind speed should be the vector magnitude of u and v."""
    u_comp = np.array([4., 2., 0., 0.]) * units('m/s')
    v_comp = np.array([0., 2., 4., 0.]) * units('m/s')
    computed = wind_speed(u_comp, v_comp)
    expected = np.array([4., 2 * np.sqrt(2.), 4., 0.]) * units('m/s')
    assert_array_almost_equal(expected, computed, 4)
def test_direction():
    """Wind direction should follow the meteorological 'from' convention."""
    u_comp = np.array([4., 2., 0., 0.]) * units('m/s')
    v_comp = np.array([0., 2., 4., 0.]) * units('m/s')
    computed = wind_direction(u_comp, v_comp)
    expected = np.array([270., 225., 180., 0.]) * units.deg
    assert_array_almost_equal(expected, computed, 4)
def test_direction_masked():
    """Masked elements of u/v should stay masked in the direction."""
    mask = np.array([True, False, True, False])
    u_masked = units.Quantity(np.ma.array([4., 2., 0., 0.], mask=mask),
                              units('m/s'))
    v_masked = units.Quantity(np.ma.array([0., 2., 4., 0.], mask=mask),
                              units('m/s'))
    computed = wind_direction(u_masked, v_masked)
    expected = units.Quantity(np.ma.array([270., 225., 180., 0.], mask=mask),
                              units.deg)
    assert_array_almost_equal(expected, computed, 4)
def test_direction_with_north_and_calm():
    """Calm winds should report 0 deg; a northerly wind reports 360 deg."""
    u_comp = np.array([0., -0., 0.]) * units('m/s')
    v_comp = np.array([0., 0., -5.]) * units('m/s')
    computed = wind_direction(u_comp, v_comp)
    expected = np.array([0., 0., 360.]) * units.deg
    assert_array_almost_equal(expected, computed, 4)
def test_direction_dimensions():
    """wind_direction must return a quantity in degrees."""
    result = wind_direction(3. * units('m/s'), 4. * units('m/s'))
    assert str(result.units) == 'degree'
def test_oceanographic_direction():
    """The 'to' convention should report where the wind blows toward."""
    result = wind_direction(5 * units('m/s'), -5 * units('m/s'),
                            convention='to')
    assert_almost_equal(result, 135 * units.deg, 4)
def test_invalid_direction_convention():
    """An unknown convention keyword should raise ValueError."""
    with pytest.raises(ValueError):
        wind_direction(1 * units('m/s'), 5 * units('m/s'), convention='test')
def test_speed_direction_roundtrip():
    """Speed/direction -> components -> speed/direction is the identity."""
    # One wind vector in each quadrant of the compass.
    spd_in = np.array([15., 5., 2., 10.]) * units.meters / units.seconds
    dir_in = np.array([160., 30., 225., 350.]) * units.degrees
    u, v = wind_components(spd_in, dir_in)
    assert_array_almost_equal(spd_in, wind_speed(u, v), 4)
    assert_array_almost_equal(dir_in, wind_direction(u, v), 4)
def test_scalar_speed():
    """A 3-4-5 triangle of components should give a speed of 5 m/s."""
    result = wind_speed(-3. * units('m/s'), -4. * units('m/s'))
    assert_almost_equal(result, 5. * units('m/s'), 3)
def test_scalar_direction():
    """Scalar components should give a scalar direction."""
    result = wind_direction(3. * units('m/s'), 4. * units('m/s'))
    assert_almost_equal(result, 216.870 * units.deg, 3)
def test_windchill_scalar():
    """Scalar temperature and wind should give a scalar wind chill."""
    result = windchill(-5 * units.degC, 35 * units('m/s'))
    assert_almost_equal(result, -18.9357 * units.degC, 0)
def test_windchill_basic():
    """Wind chill of array inputs should match the reference values."""
    temps = np.array([40, -10, -45, 20]) * units.degF
    winds = np.array([5, 55, 25, 15]) * units.mph
    result = windchill(temps, winds)
    expected = np.array([36, -46, -84, 6]) * units.degF
    assert_array_almost_equal(result, expected, 0)
def test_windchill_kelvin():
    """Kelvin input should give the same wind chill as -5 degC."""
    result = windchill(268.15 * units.kelvin, 35 * units('m/s'))
    assert_almost_equal(result, -18.9357 * units.degC, 0)
def test_windchill_invalid():
    """Out-of-range temperatures/speeds should come back masked."""
    temps = np.array([10, 51, 49, 60, 80, 81]) * units.degF
    winds = np.array([4, 4, 3, 1, 10, 39]) * units.mph
    result = windchill(temps, winds)
    # Only the first element is defined; the remaining values are masked
    # and their contents do not matter.
    expected = units.Quantity(
        np.ma.array([2.6230789, np.nan, np.nan, np.nan, np.nan, np.nan],
                    mask=[False, True, True, True, True, True]),
        units.degF)
    assert_array_almost_equal(expected, result)
def test_windchill_undefined_flag():
    """mask_undefined=False should leave every element unmasked."""
    temps = units.Quantity(np.ma.array([49, 50, 49, 60, 80, 81]), units.degF)
    winds = units.Quantity([4, 4, 3, 1, 10, 39], units.mph)
    result = windchill(temps, winds, mask_undefined=False)
    expected_mask = np.array([False] * 6)
    assert_array_equal(result.mask, expected_mask)
def test_windchill_face_level():
    """face_level_winds=True should apply the face-level adjustment."""
    temps = np.array([20, 0, -20, -40]) * units.degF
    winds = np.array([15, 30, 45, 60]) * units.mph
    result = windchill(temps, winds, face_level_winds=True)
    expected = np.array([3, -30, -64, -98]) * units.degF
    assert_array_almost_equal(result, expected, 0)
def test_heat_index_basic():
    """Heat index of array inputs should match the reference values."""
    temps = np.array([80, 88, 92, 110, 86]) * units.degF
    humidity = np.array([40, 100, 70, 40, 88]) * units.percent
    result = heat_index(temps, humidity)
    expected = np.array([80, 121, 112, 136, 104]) * units.degF
    assert_array_almost_equal(result, expected, 0)
def test_heat_index_scalar():
    """Scalar inputs should produce a scalar heat index."""
    result = heat_index(96 * units.degF, 65 * units.percent)
    assert_almost_equal(result, 121 * units.degF, 0)
def test_heat_index_invalid():
    """Heat index should be masked where it is undefined."""
    temps = np.array([80, 88, 92, 79, 30, 81]) * units.degF
    humidity = np.array([40, 39, 2, 70, 50, 39]) * units.percent
    result = heat_index(temps, humidity)
    expected_mask = np.array([False, False, False, True, True, False])
    assert_array_equal(result.mask, expected_mask)
def test_heat_index_undefined_flag():
    """mask_undefined=False should leave every element unmasked."""
    temps = units.Quantity(np.ma.array([80, 88, 92, 79, 30, 81]), units.degF)
    humidity = units.Quantity(np.ma.array([40, 39, 2, 70, 50, 39]),
                              units.percent)
    result = heat_index(temps, humidity, mask_undefined=False)
    expected_mask = np.array([False] * 6)
    assert_array_equal(result.mask, expected_mask)
def test_heat_index_units():
    """Celsius input should convert cleanly to a Celsius heat index."""
    temps = units.Quantity([35., 20.], units.degC)
    result = heat_index(temps, 70 * units.percent)
    assert_almost_equal(result.to('degC'),
                        units.Quantity([50.3405, np.nan], units.degC), 4)
def test_heat_index_ratio():
    """Relative humidity given as a [0, 1] ratio should be accepted."""
    temps = units.Quantity([35., 20.], units.degC)
    result = heat_index(temps, 0.7)
    assert_almost_equal(result.to('degC'),
                        units.Quantity([50.3405, np.nan], units.degC), 4)
def test_heat_index_vs_nws():
    """Compare against values from the NWS online heat-index calculator."""
    # https://www.wpc.ncep.noaa.gov/html/heatindex.shtml, visited 2019-Jul-17
    temps = units.Quantity(np.array([86, 111, 40, 96]), units.degF)
    humidity = np.ma.array([45, 27, 99, 60]) * units.percent
    result = heat_index(temps, humidity)
    expected = units.Quantity(np.ma.array([87, 121, 40, 116],
                                          mask=[False, False, True, False]),
                              units.degF)
    assert_array_almost_equal(result, expected, 0)
def test_heat_index_kelvin():
    """Kelvin input should match the equivalent Celsius computation."""
    result = heat_index(308.15 * units.degK, 0.7)
    # NB rounded up test value here vs the above two tests
    assert_almost_equal(result.to('degC'), 50.3406 * units.degC, 4)
def test_height_to_geopotential():
    """Heights of 0-3 km should map to the reference geopotentials."""
    heights = units.Quantity([0, 1000, 2000, 3000], units.m)
    computed = height_to_geopotential(heights)
    expected = units.Quantity([0., 9805, 19607, 29406],
                              units('m**2 / second**2'))
    assert_array_almost_equal(computed, expected, 0)
# See #1075 regarding previous destructive cancellation in floating point
def test_height_to_geopotential_32bit():
    """float32 heights should not lose precision to cancellation."""
    heights = np.linspace(20597, 20598, 11, dtype=np.float32) * units.m
    expected = np.array([201336.64, 201337.62, 201338.6, 201339.58,
                         201340.55, 201341.53, 201342.5, 201343.48,
                         201344.45, 201345.44, 201346.39],
                        dtype=np.float32) * units('J/kg')
    assert_almost_equal(height_to_geopotential(heights), expected, 2)
def test_geopotential_to_height():
    """Reference geopotentials should map back to 0-3 km heights."""
    geopot = units.Quantity([0., 9805.11102602, 19607.14506998,
                             29406.10358006], units('m**2 / second**2'))
    computed = geopotential_to_height(geopot)
    expected = units.Quantity([0, 1000, 2000, 3000], units.m)
    assert_array_almost_equal(computed, expected, 0)
# See #1075 regarding previous destructive cancellation in floating point
def test_geopotential_to_height_32bit():
    """float32 geopotentials should not lose precision to cancellation."""
    geopot = np.arange(201590, 201600, dtype=np.float32) * units('J/kg')
    expected = np.array([20623.000, 20623.102, 20623.203, 20623.307,
                         20623.408, 20623.512, 20623.615, 20623.717,
                         20623.820, 20623.924], dtype=np.float32) * units.m
    assert_almost_equal(geopotential_to_height(geopot), expected, 2)
# class TestIrrad(object):
# def test_basic(self):
# 'Test the basic solar irradiance calculation.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(6,18,10)
# s = solar_irradiance(lat, d, hours)
# values = np.array([0., 344.1, 682.6, 933.9, 1067.6, 1067.6, 933.9,
# 682.6, 344.1, 0.])
# assert_array_almost_equal(s, values, 1)
# def test_scalar(self):
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hour = 9.5
# s = solar_irradiance(lat, d, hour)
# assert_almost_equal(s, 852.1, 1)
# def test_invalid(self):
# 'Test for values that should be masked.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(0,22,12)
# s = solar_irradiance(lat, d, hours)
# mask = np.array([ True, True, True, True, False, False, False,
# False, False, True, True, True])
# assert_array_equal(s.mask, mask)
def test_pressure_to_heights_basic():
    """Standard-atmosphere pressures should map to known heights."""
    press = np.array([975.2, 987.5, 956., 943.]) * units.mbar
    computed = pressure_to_height_std(press)
    expected = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter
    assert_almost_equal(computed, expected, 1)
def test_heights_to_pressure_basic():
    """Test basic height to pressure calculation for standard atmosphere."""
    # Inverse of test_pressure_to_heights_basic: same value pairs, reversed.
    heights = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter
    pressures = height_to_pressure_std(heights)
    values = np.array([975.2, 987.5, 956., 943.]) * units.mbar
    assert_almost_equal(pressures, values, 1)
def test_pressure_to_heights_units():
    """Test that passing non-mbar units works."""
    assert_almost_equal(pressure_to_height_std(29 * units.inHg), 262.8498 * units.meter, 3)
def test_coriolis_force():
    """Test basic coriolis force calculation."""
    # The Coriolis parameter is antisymmetric about the equator and zero there.
    lat = np.array([-90., -30., 0., 30., 90.]) * units.degrees
    cor = coriolis_parameter(lat)
    values = np.array([-1.4584232E-4, -.72921159E-4, 0, .72921159E-4,
                       1.4584232E-4]) * units('s^-1')
    assert_almost_equal(cor, values, 7)
def test_add_height_to_pressure():
    """Test the pressure at height above pressure calculation."""
    pressure = add_height_to_pressure(1000 * units.hPa, 877.17421094 * units.meter)
    assert_almost_equal(pressure, 900 * units.hPa, 2)
def test_add_pressure_to_height():
    """Test the height at pressure above height calculation."""
    height = add_pressure_to_height(110.8286757 * units.m, 100 * units.hPa)
    assert_almost_equal(height, 987.971601 * units.meter, 3)
def test_sigma_to_pressure():
    """Test sigma_to_pressure."""
    # With a zero model-top pressure, pressure reduces to sigma * p_surface.
    surface_pressure = 1000. * units.hPa
    model_top_pressure = 0. * units.hPa
    sigma = np.arange(0., 1.1, 0.1)
    expected = np.arange(0., 1100., 100.) * units.hPa
    pressure = sigma_to_pressure(sigma, surface_pressure, model_top_pressure)
    assert_array_almost_equal(pressure, expected, 5)
def test_warning_dir():
    """Test that warning is raised wind direction > 2Pi."""
    # A bare 270 (no units) is treated as radians, which exceeds 2*pi.
    with pytest.warns(UserWarning):
        wind_components(3. * units('m/s'), 270)
def test_coriolis_warning():
    """Test that warning is raise when latitude larger than pi radians."""
    # Unitless +/-50 is interpreted as radians, well beyond valid latitude.
    with pytest.warns(UserWarning):
        coriolis_parameter(50)
    with pytest.warns(UserWarning):
        coriolis_parameter(-50)
def test_coriolis_units():
    """Test that coriolis returns units of 1/second."""
    f = coriolis_parameter(50 * units.degrees)
    assert f.units == units('1/second')
def test_apparent_temperature():
    """Test the apparent temperature calculation."""
    temperature = np.array([[90, 90, 70],
                            [20, 20, 60]]) * units.degF
    rel_humidity = np.array([[60, 20, 60],
                             [10, 10, 10]]) * units.percent
    wind = np.array([[5, 3, 3],
                     [10, 1, 10]]) * units.mph
    # Masked entries mark conditions where neither heat index nor wind chill
    # applies; there the input temperature itself is the expected value.
    truth = units.Quantity(np.ma.array([[99.6777178, 86.3357671, 70], [8.8140662, 20, 60]],
                                       mask=[[False, False, True], [False, True, True]]),
                           units.degF)
    res = apparent_temperature(temperature, rel_humidity, wind)
    assert_array_almost_equal(res, truth, 6)
def test_apparent_temperature_scalar():
    """Test the apparent temperature calculation with a scalar."""
    temperature = 90 * units.degF
    rel_humidity = 60 * units.percent
    wind = 5 * units.mph
    truth = 99.6777178 * units.degF
    res = apparent_temperature(temperature, rel_humidity, wind)
    assert_almost_equal(res, truth, 6)
def test_apparent_temperature_scalar_no_modification():
    """Test the apparent temperature calculation with a scalar that is NOOP."""
    # 70 degF with these conditions triggers neither heat index nor wind
    # chill, so the input temperature should come back unchanged.
    temperature = 70 * units.degF
    rel_humidity = 60 * units.percent
    wind = 5 * units.mph
    truth = 70 * units.degF
    res = apparent_temperature(temperature, rel_humidity, wind, mask_undefined=False)
    assert_almost_equal(res, truth, 6)
def test_apparent_temperature_windchill():
    """Test that apparent temperature works when a windchill is calculated."""
    temperature = -5. * units.degC
    rel_humidity = 50. * units.percent
    wind = 35. * units('m/s')
    truth = -18.9357 * units.degC
    res = apparent_temperature(temperature, rel_humidity, wind)
    assert_almost_equal(res, truth, 0)
def test_apparent_temperature_mask_undefined_false():
    """Test that apparent temperature works when mask_undefined is False."""
    temp = np.array([80, 55, 10]) * units.degF
    rh = np.array([40, 50, 25]) * units.percent
    wind = np.array([5, 4, 10]) * units('m/s')
    app_temperature = apparent_temperature(temp, rh, wind, mask_undefined=False)
    # With mask_undefined=False the result must be a plain (unmasked) array.
    assert not hasattr(app_temperature, 'mask')
def test_apparent_temperature_mask_undefined_true():
    """Test that apparent temperature works when mask_undefined is True."""
    temp = np.array([80, 55, 10]) * units.degF
    rh = np.array([40, 50, 25]) * units.percent
    wind = np.array([5, 4, 10]) * units('m/s')
    app_temperature = apparent_temperature(temp, rh, wind, mask_undefined=True)
    # Only the middle point falls where neither formula is defined.
    mask = [False, True, False]
    assert_array_equal(app_temperature.mask, mask)
def test_smooth_gaussian():
    """Test the smooth_gaussian function with a larger n."""
    # Input field is s[i, j] = i + j**2; the truth array holds regression
    # values for that field smoothed with n=4.
    m = 10
    s = np.zeros((m, m))
    for i in np.ndindex(s.shape):
        s[i] = i[0] + i[1]**2
    s = smooth_gaussian(s, 4)
    s_true = np.array([[0.40077472, 1.59215426, 4.59665817, 9.59665817, 16.59665817,
                        25.59665817, 36.59665817, 49.59665817, 64.51108392, 77.87487258],
                       [1.20939518, 2.40077472, 5.40527863, 10.40527863, 17.40527863,
                        26.40527863, 37.40527863, 50.40527863, 65.31970438, 78.68349304],
                       [2.20489127, 3.39627081, 6.40077472, 11.40077472, 18.40077472,
                        27.40077472, 38.40077472, 51.40077472, 66.31520047, 79.67898913],
                       [3.20489127, 4.39627081, 7.40077472, 12.40077472, 19.40077472,
                        28.40077472, 39.40077472, 52.40077472, 67.31520047, 80.67898913],
                       [4.20489127, 5.39627081, 8.40077472, 13.40077472, 20.40077472,
                        29.40077472, 40.40077472, 53.40077472, 68.31520047, 81.67898913],
                       [5.20489127, 6.39627081, 9.40077472, 14.40077472, 21.40077472,
                        30.40077472, 41.40077472, 54.40077472, 69.31520047, 82.67898913],
                       [6.20489127, 7.39627081, 10.40077472, 15.40077472, 22.40077472,
                        31.40077472, 42.40077472, 55.40077472, 70.31520047, 83.67898913],
                       [7.20489127, 8.39627081, 11.40077472, 16.40077472, 23.40077472,
                        32.40077472, 43.40077472, 56.40077472, 71.31520047, 84.67898913],
                       [8.20038736, 9.3917669, 12.39627081, 17.39627081, 24.39627081,
                        33.39627081, 44.39627081, 57.39627081, 72.31069656, 85.67448522],
                       [9.00900782, 10.20038736, 13.20489127, 18.20489127, 25.20489127,
                        34.20489127, 45.20489127, 58.20489127, 73.11931702, 86.48310568]])
    assert_array_almost_equal(s, s_true)
def test_smooth_gaussian_small_n():
    """Test the smooth_gaussian function with a smaller n."""
    # Same i + j**2 field as above, smoothed with the minimal n=1.
    m = 5
    s = np.zeros((m, m))
    for i in np.ndindex(s.shape):
        s[i] = i[0] + i[1]**2
    s = smooth_gaussian(s, 1)
    s_true = [[0.0141798077, 1.02126971, 4.02126971, 9.02126971, 15.9574606],
              [1.00708990, 2.01417981, 5.01417981, 10.0141798, 16.9503707],
              [2.00708990, 3.01417981, 6.01417981, 11.0141798, 17.9503707],
              [3.00708990, 4.01417981, 7.01417981, 12.0141798, 18.9503707],
              [4.00000000, 5.00708990, 8.00708990, 13.0070899, 19.9432808]]
    assert_array_almost_equal(s, s_true)
def test_smooth_gaussian_3d_units():
    """Test the smooth_gaussian function with units and a 3D array."""
    # Slices 0 and 2 are scaled by 10; only the unscaled middle slice is
    # checked, so the expected values match the 2D small-n case above.
    m = 5
    s = np.zeros((3, m, m))
    for i in np.ndindex(s.shape):
        s[i] = i[1] + i[2]**2
    s[0::2, :, :] = 10 * s[0::2, :, :]
    s = s * units('m')
    s = smooth_gaussian(s, 1)
    s_true = ([[0.0141798077, 1.02126971, 4.02126971, 9.02126971, 15.9574606],
               [1.00708990, 2.01417981, 5.01417981, 10.0141798, 16.9503707],
               [2.00708990, 3.01417981, 6.01417981, 11.0141798, 17.9503707],
               [3.00708990, 4.01417981, 7.01417981, 12.0141798, 18.9503707],
               [4.00000000, 5.00708990, 8.00708990, 13.0070899, 19.9432808]]) * units('m')
    assert_array_almost_equal(s[1, :, :], s_true)
def test_smooth_n_pt_5():
    """Test the smooth_n_pt function using 5 points."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]])
    shght = smooth_n_point(hght, 5, 1)
    # Boundary rows/columns are unchanged; only the interior is smoothed.
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.75, 5666.375, 5658.875, 5651.],
                       [5728., 5711.5, 5692.75, 5677.75, 5662.],
                       [5772., 5747.25, 5719.125, 5696.625, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]])
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_5_units():
    """Test the smooth_n_pt function using 5 points with units."""
    # Same values as test_smooth_n_pt_5, verifying units pass through.
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    shght = smooth_n_point(hght, 5, 1)
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.75, 5666.375, 5658.875, 5651.],
                       [5728., 5711.5, 5692.75, 5677.75, 5662.],
                       [5772., 5747.25, 5719.125, 5696.625, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_9_units():
    """Test the smooth_n_pt function using 9 points with units."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    shght = smooth_n_point(hght, 9, 1)
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.5, 5666.75, 5658.75, 5651.],
                       [5728., 5711., 5693.5, 5677.5, 5662.],
                       [5772., 5746.5, 5720.25, 5696.25, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]]) * units.meter
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_9_repeat():
    """Test the smooth_n_pt function using 9 points with two passes."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]])
    # passes=2 smooths the result of the first pass a second time.
    shght = smooth_n_point(hght, 9, 2)
    s_true = np.array([[5640., 5640., 5640., 5640., 5640.],
                       [5684., 5675.4375, 5666.9375, 5658.8125, 5651.],
                       [5728., 5710.875, 5693.875, 5677.625, 5662.],
                       [5772., 5746.375, 5720.625, 5696.375, 5673.],
                       [5816., 5784., 5744., 5716., 5684.]])
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_wrong_number():
    """Test the smooth_n_pt function using wrong number of points."""
    hght = np.array([[5640., 5640., 5640., 5640., 5640.],
                     [5684., 5676., 5666., 5659., 5651.],
                     [5728., 5712., 5692., 5678., 5662.],
                     [5772., 5748., 5718., 5697., 5673.],
                     [5816., 5784., 5744., 5716., 5684.]])
    # Only 5- and 9-point smoothers exist; 7 must raise.
    with pytest.raises(ValueError):
        smooth_n_point(hght, 7)
def test_smooth_n_pt_3d_units():
    """Test the smooth_n_point function with a 3D array with units."""
    hght = [[[5640.0, 5640.0, 5640.0, 5640.0, 5640.0],
             [5684.0, 5676.0, 5666.0, 5659.0, 5651.0],
             [5728.0, 5712.0, 5692.0, 5678.0, 5662.0],
             [5772.0, 5748.0, 5718.0, 5697.0, 5673.0],
             [5816.0, 5784.0, 5744.0, 5716.0, 5684.0]],
            [[6768.0, 6768.0, 6768.0, 6768.0, 6768.0],
             [6820.8, 6811.2, 6799.2, 6790.8, 6781.2],
             [6873.6, 6854.4, 6830.4, 6813.6, 6794.4],
             [6926.4, 6897.6, 6861.6, 6836.4, 6807.6],
             [6979.2, 6940.8, 6892.8, 6859.2, 6820.8]]] * units.m
    # Each 2D slice is smoothed independently.
    shght = smooth_n_point(hght, 9, 2)
    s_true = [[[5640., 5640., 5640., 5640., 5640.],
               [5684., 5675.4375, 5666.9375, 5658.8125, 5651.],
               [5728., 5710.875, 5693.875, 5677.625, 5662.],
               [5772., 5746.375, 5720.625, 5696.375, 5673.],
               [5816., 5784., 5744., 5716., 5684.]],
              [[6768., 6768., 6768., 6768., 6768.],
               [6820.8, 6810.525, 6800.325, 6790.575, 6781.2],
               [6873.6, 6853.05, 6832.65, 6813.15, 6794.4],
               [6926.4, 6895.65, 6864.75, 6835.65, 6807.6],
               [6979.2, 6940.8, 6892.8, 6859.2, 6820.8]]] * units.m
    assert_array_almost_equal(shght, s_true)
def test_smooth_n_pt_temperature():
    """Test the smooth_n_pt function with temperature units."""
    # degC exercises the offset-unit (non-multiplicative) code path.
    t = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                  [3.73, 4.93, 6.13, 6.63, 8.23],
                  [3.03, 4.83, 6.03, 7.23, 7.63],
                  [3.33, 4.63, 7.23, 6.73, 6.23],
                  [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    smooth_t = smooth_n_point(t, 9, 1)
    smooth_t_true = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                              [3.73, 4.6425, 5.96125, 6.81124, 8.23],
                              [3.03, 4.81125, 6.1175, 6.92375, 7.63],
                              [3.33, 4.73625, 6.43, 7.3175, 6.23],
                              [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    assert_array_almost_equal(smooth_t, smooth_t_true, 4)
def test_smooth_gaussian_temperature():
    """Test the smooth_gaussian function with temperature units."""
    t = np.array([[2.73, 3.43, 6.53, 7.13, 4.83],
                  [3.73, 4.93, 6.13, 6.63, 8.23],
                  [3.03, 4.83, 6.03, 7.23, 7.63],
                  [3.33, 4.63, 7.23, 6.73, 6.23],
                  [3.93, 3.03, 7.43, 9.23, 9.23]]) * units.degC
    smooth_t = smooth_gaussian(t, 3)
    # Unlike smooth_n_point, the Gaussian smoother also modifies boundaries.
    smooth_t_true = np.array([[2.8892, 3.7657, 6.2805, 6.8532, 5.3174],
                              [3.6852, 4.799, 6.0844, 6.7816, 7.7617],
                              [3.2762, 4.787, 6.117, 7.0792, 7.5181],
                              [3.4618, 4.6384, 6.886, 6.982, 6.6653],
                              [3.8115, 3.626, 7.1705, 8.8528, 8.9605]]) * units.degC
    assert_array_almost_equal(smooth_t, smooth_t_true, 4)
def test_smooth_window():
    """Test smooth_window with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # Window weights only the four diagonal neighbors of each point.
    smoothed = smooth_window(hght, np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]]))
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5675., 5667.5, 5658.5, 5651.],
             [5728., 5710., 5695., 5677., 5662.],
             [5772., 5745., 5722.5, 5695.5, 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth)
def test_smooth_window_1d_dataarray():
    """Test smooth_window on 1D DataArray."""
    temperature = xr.DataArray(
        [37., 32., 34., 29., 28., 24., 26., 24., 27., 30.],
        dims=('time',),
        coords={'time': pd.date_range('2020-01-01', periods=10, freq='H')},
        attrs={'units': 'degF'})
    # normalize_weights=False keeps the raw 1/3 weights (a plain 3-pt mean).
    smoothed = smooth_window(temperature, window=np.ones(3) / 3, normalize_weights=False)
    truth = xr.DataArray(
        [37., 34.33333333, 31.66666667, 30.33333333, 27., 26., 24.66666667,
         25.66666667, 27., 30.] * units.degF,
        dims=('time',),
        coords={'time': pd.date_range('2020-01-01', periods=10, freq='H')}
    )
    xr.testing.assert_allclose(smoothed, truth)
def test_smooth_rectangular():
    """Test smooth_rectangular with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # A 5x3 window on a 5x5 grid leaves only the middle row smoothable.
    smoothed = smooth_rectangular(hght, (5, 3))
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5676., 5666., 5659., 5651.],
             [5728., 5710.66667, 5694., 5677.33333, 5662.],
             [5772., 5748., 5718., 5697., 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth, 4)
def test_smooth_circular():
    """Test smooth_circular with default configuration."""
    hght = [[5640., 5640., 5640., 5640., 5640.],
            [5684., 5676., 5666., 5659., 5651.],
            [5728., 5712., 5692., 5678., 5662.],
            [5772., 5748., 5718., 5697., 5673.],
            [5816., 5784., 5744., 5716., 5684.]] * units.m
    # Radius 2 on a 5x5 grid leaves only the exact center point smoothable.
    smoothed = smooth_circular(hght, 2, 2)
    truth = [[5640., 5640., 5640., 5640., 5640.],
             [5684., 5676., 5666., 5659., 5651.],
             [5728., 5712., 5693.98817, 5678., 5662.],
             [5772., 5748., 5718., 5697., 5673.],
             [5816., 5784., 5744., 5716., 5684.]] * units.m
    assert_array_almost_equal(smoothed, truth, 4)
def test_smooth_window_with_bad_window():
    """Test smooth_window rejects a window with an even dimension."""
    temperature = [37, 32, 34, 29, 28, 24, 26, 24, 27, 30] * units.degF
    # Use pytest.raises(match=...) rather than str(exc): calling str() on the
    # ExceptionInfo object (not exc.value) relies on deprecated behavior and
    # includes the traceback repr rather than just the message.
    with pytest.raises(ValueError, match='must be odd in all dimensions'):
        smooth_window(temperature, np.ones(4))
def test_altimeter_to_station_pressure_inhg():
    """Test the altimeter to station pressure function with inches of mercury."""
    altim = 29.8 * units.inHg
    elev = 500 * units.m
    res = altimeter_to_station_pressure(altim, elev)
    truth = 950.96498 * units.hectopascal
    assert_almost_equal(res, truth, 3)
def test_altimeter_to_station_pressure_hpa():
    """Test the altimeter to station pressure function with hectopascals."""
    altim = 1013 * units.hectopascal
    elev = 500 * units.m
    res = altimeter_to_station_pressure(altim, elev)
    truth = 954.639265 * units.hectopascal
    assert_almost_equal(res, truth, 3)
# NOTE: 'altimiter' is a long-standing typo in this test's name; kept as-is
# so test selection by name keeps working.
def test_altimiter_to_sea_level_pressure_inhg():
    """Test the altimeter to sea level pressure function with inches of mercury."""
    altim = 29.8 * units.inHg
    elev = 500 * units.m
    temp = 30 * units.degC
    res = altimeter_to_sea_level_pressure(altim, elev, temp)
    truth = 1006.089 * units.hectopascal
    assert_almost_equal(res, truth, 3)
def test_altimeter_to_sea_level_pressure_hpa():
    """Test the altimeter to sea level pressure function with hectopascals."""
    altim = 1013 * units.hectopascal
    elev = 500 * units.m
    temp = 0 * units.degC
    res = altimeter_to_sea_level_pressure(altim, elev, temp)
    truth = 1016.246 * units.hectopascal
    assert_almost_equal(res, truth, 3)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train an MLP on MNIST using K-FAC.
This library fits a 3-layer, tanh-activated MLP on MNIST using K-FAC. After
~25k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mnist
lc = tf.contrib.kfac.layer_collection
opt = tf.contrib.kfac.optimizer
__all__ = [
"fc_layer",
"train_mnist",
"train_mnist_multitower",
]
def fc_layer(layer_id, inputs, output_size):
  """Builds a fully connected layer.
  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, input_size]. Each row corresponds
      to a single example.
    output_size: int. Number of output dimensions after fully connected layer.
  Returns:
    preactivations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately before the activation function.
    activations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately after the activation function.
    params: Tuple of (weights, bias), parameters for this layer.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  layer = tf.layers.Dense(
      output_size,
      kernel_initializer=tf.random_normal_initializer(),
      name="fc_%d" % layer_id)
  preactivations = layer(inputs)
  # tanh activation; K-FAC registration needs both pre- and post-activations.
  activations = tf.nn.tanh(preactivations)
  # layer.weights is a list. This converts it into a (hashable) tuple.
  return preactivations, activations, (layer.kernel, layer.bias)
def build_model(examples, labels, num_labels, layer_collection):
  """Builds an MLP classification model.
  Args:
    examples: Tensor of shape [num_examples, num_features]. Represents inputs of
      model.
    labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted
      by softmax for each example.
    num_labels: int. Number of distinct values 'labels' can take on.
    layer_collection: LayerCollection instance describing model architecture.
  Returns:
    loss: 0-D Tensor representing loss to be minimized.
    accuracy: 0-D Tensor representing model's accuracy.
  """
  # Build an MLP. For each layer, we'll keep track of the preactivations,
  # activations, weights, and bias.
  # Hidden sizes 128 -> 64 -> 32, then a final linear layer to the logits.
  pre0, act0, params0 = fc_layer(layer_id=0, inputs=examples, output_size=128)
  pre1, act1, params1 = fc_layer(layer_id=1, inputs=act0, output_size=64)
  pre2, act2, params2 = fc_layer(layer_id=2, inputs=act1, output_size=32)
  logits, _, params3 = fc_layer(layer_id=3, inputs=act2, output_size=num_labels)
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits))
  # Fraction of examples whose argmax over logits equals the integer label.
  accuracy = tf.reduce_mean(
      tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))
  # Register parameters. K-FAC needs to know about the inputs, outputs, and
  # parameters of each layer and the logits powering the posterior probability
  # over classes.
  tf.logging.info("Building LayerCollection.")
  layer_collection.register_fully_connected(params0, examples, pre0)
  layer_collection.register_fully_connected(params1, act0, pre1)
  layer_collection.register_fully_connected(params2, act1, pre2)
  layer_collection.register_fully_connected(params3, act2, logits)
  layer_collection.register_categorical_predictive_distribution(
      logits, name="logits")
  return loss, accuracy
def minimize(loss, accuracy, layer_collection, session_config=None):
  """Minimize 'loss' with KfacOptimizer.
  Args:
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
    layer_collection: LayerCollection instance. Describes layers in model.
    session_config: tf.ConfigProto. Configuration for tf.Session().
  Returns:
    accuracy of classifier on final minibatch.
  """
  # Train with K-FAC. We'll use a decreasing learning rate that's cut in 1/2
  # every 10k iterations.
  tf.logging.info("Building KFAC Optimizer.")
  global_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=tf.train.exponential_decay(
          0.00002, global_step, 10000, 0.5, staircase=True),
      cov_ema_decay=0.95,
      damping=0.0001,
      layer_collection=layer_collection,
      momentum=0.99)
  train_op = optimizer.minimize(loss, global_step=global_step)
  tf.logging.info("Starting training.")
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      # K-FAC has 3 primary ops,
      # - train_op: Update the weights with the minibatch's gradient.
      # - cov_update_op: Update statistics used for building K-FAC's
      #     preconditioner matrix.
      # - inv_update_op: Update preconditioner matrix using statistics.
      #
      # The first 2 of these are cheap and should be done with each step. The
      # latter is more expensive, and should be updated once every ~100
      # iterations.
      global_step_, loss_, accuracy_, _, _ = sess.run(
          [global_step, loss, accuracy, train_op, optimizer.cov_update_op])
      if global_step_ % 100 == 0:
        sess.run(optimizer.inv_update_op)
      if global_step_ % 100 == 0:
        tf.logging.info("global_step: %d | loss: %f | accuracy: %f",
                        global_step_, loss_, accuracy_)
  # NOTE(review): if the training loop above never executes (zero steps),
  # accuracy_ is unbound here and this raises NameError -- confirm callers
  # always run at least one step.
  return accuracy_
def train_mnist(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST.
  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.
  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=64,
      flatten_images=True,
      use_fake_data=use_fake_data)
  # Build an MLP. The model's layers will be added to the LayerCollection.
  tf.logging.info("Building model.")
  layer_collection = lc.LayerCollection()
  loss, accuracy = build_model(examples, labels, 10, layer_collection)
  # Fit model and return the final-minibatch accuracy as the docstring
  # promises (previously minimize()'s return value was discarded, so the
  # function always returned None).
  return minimize(loss, accuracy, layer_collection)
def train_mnist_multitower(data_dir,
                           num_epochs,
                           num_towers,
                           use_fake_data=False):
  """Train an MLP on MNIST, splitting the minibatch across multiple towers.
  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split minibatch across.
    use_fake_data: bool. If True, generate a synthetic dataset.
  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tower_batch_size = 64
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      flatten_images=True,
      use_fake_data=use_fake_data)
  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)
  # Build an MLP. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device("/cpu:%d" % tower_id):
      with tf.name_scope("tower%d" % tower_id):
        # reuse=True for all towers after the first shares one set of model
        # variables across towers (data parallelism).
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)
  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)
  # Fit model.
  session_config = tf.ConfigProto(
      allow_soft_placement=False, device_count={
          "CPU": num_towers
      })
  return minimize(
      loss, accuracy, layer_collection, session_config=session_config)
def train_mnist_estimator(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST using tf.estimator.
  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.
  Returns:
    The trained tf.estimator.Estimator. (Unlike train_mnist(), the final
    minibatch accuracy is not available through the Estimator API; it is
    logged periodically via a LoggingTensorHook instead. The previous
    docstring promised an accuracy that was never returned.)
  """
  # Load a dataset.
  def input_fn():
    tf.logging.info("Loading MNIST into memory.")
    return mnist.load_mnist(
        data_dir,
        num_epochs=num_epochs,
        batch_size=64,
        flatten_images=True,
        use_fake_data=use_fake_data)
  def model_fn(features, labels, mode, params):
    """Model function for MLP trained with K-FAC.
    Args:
      features: Tensor of shape [batch_size, input_size]. Input features.
      labels: Tensor of shape [batch_size]. Target labels for training.
      mode: tf.estimator.ModeKey. Must be TRAIN.
      params: ignored.
    Returns:
      EstimatorSpec for training.
    Raises:
      ValueError: If 'mode' is anything other than TRAIN.
    """
    del params
    if mode != tf.estimator.ModeKeys.TRAIN:
      # Message wording fixed: previously said "supposed" for "supported".
      raise ValueError("Only training is supported with this API.")
    # Build an MLP; its layers get registered into the LayerCollection.
    layer_collection = lc.LayerCollection()
    loss, accuracy = build_model(
        features, labels, num_labels=10, layer_collection=layer_collection)
    # Train with K-FAC.
    global_step = tf.train.get_or_create_global_step()
    optimizer = opt.KfacOptimizer(
        learning_rate=tf.train.exponential_decay(
            0.00002, global_step, 10000, 0.5, staircase=True),
        cov_ema_decay=0.95,
        damping=0.0001,
        layer_collection=layer_collection,
        momentum=0.99)
    # Run cov_update_op every step. Run 1 inv_update_ops per step.
    cov_update_op = optimizer.cov_update_op
    inv_update_op = tf.group(
        tf.contrib.kfac.utils.batch_execute(
            global_step, optimizer.inv_update_thunks, batch_size=1))
    with tf.control_dependencies([cov_update_op, inv_update_op]):
      train_op = optimizer.minimize(loss, global_step=global_step)
    # Print metrics every 5 sec.
    hooks = [
        tf.train.LoggingTensorHook(
            {
                "loss": loss,
                "accuracy": accuracy
            }, every_n_secs=5),
    ]
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, train_op=train_op, training_hooks=hooks)
  run_config = tf.estimator.RunConfig(
      model_dir="/tmp/mnist", save_checkpoints_steps=1, keep_checkpoint_max=100)
  # Train until input_fn() is empty with Estimator. This is a prerequisite for
  # TPU compatibility.
  estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
  estimator.train(input_fn=input_fn)
  # Return the trained estimator so callers can evaluate/predict with it
  # (backward compatible: previous version returned None, which callers
  # could only have ignored).
  return estimator
| |
#!/usr/bin/env python
"""
counts indicies from illumina fastq file
WARNING... INCOMPLETE
"""
import sys
import os
import time
import argparse
import glob
import collections
import ConfigParser
import shlex
import pipes
import subprocess
import tempfile
import re
import errno
import fileinput
import gzip
import itertools
import Levenshtein
### Constants #################################################################
# i7 ("N7xx") index adapter sequences as [name, sequence] pairs.
# These look like Illumina Nextera index kit sequences -- confirm against
# the kit documentation before relying on them.
N7_adapters = [
    ['N701', 'TAAGGCGA'],
    ['N702', 'CGTACTAG'],
    ['N703', 'AGGCAGAA'],
    ['N704', 'TCCTGAGC'],
    ['N705', 'GGACTCCT'],
    ['N706', 'TAGGCATG'],
    ['N707', 'CTCTCTAC'],
    ['N708', 'CAGAGAGG'],
    ['N709', 'GCTACGCT'],
    ['N710', 'CGAGGCTG'],
    ['N711', 'AAGAGGCA'],
    ['N712', 'GTAGAGGA'],
    ]
# i5 ("N5xx") index adapter sequences as [name, sequence] pairs.
N5_adapters = [
    ['N501', 'TAGATCGC'],
    ['N502', 'CTCTCTAT'],
    ['N503', 'TATCCTCT'],
    ['N504', 'AGAGTAGA'],
    ['N505', 'GTAAGGAG'],
    ['N506', 'ACTGCATA'],
    ['N507', 'AAGGAGTA'],
    ['N508', 'CTAAGCCT'],
    ]
################################################################################
#def hamming(str1, str2):
# assert len(str1) == len(str2)
# return sum(c1 != c2 for c1, c2 in itertools.izip(str1, str2))
def num2str(val, none_val='0'):
    """Stringify *val*, substituting *none_val* when it is None."""
    if val is None:
        return none_val
    return str(val)
class ConfigFakeSecHead(object):
    """File-like wrapper that injects a fake '[section]' header line.

    ConfigParser requires at least one section header; wrapping a raw
    key=value file with this class makes it parseable. The header is
    returned by the first readline() call, after which all reads are
    delegated to the wrapped file object.
    """
    def __init__(self, fp, section='DEFAULTS'):
        self.fp = fp
        self.sechead = '[' + str(section) + ']\n'

    def readline(self):
        if self.sechead is None:
            return self.fp.readline()
        header, self.sechead = self.sechead, None
        return header
class CustomArgparseHelpFormatter(argparse.HelpFormatter):
    """Help message formatter for argparse
    combining RawTextHelpFormatter and ArgumentDefaultsHelpFormatter
    """
    def _fill_text(self, text, width, indent):
        # Raw text: keep the author's line breaks, just prepend the indent.
        return ''.join(indent + line for line in text.splitlines(True))

    def _split_lines(self, text, width):
        # Raw text: no re-wrapping to 'width'.
        return text.splitlines()

    def _get_help_string(self, action):
        # Append "(default: ...)" unless the author already mentions it,
        # the default is suppressed, or the action never takes a default.
        text = action.help
        if '%(default)' not in action.help \
                and action.default is not argparse.SUPPRESS \
                and (action.option_strings
                     or action.nargs in (argparse.OPTIONAL, argparse.ZERO_OR_MORE)):
            text += ' (default: %(default)s)'
        return text
################################################################################
def checkAdapterDistances(adapters, calcDistance, max_distance, out=sys.stderr):
    """Print the pairwise distance matrix for adapters and verify that every
    pair of distinct adapters is more than max_distance apart.

    adapters -- list of [name, sequence] pairs
    calcDistance -- function(seq_a, seq_b) -> int distance
    max_distance -- distances <= this between two different adapters are fatal
    out -- stream for the matrix printout, or None to suppress it
    """
    out_width = 6  # column width for the printed matrix
    # foo[i][j] holds the distance between adapter i and adapter j.
    foo = [ [0]*len(adapters) for _ in range(len(adapters)) ]
    for i,a in enumerate(adapters):
        for j,b in enumerate(adapters):
            foo[i][j] = calcDistance(a[1],b[1])
    if( out is not None ): # some fancy-ish output
        # Header row of adapter names, then one matrix row per adapter.
        print >>out, ' '.rjust(out_width),
        for a in adapters:
            print >>out, str(a[0]).rjust(out_width),
        print >>out
        for i,a in enumerate(foo):
            print >>out, str(adapters[i][0]).rjust(out_width),
            for b in a:
                print >>out, str(b).rjust(out_width),
            print >>out
    for i,a in enumerate(foo):
        for j,b in enumerate(a):
            if( i != j and b <= max_distance ):
                print >>sys.stderr, "ERROR: Adapters", adapters[i], "and", adapters[j], "only have distance", b, ", but max-distance is set to", max_distance
                # NOTE(review): exits with status 0 even though this is an
                # error condition; a nonzero exit code seems intended --
                # confirm before depending on the exit status.
                sys.exit(0)
# NOTE: 'exhuastive' is a typo for 'exhaustive'; the name is kept because
# callers reference it.
def exhuastiveMatch(idx_seq, adapters, calcDistance, max_distance, warning_fails=True, verbose=0):
    """exhaustively looks for best match
    return None if there is a tie

    Returns [adapter_index, distance] for the single closest adapter within
    max_distance, or None when there is no hit or the best hit is tied.
    """
    # match holds [adapter_index, distance] of the best hit so far.
    match = None
    tie = False
    for i,a in enumerate(adapters):
        d = calcDistance(idx_seq, a[1])
        if( d <= max_distance ):
            if( match is None ):
                match = [i,d]
            else:
                # A second adapter within max_distance: warn (optionally
                # abort), then keep whichever hit is strictly closer.
                print >>sys.stderr, "WARNING: Multiple matches for",idx_seq
                if( warning_fails ):
                    sys.exit(2)
                if( d == match[1] ):
                    tie = True
                if( d < match[1] ):
                    tie = False
                    match = [i,d]
    if( tie ):
        # Equidistant best matches are ambiguous: report no match.
        match = None
    if( verbose >= 3 ):
        # NOTE(review): i, a and d here refer to the last loop iteration,
        # not the matched adapter -- confirm this is the intended debug info.
        print >>sys.stderr, i, idx_seq, a[1], d
    return match
### Main ######################################################################
def exit(retcode=0):
    # cleanup and exit
    # NOTE: shadows the builtin exit(); logs elapsed wall time (g_start_tic
    # is set by Main) to stderr, then terminates the process.
    global g_start_tic
    print >>sys.stderr, "END", "%0.2f"%(time.time()-g_start_tic),"secs elapsed"
    sys.exit(retcode)
def Main(argv=None):
print >>sys.stderr, "START"
print >>sys.stderr, "WARNING--- INCOMPLETE SCRIPT"
global g_start_tic
g_start_tic = time.time()
# parse cfg_file argument
conf_parser = argparse.ArgumentParser(description=__doc__,
formatter_class=CustomArgparseHelpFormatter,
add_help=False) # turn off help so later parse (with all opts) handles it
conf_parser.add_argument('-c', '--cfg-file', type=argparse.FileType('r'), help="Config file specifiying options/parameters.\nAny long option can be set by remove the leading '--' and replace '-' with '_'")
args, remaining_argv = conf_parser.parse_known_args(argv)
# build the config (read config files)
if args.cfg_file:
cfg = ConfigParser.SafeConfigParser()
cfg.readfp(ConfigFakeSecHead(args.cfg_file))
defaults = dict(cfg.items("DEFAULTS"))
# special handling of paratmeters that need it like lists
if( 'fastq_files' in defaults ): # fastq_files needs to be a list
defaults['fastq_files'] = [ x for x in defaults['fastq_files'].split('\n') if x and x.strip() and not x.strip()[0] in ['#',';'] ]
else:
defaults = {}
# @TCC TEMP WHILE DEVELOPING
#defaults['fastq_files'] = ['/data/archive/2014-07-21/140715_HS3B/Undetermined_indices/Sample_lane1/lane1_Undetermined_L001_R1_001.fastq.gz']
defaults['fastq_files'] = ['/data/archive/2014-07-21/140715_HS3B/Undetermined_indices/Sample_lane2/lane2_Undetermined_L002_R1_001.fastq.gz']
# Parse rest of arguments with a new ArgumentParser
aparser = argparse.ArgumentParser(description=__doc__, parents=[conf_parser], formatter_class=CustomArgparseHelpFormatter)
aparser.add_argument('fastq_files', metavar='fastq-file', nargs='*')
aparser.add_argument('-H', '--hamming', action='store_true', help="Use Hamming distance instead of Levenshtein distance")
aparser.add_argument('-d', '--max-distance', type=int, default=1, help="Maximum distance (errors) allowed to when assigning a read")
aparser.add_argument('-W', '--warning_fails', action='store_true', help="Exit if there is a warning")
aparser.add_argument('-v', '--verbose', action='count', help="Increase verbosity level")
aparser.set_defaults(**defaults)
# process options/arguments
args = aparser.parse_args(remaining_argv)
# custom/more complex argument parsing errors
# need fastq_files list
if( not args.fastq_files ):
aparser.error("Must provide fastq files as arguments and/or in the CFG_FILE (fastq_files parameter)")
## expand potential globs (wildcards) in the bam_files list
# also checks that bam files exist
fastq_files = []
for f in args.fastq_files:
tmp = glob.glob(f)
if( not tmp ):
raise IOError('Cannot find fastq file(s) matching "'+str(f)+'"')
fastq_files.extend(tmp)
# distance method
if( args.hamming ):
calcDistance = Levenshtein.hamming
else:
calcDistance = Levenshtein.distance
# sanity checking the index adapters distances from each other
print >>sys.stderr, "N7 index adapter distances:"
checkAdapterDistances(N7_adapters, calcDistance, args.max_distance)
print >>sys.stderr, "N5 index adapter distances:"
checkAdapterDistances(N5_adapters, calcDistance, args.max_distance)
counts = {}
foo_counter = 0
match_count = 0
for filename in fastq_files:
print >>sys.stderr, "#"*180
print >>sys.stderr, "Reading:", f
with gzip.open(filename) as f:
tmp = 4e6
in_block_count = 0 # counter used to divide up blocks
for l in f:
if( in_block_count == 0 ): # header line is first line in block
#sys.stdout.write(l)
idx = l.split(':')[-1][:-1]
# count the indices
if( idx in counts ):
counts[idx] += 1
else:
counts[idx] = 1
assert len(idx) == 16, 'Only setup for dual-indexing (N7+N5 length should be 16)'
N7 = idx[0:8]
N5 = idx[8:]
N7_match = exhuastiveMatch(N7, N7_adapters, calcDistance, args.max_distance, args.warning_fails, verbose=args.verbose)
N5_match = exhuastiveMatch(N5, N5_adapters, calcDistance, args.max_distance, args.warning_fails, verbose=args.verbose)
if( args.verbose > 1 ):
print >>sys.stderr, N7, N5, N7_match, N5_match
if( N7_match is not None and N5_match is not None ):
print >>sys.stderr, "*** MATCH ***", N7, N5, N7_match, N5_match
print >>sys.stderr, " ", N7_adapters[N7_match[0]][1], N5_adapters[N5_match[0]][1], N7_adapters[N7_match[0]][0], N5_adapters[N5_match[0]][0]
match_count += 1
foo_counter += 1
if( foo_counter > 1e6 ):
print >>sys.stderr, match_count, "matches"
sys.exit(0)
in_block_count += 1
if( in_block_count > 3 ): # blocks are 4 lines long
in_block_count = 0
# @TCC temp only look at part of file
tmp -= 1
if( tmp < 0 ):
break
sorted_list = [(k,v) for v,k in sorted( [(v,k) for k,v in counts.items()],reverse=True) ]
for v in sorted_list[0:10]:
print v
# Cleanup and end normally
exit()
## functions #############################################################################
#########################################################################
# Entry-point hook: run Main() when executed as a script; when imported,
# this file just provides its definitions as a module.
if __name__ == "__main__":
    sys.exit(Main(argv=None))
| |
#!/usr/bin/python3 -i
#
# Copyright (c) 2013-2020 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base class for working-group-specific style conventions,
# used in generation.
from enum import Enum
# Type categories that respond "False" to isStructAlwaysValid.
# 'basetype' is home to typedefs like ..Bool32.
CATEGORIES_REQUIRING_VALIDATION = {
    'handle',
    'enum',
    'bitmask',
    'basetype',
    None,
}

# These are basic C types pulled in via openxr_platform_defines.h;
# they never require validation.
TYPES_KNOWN_ALWAYS_VALID = {
    'char',
    'float',
    'int8_t', 'uint8_t',
    'int32_t', 'uint32_t',
    'int64_t', 'uint64_t',
    'size_t',
    'uintptr_t',
    'int',
}
class ProseListFormats(Enum):
    """A connective, possibly with a quantifier."""
    AND = 0
    EACH_AND = 1
    OR = 2
    ANY_OR = 3

    @classmethod
    def from_string(cls, s):
        """Map the plain connectives 'and'/'or' to a format; None otherwise."""
        return {'and': cls.AND, 'or': cls.OR}.get(s)

    @property
    def connective(self):
        """The joining word inserted before the final list element."""
        return 'or' if self in (ProseListFormats.OR, ProseListFormats.ANY_OR) else 'and'

    def quantifier(self, n):
        """Return the desired quantifier for a list of a given length."""
        if self is ProseListFormats.ANY_OR and n > 1:
            return 'any of '
        if self is ProseListFormats.EACH_AND:
            if n > 2:
                return 'each of '
            if n == 2:
                return 'both of '
        return ''
class ConventionsBase:
    """WG-specific style conventions, used during spec generation.

    Subclasses override the NotImplementedError members to describe a
    particular working group's naming and markup rules.
    """

    def __init__(self):
        # Caches for the lazily-computed command_prefix / type_prefix.
        self._command_prefix = None
        self._type_prefix = None

    def formatExtension(self, name):
        """Mark up an extension name as a link in the spec."""
        return '`apiext:{}`'.format(name)

    @property
    def null(self):
        """Preferred spelling of NULL.

        Must implement."""
        raise NotImplementedError

    def makeProseList(self, elements, fmt=ProseListFormats.AND, with_verb=False, *args, **kwargs):
        """Make a (comma-separated) list for use in prose.

        Adds a connective (by default, 'and')
        before the last element if there are more than 1.

        Adds the right one of "is" or "are" to the end if with_verb is true.

        Optionally adds a quantifier (like 'any') before a list of 2 or more,
        if specified by fmt.

        Override with a different method or different call to
        _implMakeProseList if you want to add a comma for two elements,
        or not use a serial comma.
        """
        return self._implMakeProseList(elements, fmt, with_verb, *args, **kwargs)

    @property
    def struct_macro(self):
        """Get the appropriate format macro for a structure.

        May override.
        """
        return 'slink:'

    @property
    def external_macro(self):
        """Get the appropriate format macro for an external type like uint32_t.

        May override.
        """
        return 'code:'

    def makeStructName(self, name):
        """Prepend the appropriate format macro for a structure to a structure type name.

        Uses struct_macro, so just override that if you want to change behavior.
        """
        return self.struct_macro + name

    def makeExternalTypeName(self, name):
        """Prepend the appropriate format macro for an external type like uint32_t to a type name.

        Uses external_macro, so just override that if you want to change behavior.
        """
        return self.external_macro + name

    def _implMakeProseList(self, elements, fmt, with_verb, comma_for_two_elts=False, serial_comma=True):
        """Internal-use implementation to make a (comma-separated) list for use in prose.

        Adds a connective (by default, 'and')
        before the last element if there are more than 1,
        and only includes commas if there are more than 2
        (if comma_for_two_elts is False).

        Adds the right one of "is" or "are" to the end if with_verb is true.

        Optionally adds a quantifier (like 'any') before a list of 2 or more,
        if specified by fmt.

        Don't edit these defaults, override self.makeProseList().

        Raises NotImplementedError for serial_comma=False (never needed so
        far) and ValueError for an unrecognized string fmt.
        """
        if not serial_comma:
            # Explicit raise instead of the old assert(), which is silently
            # stripped when Python runs with -O.
            raise NotImplementedError('non-serial-comma lists are not implemented')

        if isinstance(fmt, str):
            parsed_fmt = ProseListFormats.from_string(fmt)
            if parsed_fmt is None:
                # Fail with a clear message rather than an AttributeError
                # when fmt.connective is accessed below.
                raise ValueError('unrecognized prose list format: {}'.format(fmt))
            fmt = parsed_fmt

        my_elts = list(elements)
        if len(my_elts) > 1:
            my_elts[-1] = '{} {}'.format(fmt.connective, my_elts[-1])

        # Two elements get a plain space unless the caller asked for a comma.
        if not comma_for_two_elts and len(my_elts) <= 2:
            prose = ' '.join(my_elts)
        else:
            prose = ', '.join(my_elts)

        quantifier = fmt.quantifier(len(my_elts))

        parts = [quantifier, prose]

        if with_verb:
            parts.append(' are' if len(my_elts) > 1 else ' is')
        return ''.join(parts)

    @property
    def file_suffix(self):
        """Return suffix of generated Asciidoctor files"""
        raise NotImplementedError

    def api_name(self, spectype=None):
        """Return API or specification name for citations in ref pages.

        spectype is the spec this refpage is for.
        'api' (the default value) is the main API Specification.
        If an unrecognized spectype is given, returns None.

        Must implement."""
        raise NotImplementedError

    def should_insert_may_alias_macro(self, genOpts):
        """Return true if we should insert a "may alias" macro in this file.

        Only used by OpenXR right now."""
        return False

    @property
    def command_prefix(self):
        """Return the expected prefix of commands/functions.

        Implemented in terms of api_prefix."""
        if not self._command_prefix:
            # e.g. 'XR_' -> 'xr' (drop underscores, lowercase).
            self._command_prefix = self.api_prefix.replace('_', '').lower()
        return self._command_prefix

    @property
    def type_prefix(self):
        """Return the expected prefix of type names.

        Implemented in terms of command_prefix (and in turn, api_prefix)."""
        if not self._type_prefix:
            # Capitalize the first letter of command_prefix, e.g. 'xr' -> 'Xr'.
            self._type_prefix = ''.join(
                (self.command_prefix[0:1].upper(), self.command_prefix[1:]))
        return self._type_prefix

    @property
    def api_prefix(self):
        """Return API token prefix.

        Typically two uppercase letters followed by an underscore.

        Must implement."""
        raise NotImplementedError

    @property
    def api_version_prefix(self):
        """Return API core version token prefix.

        Implemented in terms of api_prefix.

        May override."""
        return self.api_prefix + 'VERSION_'

    @property
    def KHR_prefix(self):
        """Return extension name prefix for KHR extensions.

        Implemented in terms of api_prefix.

        May override."""
        return self.api_prefix + 'KHR_'

    @property
    def EXT_prefix(self):
        """Return extension name prefix for EXT extensions.

        Implemented in terms of api_prefix.

        May override."""
        return self.api_prefix + 'EXT_'

    def writeFeature(self, featureExtraProtect, filename):
        """Return True if OutputGenerator.endFeature should write this feature.

        Defaults to always True.
        Used in COutputGenerator.

        May override."""
        return True

    def requires_error_validation(self, return_type):
        """Return True if the return_type element is an API result code
        requiring error validation.

        Defaults to always False.

        May override."""
        return False

    @property
    def required_errors(self):
        """Return a list of required error codes for validation.

        Defaults to an empty list.

        May override."""
        return []

    def is_voidpointer_alias(self, tag, text, tail):
        """Return True if the declaration components (tag,text,tail) of an
        element represents a void * type.

        Defaults to a reasonable implementation.

        May override."""
        return tag == 'type' and text == 'void' and tail.startswith('*')

    def make_voidpointer_alias(self, tail):
        """Reformat a void * declaration to include the API alias macro.

        Defaults to a no-op.

        Must override if you actually want to use this feature in your project."""
        return tail

    def category_requires_validation(self, category):
        """Return True if the given type 'category' always requires validation.

        Defaults to a reasonable implementation.

        May override."""
        return category in CATEGORIES_REQUIRING_VALIDATION

    def type_always_valid(self, typename):
        """Return True if the given type name is always valid (never requires validation).

        This is for things like integers.

        Defaults to a reasonable implementation.

        May override."""
        return typename in TYPES_KNOWN_ALWAYS_VALID

    @property
    def should_skip_checking_codes(self):
        """Return True if more than the basic validation of return codes should
        be skipped for a command."""
        return False

    @property
    def generate_index_terms(self):
        """Return True if asciidoctor index terms should be generated as part
        of an API interface from the docgenerator."""
        return False

    @property
    def generate_enum_table(self):
        """Return True if asciidoctor tables describing enumerants in a
        group should be generated as part of group generation."""
        return False

    @property
    def generate_max_enum_in_docs(self):
        """Return True if MAX_ENUM tokens should be generated in
        documentation includes."""
        return False

    def extension_include_string(self, ext):
        """Return format string for include:: line for an extension appendix
        file. ext is an object with the following members:

        - name - extension name string
        - vendor - vendor portion of name
        - barename - remainder of name

        Must implement."""
        raise NotImplementedError

    @property
    def refpage_generated_include_path(self):
        """Return path relative to the generated reference pages, to the
        generated API include files.

        Must implement."""
        raise NotImplementedError

    def valid_flag_bit(self, bitpos):
        """Return True if bitpos is an allowed numeric bit position for
        an API flag.

        Behavior depends on the data type used for flags (which may be 32
        or 64 bits), and may depend on assumptions about compiler
        handling of sign bits in enumerated types, as well."""
        return True
| |
# -*- coding: utf-8 -*-
#
import requests
# Numeric error codes returned by the SavingStar API, mapped to their
# human-readable descriptions (used by callers to translate responses).
errors = {
    -1: 'unexpected error',
    1: 'validation error',
    2: 'email_address not specified',
    3: 'password not specified',
    4: 'invalid email address password combination',
    5: 'both zip code and lat lon specified',
    6: 'lat specified without lon',
    7: 'lon specified without lat',
    8: 'zip code must be 5 digits long',
    9: 'invalid lat value',
    10: 'invalid lon value',
    11: 'invalid radius value',
    12: 'unknown user_id',
    13: 'unknown user_id_proxy',
    14: 'both user_id and user_id_proxy specified',
    15: 'neither user_id nor user_id_proxy specified',
    16: 'unknown tracking id',
    17: 'permission denied, not owner of tracking id',
    18: 'unknown namespace',
    19: 'duplicate tracking id',
    20: 'namespace not specified',
    21: 'value not specified',
    22: 'id not specified',
    23: 'unknown coupon id',
    24: 'coupon id not specified',
    25: 'coupon expired',
    26: 'coupon activation cap reached',
    27: 'coupon already activated by user',
    28: 'user must have at least one tracking id registered to activate coupon',
    29: 'retailer id not specified',
    30: 'unknown retailer id',
    31: 'coupon activated on the same card in another service',
    32: 'user_id_proxy not specified',
    33: 'duplicate user_id_proxy',
    34: 'email address is already associated with an account',
    37: 'the eCoupon could not be activated because it is not available at: Store 1, Store 2, ...',
    38: 'user_id not specified',
    50: 'payout_type not specified',
    51: 'invalid payout type',
    52: 'amount not specified',
    53: 'invalid amount',
    54: 'insufficient funds',
    55: 'paypal_email_address not specified',
    56: 'invalid paypal_email_address',
    57: 'bank_account_type not specified',
    58: 'invalid bank_account_type',
    59: 'bank_routing_number not specified',
    60: 'invalid bank_routing_number',
    61: 'bank_account_number not specified',
    62: 'invalid bank_account_number',
    63: 'email address not confirmed',
    64: 'tracking id already registered in another account',
    70: 'invalid product',
    80: 'fb_uid not specified',
    81: 'Unable to save the Facebook UID',
}
class SavingStarAPI(object):
    """Thin client for the SavingStar REST API (v2).

    Request helpers return the decoded JSON body on success (HTTP 200);
    for other statuses they return the service's JSON error payload, or a
    synthesized ``{'error': {'code': ..., 'message': ...}}`` dict when the
    body is not JSON.
    """

    def __init__(self, access_token=None, client_id=None, client_secret=None):
        """Create a client.

        If access_token is not supplied, exchanges client_id/client_secret
        for one via the OAuth endpoint (network call).
        """
        self.access_token = access_token
        self.client_id = client_id
        self.client_secret = client_secret
        # OAuth
        #
        if self.access_token is None:
            token_resp = self._getOAuthToken()
            if 'access_token' in token_resp:
                self.access_token = token_resp['access_token']

    def getAccessToken(self):
        """Return the OAuth access token currently in use (may be None)."""
        return self.access_token

    def _getOAuthToken(self):
        """Exchange client credentials for an access token."""
        url = "https://api.savingstar.com/oauth/access_token"
        payload = {'client_id': self.client_id,
                   'client_secret': self.client_secret,
                   'grant_type': 'none'
                   }
        return self._doPost(url, payload)

    # Internal request plumbing
    #
    def _authHeaders(self, content_type=False):
        """Build the standard OAuth/Accept headers; optionally the
        form-encoded Content-Type used by POST/DELETE."""
        headers = {"Authorization": "OAuth " + self.access_token,
                   "Accept": "application/vnd.savingstar; version=2"
                   }
        if content_type:
            headers["Content-Type"] = "application/x-www-form-urlencoded"
        return headers

    def _handleResponse(self, resp):
        """Decode a response as JSON.

        On HTTP 200 the body is decoded directly (a non-JSON 200 body
        raises ValueError, matching the original per-method behavior).
        Otherwise the error body is decoded, falling back to a synthesized
        error dict when it is not JSON.
        """
        if resp.status_code == 200:
            return resp.json()
        try:
            return resp.json()
        except ValueError:
            return {'error': {'code': resp.status_code,
                              'message': resp.text
                              }
                    }

    def _doGet(self, url):
        resp = requests.get(url, headers=self._authHeaders())
        return self._handleResponse(resp)

    def _doPost(self, url, payload):
        # No Authorization header before a token exists (the OAuth exchange).
        if self.access_token is not None:
            resp = requests.post(url, headers=self._authHeaders(content_type=True), data=payload)
        else:
            resp = requests.post(url, data=payload)
        return self._handleResponse(resp)

    def _doDelete(self, url, payload):
        if self.access_token is not None:
            resp = requests.delete(url, headers=self._authHeaders(content_type=True), data=payload)
        else:
            resp = requests.delete(url, data=payload)
        return self._handleResponse(resp)

    # Coupon-related
    #
    def getAllCoupons(self):
        """List all coupons."""
        url = "https://api.savingstar.com/coupons.json"
        return self._doGet(url)

    def activateCoupon(self,
                       coupon_id,
                       user_id=None,
                       user_id_proxy=None,
                       channel=1):
        '''Activate a coupon for a user.

        Note:
        for channel:
            1 - web
            2 - iPhone
            3 - android
        '''
        url = "https://api.savingstar.com/coupons.json"
        # BUGFIX: user_id_proxy was accepted but silently ignored; honor the
        # user_id / user_id_proxy alternative like every other method here.
        if user_id_proxy is None:
            payload = {'id': coupon_id,
                       'user_id': user_id,
                       'channel': channel
                       }
        else:
            payload = {'id': coupon_id,
                       'user_id_proxy': user_id_proxy,
                       'channel': channel
                       }
        return self._doPost(url, payload)

    def externalCouponActivate(self,
                               external_coupon_id,
                               tracking_id_value,
                               retailer_id):
        """Activate an external coupon directly against a tracking id."""
        url = "https://api.savingstar.com/coupons/external_activation.json"
        payload = {'external_coupon_id': external_coupon_id,
                   'tracking_id_value': tracking_id_value,
                   'retailer_id': retailer_id
                   }
        return self._doPost(url, payload)

    def getCouponStatuses(self):
        """List the status of all coupons."""
        url = "https://api.savingstar.com/coupons/status.json"
        return self._doGet(url)

    def couponProductSearch(self, coupon_id, product):
        """Search a coupon's products for a product string."""
        url = "https://api.savingstar.com/coupons/{0}/products.json?product={1}".format(coupon_id, product)
        return self._doGet(url)

    def getIncludedProducts(self, coupon_id):
        """List the products included in a coupon."""
        url = "https://api.savingstar.com/coupons/{0}/included_products.json".format(coupon_id)
        return self._doGet(url)

    # Retailer_Location-related
    #
    def getRetailerLocationsByZip(self, zipcode, radius=30):
        url = "https://api.savingstar.com/retailer_locations.json?zipcode={0}&radius={1}".format(zipcode, radius)
        return self._doGet(url)

    def getRetailerLocationsByLatLng(self, lat, lng, radius=30):
        url = "https://api.savingstar.com/retailer_locations.json?lat={0}&lon={1}&radius={2}".format(lat, lng, radius)
        return self._doGet(url)

    # Retailer-related
    #
    def getAllRetailers(self):
        url = "https://api.savingstar.com/retailers.json"
        return self._doGet(url)

    def getRetailer(self, retailer_id):
        # NOTE(review): URL has no '/retailers/' segment, unlike the other
        # retailer endpoints -- confirm this is the intended route.
        url = "https://api.savingstar.com/{0}.json".format(retailer_id)
        return self._doGet(url)

    def retailerGeographicalSearch(self,
                                   lat=None,
                                   lng=None,
                                   zipcode=None,
                                   radius=10):
        """Search retailers near a zipcode, or near lat/lng when no zipcode."""
        if zipcode is None:
            url = "https://api.savingstar.com/retailers.json?lat={0}&lon={1}&radius={2}".format(lat, lng, radius)
        else:
            url = "https://api.savingstar.com/retailers.json?zipcode={0}&radius={1}".format(zipcode, radius)
        return self._doGet(url)

    def retailerSpecificLocationGeographicalSearch(self,
                                                   retailer_id,
                                                   lat=None,
                                                   lng=None,
                                                   zipcode=None,
                                                   radius=10):
        """Search a specific retailer's locations near a zipcode or lat/lng."""
        if zipcode is None:
            url = "https://api.savingstar.com/retailers/{0}/retailer_locations.json?lat={1}&lon={2}&radius={3}".format(retailer_id, lat, lng, radius)
        else:
            url = "https://api.savingstar.com/retailers/{0}/retailer_locations.json?zipcode={1}&radius={2}".format(retailer_id, zipcode, radius)
        return self._doGet(url)

    # Tracking_id-related
    #
    def getRegisteredTrackingIds(self, user_id=None, user_id_proxy=None):
        if user_id_proxy is None:
            url = "https://api.savingstar.com/tracking_ids.json?user_id={0}".format(user_id)
        else:
            url = "https://api.savingstar.com/tracking_ids.json?user_id_proxy={0}".format(user_id_proxy)
        return self._doGet(url)

    def getRegisteredTrackingId(self,
                                tracking_id,
                                user_id=None,
                                user_id_proxy=None):
        # NOTE(review): URL has no '/tracking_ids/' segment -- confirm route.
        if user_id_proxy is None:
            url = "https://api.savingstar.com/{0}.json?user_id={1}".format(tracking_id, user_id)
        else:
            url = "https://api.savingstar.com/{0}.json?user_id_proxy={1}".format(tracking_id, user_id_proxy)
        return self._doGet(url)

    def registerTrackingId(self,
                           namespace,
                           retailer_id,
                           value,
                           user_id=None,
                           user_id_proxy=None):
        url = "https://api.savingstar.com/tracking_ids.json"
        if user_id_proxy is None:
            payload = {'user_id': user_id}
        else:
            payload = {'user_id_proxy': user_id_proxy}
        payload['namespace'] = namespace
        payload['retailer_id'] = retailer_id
        payload['value'] = value
        return self._doPost(url, payload)

    def removeTrackingId(self, id, user_id=None, user_id_proxy=None):
        url = "https://api.savingstar.com/tracking_ids.json"
        if user_id_proxy is None:
            payload = {'user_id': user_id}
        else:
            payload = {'user_id_proxy': user_id_proxy}
        payload['id'] = id
        return self._doDelete(url, payload)

    # User-related
    #
    def createUser(self, email_address, password):
        url = "https://api.savingstar.com/users.json"
        payload = {'email_address': email_address,
                   'password': password
                   }
        return self._doPost(url, payload)

    def authenticateUser(self, email_address, password):
        url = "https://api.savingstar.com/auth.json"
        payload = {'email_address': email_address,
                   'password': password
                   }
        return self._doPost(url, payload)

    def resetPassword(self, user_id):
        url = "https://api.savingstar.com/reset_password.json"
        payload = {'user_id': user_id}
        return self._doPost(url, payload)

    def createProxyUser(self, user_id_proxy):
        url = "https://api.savingstar.com/proxy.json"
        payload = {'user_id_proxy': user_id_proxy}
        return self._doPost(url, payload)

    def getAccountDetails(self, user_id=None, user_id_proxy=None):
        if user_id_proxy is None:
            url = "https://api.savingstar.com/account.json?user_id={0}".format(user_id)
        else:
            url = "https://api.savingstar.com/account.json?user_id_proxy={0}".format(user_id_proxy)
        return self._doGet(url)

    def getAccountHistory(self, user_id=None, user_id_proxy=None, lookback=90):
        if user_id_proxy is None:
            url = "https://api.savingstar.com/history.json?user_id={0}&lookback={1}".format(user_id, lookback)
        else:
            url = "https://api.savingstar.com/history.json?user_id_proxy={0}&lookback={1}".format(user_id_proxy, lookback)
        return self._doGet(url)

    def requestPayout(self,
                      payout_type,
                      amount,
                      user_id=None,
                      user_id_proxy=None,
                      paypal_email_address=None,
                      bank_account_type=None,
                      bank_account_number=None,
                      bank_routing_number=None):
        """Request a payout via 'paypal' or 'ach' (bank transfer)."""
        url = "https://api.savingstar.com/payout_request.json"
        if user_id_proxy is None:
            payload = {'user_id': user_id}
        else:
            payload = {'user_id_proxy': user_id_proxy}
        payload['payout_type'] = payout_type
        payload['amount'] = amount
        if payout_type == 'paypal':
            payload['paypal_email_address'] = paypal_email_address
        if payout_type == 'ach':
            payload['bank_account_type'] = bank_account_type
            payload['bank_account_number'] = bank_account_number
            payload['bank_routing_number'] = bank_routing_number
        return self._doPost(url, payload)
| |
"""Verilog simulator modules."""
from __future__ import absolute_import
import subprocess
import sys
import os
import ctypes
from .. import _api_internal
from .._ffi.base import string_types
from .._ffi.node import NodeBase, register_node
from .._ffi.function import register_func
from . import util
@register_node
class VPISession(NodeBase):
    """Verilog simulation session backed by a vvp subprocess."""

    def __init__(self, handle):
        super(VPISession, self).__init__(handle)
        # Simulator subprocess; attached by session() after construction.
        self.proc = None
        # Temp directory of generated files; attached by session().
        self.execpath = None
        # Callables invoked before every yield_until_next_cycle().
        self.yield_callbacks = []

    def __del__(self):
        # BUGFIX: proc is None until session() attaches the subprocess;
        # calling kill() on None raised AttributeError inside the destructor.
        if self.proc is not None:
            self.proc.kill()
        try:
            super(VPISession, self).__del__()
        except AttributeError:
            pass

    def arg(self, index):
        """Get handle passed to host session.

        Parameters
        ----------
        index : int
            The index value.

        Returns
        -------
        handle : VPIHandle
            The handle
        """
        return _api_internal._vpi_SessGetArg(self, index)

    def __getitem__(self, name):
        if not isinstance(name, string_types):
            raise ValueError("have to be string types")
        return _api_internal._vpi_SessGetHandleByName(self, name)

    def __getattr__(self, name):
        # Any attribute not found on the instance is resolved as a verilog
        # handle lookup by name (e.g. sess.main, sess.rst).
        return _api_internal._vpi_SessGetHandleByName(self, name)

    def yield_until_next_cycle(self):
        """Yield until next posedge, running registered callbacks first."""
        for f in self.yield_callbacks:
            f()
        return _api_internal._vpi_SessYield(self)

    def shutdown(self):
        """Shutdown the simulator"""
        return _api_internal._vpi_SessShutdown(self)
@register_node
class VPIHandle(NodeBase):
    """Handle to a verilog variable inside a VPI session."""

    def __init__(self, handle):
        super(VPIHandle, self).__init__(handle)
        # Lazily-fetched caches for the name/size properties below.
        self._name = None
        self._size = None

    def get_int(self):
        """Read the current integer value held by this handle."""
        return _api_internal._vpi_HandleGetInt(self)

    def put_int(self, value):
        """Drive this handle with an integer value.

        Parameters
        ----------
        value : int
            The value to put
        """
        return _api_internal._vpi_HandlePutInt(self, value)

    @property
    def name(self):
        """Name of the variable (fetched once, then cached)."""
        cached = self._name
        if cached is None:
            cached = _api_internal._vpi_HandleGetName(self)
            self._name = cached
        return cached

    @property
    def size(self):
        """Size of the variable (fetched once, then cached)."""
        cached = self._size
        if cached is None:
            cached = _api_internal._vpi_HandleGetSize(self)
            self._size = cached
        return cached

    def __getitem__(self, name):
        if not isinstance(name, string_types):
            raise ValueError("have to be string types")
        return _api_internal._vpi_HandleGetHandleByName(self, name)

    def __getattr__(self, name):
        # Unknown attributes resolve to child handles by name.
        return _api_internal._vpi_HandleGetHandleByName(self, name)
def _find_vpi_path():
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
api_path = os.path.join(curr_path, '../../../lib/')
vpi_path = [curr_path, api_path]
vpi_path = [os.path.join(p, 'tvm_vpi.vpi') for p in vpi_path]
vpi_found = [p for p in vpi_path if os.path.exists(p) and os.path.isfile(p)]
if vpi_found:
return os.path.dirname(vpi_found[0])
else:
raise ValueError("Cannot find tvm_vpi.vpi, make sure you did `make verilog`")
def search_path():
    """Return the list of directories searched for verilog sources."""
    base = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    relative_dirs = (
        '../../../verilog/',
        '../../../tests/verilog/unittest/',
        '../../../tests/verilog/integration/',
    )
    return [os.path.join(base, rel) for rel in relative_dirs]
def find_file(file_name):
    """Find file in the search directories.

    Parameters
    ----------
    file_name : str
        The file name

    Return
    ------
    file_name : str
        The absolute path to the file, raise Error if cannot find it.
    """
    candidates = [os.path.join(d, file_name) for d in search_path()]
    for candidate in candidates:
        if os.path.exists(candidate) and os.path.isfile(candidate):
            return candidate
    raise ValueError("Cannot find %s in %s" % (file_name, candidates))
def compile_file(file_name, file_target, options=None):
    """Compile verilog source(s) into file_target via iverilog.

    Parameters
    ----------
    file_name : str or list of str
        The verilog source file(s).

    file_target : str
        The target file.

    options : list of str, optional
        Extra command-line flags for iverilog.
    """
    sources = [file_name] if isinstance(file_name, string_types) else file_name
    # every search directory becomes an include path
    cmd = ["iverilog"]
    cmd.extend("-I%s" % path for path in search_path())
    cmd.extend(["-o", file_target])
    if options:
        cmd.extend(options)
    cmd.extend(sources)
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        raise ValueError("Compilation error:\n%s" % out)
def session(file_names, codes=None):
    """Create a new iverilog session by compile the file.

    Compiles the sources (plus any inline code written to a temp file),
    then launches vvp with the tvm_vpi plugin, wired to the host via a
    pair of pipes passed through environment variables.

    Parameters
    ----------
    file_names : str or list of str
        The name of the file

    codes : str or list of str
        The code in str.

    Returns
    -------
    sess : VPISession
        The created session.
    """
    if isinstance(file_names, string_types):
        file_names = [file_names]
    path = util.tempdir()
    if codes:
        if isinstance(codes, (list, tuple)):
            codes = '\n'.join(codes)
        # inline code is compiled from a temp file alongside the sources
        fcode = path.relpath("temp_code.v")
        with open(fcode, "w") as out_file:
            out_file.write(codes)
        file_names.append(fcode)
    for name in file_names:
        if not os.path.exists(name):
            raise ValueError("Cannot find file %s" % name)
    # output binary named after the first source file (extension stripped)
    target = path.relpath(os.path.basename(file_names[0].rsplit(".", 1)[0]))
    compile_file(file_names, target)
    vpi_path = _find_vpi_path()
    # run the compiled design under vvp with the tvm_vpi plugin loaded
    cmd = ["vvp"]
    cmd += ["-M", vpi_path]
    cmd += ["-m", "tvm_vpi"]
    cmd += [target]
    env = os.environ.copy()
    # two unidirectional pipes: device->host and host->device; the device
    # ends are advertised to the plugin via environment variables
    read_device, write_host = os.pipe()
    read_host, write_device = os.pipe()
    if sys.platform == "win32":
        import msvcrt
        env['TVM_DREAD_PIPE'] = str(msvcrt.get_osfhandle(read_device))
        env['TVM_DWRITE_PIPE'] = str(msvcrt.get_osfhandle(write_device))
        read_host = msvcrt.get_osfhandle(read_host)
        write_host = msvcrt.get_osfhandle(write_host)
    else:
        env['TVM_DREAD_PIPE'] = str(read_device)
        env['TVM_DWRITE_PIPE'] = str(write_device)
        # NOTE(review): the host-side pipe variables are only exported on
        # the non-win32 branch here -- confirm whether win32 needs them too.
        env['TVM_HREAD_PIPE'] = str(read_host)
        env['TVM_HWRITE_PIPE'] = str(write_host)
    try:
        # close_fds does not work well for all python3
        # Use pass_fds instead.
        # pylint: disable=unexpected-keyword-arg
        pass_fds = (read_device, write_device, read_host, write_host)
        proc = subprocess.Popen(cmd, pass_fds=pass_fds, env=env)
    except TypeError:
        # This is effective for python2
        proc = subprocess.Popen(cmd, close_fds=False, env=env)
    # close device side pipe
    os.close(read_device)
    os.close(write_device)
    sess = _api_internal._vpi_SessMake(read_host, write_host)
    sess.proc = proc
    sess.execpath = path
    return sess
@register_func
def tvm_callback_verilog_simulator(code, *args):
    """Callback by TVM runtime to invoke verilog simulator

    Compiles `code` together with the mmap support library, passes each
    runtime argument into the design, then drives reset and clocks the
    simulation until the design asserts `done`.

    Parameters
    ----------
    code : str
        The verilog code to be simulated

    args : list
        Additional arguments to be set.
    """
    # the mmap support library is always linked into the simulation
    libs = [
        find_file("tvm_vpi_mmap.v")
    ]
    sess = session(libs, code)
    # each argument is exposed to the design as a tvm_arg<i> variable
    for i, value in enumerate(args):
        vpi_h = sess.main["tvm_arg%d" % i]
        if isinstance(value, ctypes.c_void_p):
            # raw pointers are passed through as integers
            int_value = int(value.value)
        elif isinstance(value, int):
            int_value = value
        else:
            raise ValueError(
                "Do not know how to handle value type %s" % type(value))
        vpi_h.put_int(int_value)
    rst = sess.main.rst
    done = sess.main.done
    # start driving: pulse reset high for one cycle, then release it
    rst.put_int(1)
    sess.yield_until_next_cycle()
    rst.put_int(0)
    sess.yield_until_next_cycle()
    # clock the design until it signals completion, plus one extra cycle
    while not done.get_int():
        sess.yield_until_next_cycle()
    sess.yield_until_next_cycle()
    sess.shutdown()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
--- Day 22: Wizard Simulator 20XX ---
Little Henry Case decides that defeating bosses
with swords and stuff is boring.
Now he's playing the game with a wizard.
Of course, he gets stuck on another boss and needs your help again.
In this version, combat still proceeds with
the player and the boss taking alternating turns.
The player still goes first.
Now, however, you don't get any equipment;
instead, you must choose one of your spells to cast.
The first character at or below 0 hit points loses.
Since you're a wizard, you don't get to wear armor,
and you can't attack normally.
However, since you do magic damage, your opponent's armor is ignored,
and so the boss effectively has zero armor as well.
As before, if armor (from a spell, in this case)
would reduce damage below 1, it becomes 1 instead -
that is, the boss' attacks always deal at least 1 damage.
On each of your turns, you must select one of your spells to cast.
If you cannot afford to cast any spell, you lose.
Spells cost mana; you start with 500 mana, but have no maximum limit.
You must have enough mana to cast a spell,
and its cost is immediately deducted when you cast it.
Your spells are Magic Missile, Drain, Shield, Poison, and Recharge.
Magic Missile costs 53 mana.
It instantly does 4 damage.
Drain costs 73 mana.
It instantly does 2 damage and heals you for 2 hit points.
Shield costs 113 mana.
It starts an effect that lasts for 6 turns.
While it is active, your armor is increased by 7.
Poison costs 173 mana.
It starts an effect that lasts for 6 turns.
At the start of each turn while it is active, it deals the boss 3 damage.
Recharge costs 229 mana.
It starts an effect that lasts for 5 turns.
At the start of each turn while it is active, it gives you 101 new mana.
Effects all work the same way.
Effects apply at the start of both the player's turns and the boss' turns.
Effects are created with a timer (the number of turns they last);
at the start of each turn, after they apply any effect they have,
their timer is decreased by one.
If this decreases the timer to zero, the effect ends.
You cannot cast a spell that would start an effect which is already active.
However, effects can be started on the same turn they end.
For example, suppose the player has 10 hit points and 250 mana,
and that the boss has 13 hit points and 8 damage:
-- Player turn --
- Player has 10 hit points, 0 armor, 250 mana
- Boss has 13 hit points
Player casts Poison.
-- Boss turn --
- Player has 10 hit points, 0 armor, 77 mana
- Boss has 13 hit points
Poison deals 3 damage; its timer is now 5.
Boss attacks for 8 damage.
-- Player turn --
- Player has 2 hit points, 0 armor, 77 mana
- Boss has 10 hit points
Poison deals 3 damage; its timer is now 4.
Player casts Magic Missile, dealing 4 damage.
-- Boss turn --
- Player has 2 hit points, 0 armor, 24 mana
- Boss has 3 hit points
Poison deals 3 damage. This kills the boss, and the player wins.
Now, suppose the same initial conditions,
except that the boss has 14 hit points instead:
-- Player turn --
- Player has 10 hit points, 0 armor, 250 mana
- Boss has 14 hit points
Player casts Recharge.
-- Boss turn --
- Player has 10 hit points, 0 armor, 21 mana
- Boss has 14 hit points
Recharge provides 101 mana; its timer is now 4.
Boss attacks for 8 damage!
-- Player turn --
- Player has 2 hit points, 0 armor, 122 mana
- Boss has 14 hit points
Recharge provides 101 mana; its timer is now 3.
Player casts Shield, increasing armor by 7.
-- Boss turn --
- Player has 2 hit points, 7 armor, 110 mana
- Boss has 14 hit points
Shield's timer is now 5.
Recharge provides 101 mana; its timer is now 2.
Boss attacks for 8 - 7 = 1 damage!
-- Player turn --
- Player has 1 hit point, 7 armor, 211 mana
- Boss has 14 hit points
Shield's timer is now 4.
Recharge provides 101 mana; its timer is now 1.
Player casts Drain, dealing 2 damage, and healing 2 hit points.
-- Boss turn --
- Player has 3 hit points, 7 armor, 239 mana
- Boss has 12 hit points
Shield's timer is now 3.
Recharge provides 101 mana; its timer is now 0.
Recharge wears off.
Boss attacks for 8 - 7 = 1 damage!
-- Player turn --
- Player has 2 hit points, 7 armor, 340 mana
- Boss has 12 hit points
Shield's timer is now 2.
Player casts Poison.
-- Boss turn --
- Player has 2 hit points, 7 armor, 167 mana
- Boss has 12 hit points
Shield's timer is now 1.
Poison deals 3 damage; its timer is now 5.
Boss attacks for 8 - 7 = 1 damage!
-- Player turn --
- Player has 1 hit point, 7 armor, 167 mana
- Boss has 9 hit points
Shield's timer is now 0.
Shield wears off, decreasing armor by 7.
Poison deals 3 damage; its timer is now 4.
Player casts Magic Missile, dealing 4 damage.
-- Boss turn --
- Player has 1 hit point, 0 armor, 114 mana
- Boss has 2 hit points
Poison deals 3 damage. This kills the boss, and the player wins.
You start with 50 hit points and 500 mana points.
The boss's actual stats are in your puzzle input.
What is the least amount of mana you can spend and still win the fight?
(Do not include mana recharge effects as "spending" negative mana.) """
# parse input to get boss stats
# NOTE(review): input.txt is assumed to hold the boss's hit points on the
# first line and damage on the second (Game.Monster reads BOSS_STATS[0] as
# hp and BOSS_STATS[1] as damage); we take the first number on each line.
with open('./input.txt', 'r') as f:
    import re
    pattern = re.compile(r'(\d+)')
    BOSS_STATS = [int(pattern.findall(x)[0]) for x in f.readlines()]
# set player's starting stats (fixed by the puzzle statement)
PLAYER_HP = 50
PLAYER_MANA = 500
class Game(object):
    """Snapshot of the battle: the player, the boss and the active effects."""

    class Player(object):
        """The protagonist: a wizard with hp, mana and a spending tally."""
        def __init__(self):
            self.hp = PLAYER_HP
            self.mana = PLAYER_MANA
            self.damage = 0
            self.armor = 0
            self.mana_spent = 0

        def __str__(self):
            return 'player'

    class Monster(object):
        """The boss; its stats come from the puzzle input."""
        def __init__(self):
            self.hp = BOSS_STATS[0]
            self.damage = BOSS_STATS[1]

        def __str__(self):
            return 'monster'

    def __init__(self):
        # winner stays None until someone dies; afterwards it references
        # either self.player or self.monster (callers check it with type())
        self.winner = None
        # fresh combatants for this game
        self.player = self.Player()
        self.monster = self.Monster()
        # remaining turns for each lasting effect (0 means inactive)
        self.poison = 0
        self.shield = 0
        self.recharge = 0

    @classmethod
    def makegame(cls, game=None):
        """Return a brand-new game, or a value-copy of the provided one."""
        fresh = cls()
        if game is None:
            return fresh
        # copy every mutable field so the two games evolve independently
        for attr in ('winner', 'poison', 'shield', 'recharge'):
            setattr(fresh, attr, getattr(game, attr))
        for attr in ('hp', 'mana', 'damage', 'armor', 'mana_spent'):
            setattr(fresh.player, attr, getattr(game.player, attr))
        fresh.monster.hp = game.monster.hp
        return fresh
# Spells and their effects
# Keys consumed by the battle logic:
#   mana          - casting cost, deducted immediately
#   damage        - instant damage (magic_missile, drain) or per-tick (poison)
#   heal          - HP instantly restored to the player (drain)
#   armor         - armor granted while shield is active
#   turns         - effect duration for shield / poison / recharge
#   recharge_mana - mana granted per tick while recharge is active
spells = {
    'magic_missile': {
        'mana': 53,
        'damage': 4,
    },
    'drain': {
        'mana': 73,
        'damage': 2,
        'heal': 2,
    },
    'shield': {
        'mana': 113,
        'armor': 7,
        'turns': 6,
    },
    'poison': {
        'mana': 173,
        'damage': 3,
        'turns': 6,
    },
    'recharge': {
        'mana': 229,
        'turns': 5,
        'recharge_mana': 101,
    },
}
def check_game_over(game):
    """Return True when either combatant is dead, recording game.winner.

    The player is examined first, so if both were somehow at or below
    zero HP the monster would be declared the winner.
    """
    if game.player.hp <= 0:
        game.winner = game.monster
    elif game.monster.hp <= 0:
        game.winner = game.player
    else:
        # both combatants still alive, the game is still ON
        return False
    return True
def apply_spells(game):
    """Tick every active effect once, at the start of a turn."""
    # Shield keeps the armor bonus up while its counter is positive;
    # the counters never go negative, so otherwise the shield expired.
    if game.shield > 0:
        game.shield -= 1
        game.player.armor = spells['shield']['armor']
    else:
        game.player.armor = 0
    # Poison chips away at the monster each turn it is active.
    if game.poison > 0:
        game.poison -= 1
        game.monster.hp -= spells['poison']['damage']
    # Recharge tops the player's mana back up each turn it is active.
    if game.recharge > 0:
        game.recharge -= 1
        game.player.mana += spells['recharge']['recharge_mana']
def player_plays(game, spell):
    """Player's turn: cast `spell` and apply its effects.

    Mutates `game` in place. A cast that is impossible (not enough mana,
    or the spell's effect is already running) declares the monster the
    winner, so the search treats this branch as an invalid move.
    """
    # Check if someone (the player) has died and it's GAME OVER.
    if check_game_over(game):
        return
    # Apply the effects of the spells currently active
    apply_spells(game)
    # If someone has died (e.g. poison killed the boss), the game is over.
    if check_game_over(game):
        return
    # If the player cannot afford even the cheapest spell (magic missile),
    # it's already game over. Declare the monster as the winner.
    if game.player.mana < spells['magic_missile']['mana']:
        game.winner = game.monster
        return
    # Check if player has enough mana, cast the spell, and apply its effects.
    # Effects (shield/poison/recharge) may only be cast while inactive.
    # If the cast is invalid, declare the monster the winner and fall
    # through, so that this move is discarded by the caller.
    if spell == 'magic_missile' and game.player.mana >= spells[spell]['mana']:
        game.player.mana -= spells[spell]['mana']
        game.player.mana_spent += spells[spell]['mana']
        game.monster.hp -= spells[spell]['damage']
    elif spell == 'drain' and game.player.mana >= spells[spell]['mana']:
        game.player.mana -= spells[spell]['mana']
        game.player.mana_spent += spells[spell]['mana']
        game.player.hp += spells[spell]['heal']
        game.monster.hp -= spells[spell]['damage']
    elif spell == 'shield' and game.shield == 0 and \
            game.player.mana >= spells[spell]['mana']:
        game.player.mana -= spells[spell]['mana']
        game.player.mana_spent += spells[spell]['mana']
        game.shield = spells[spell]['turns']
    elif spell == 'poison' and game.poison == 0 and \
            game.player.mana >= spells[spell]['mana']:
        game.player.mana -= spells[spell]['mana']
        game.player.mana_spent += spells[spell]['mana']
        game.poison = spells[spell]['turns']
    elif spell == 'recharge' and game.recharge == 0 and \
            game.player.mana >= spells[spell]['mana']:
        game.player.mana -= spells[spell]['mana']
        game.player.mana_spent += spells[spell]['mana']
        game.recharge = spells[spell]['turns']
    else:
        # invalid move: effect already active or not enough mana
        game.winner = game.monster
    check_game_over(game)
def monster_plays(game):
    """Monster's turn: tick active effects, then attack the player."""
    # Apply the effects of active spells first.
    apply_spells(game)
    # Effects may already have decided the game.
    if check_game_over(game):
        return
    # Hurt the player. Armor reduces the blow, but the attack always
    # lands for at least 1 HP.
    attack = game.monster.damage - game.player.armor
    if attack < 1:
        attack = 1
    game.player.hp -= attack
    check_game_over(game)
def game_states(gamestate):
    """Generate all possible successor game states for `gamestate`.

    For every spell, copies the game, lets the player cast it and the
    monster respond. Yields states where the player has already won or
    where no one has lost yet; states where the monster wins are dropped
    since they can never improve the minimum mana cost.
    """
    # Go over each spell available
    for spell in spells.keys():
        # Work on a copy so the caller's state can be reused for the
        # next spell.
        game = Game.makegame(gamestate)
        # Player plays first step
        player_plays(game, spell)
        if game.winner:
            # isinstance is the idiomatic type check (was type(...) ==);
            # keep only the branch where the player is the winner
            if isinstance(game.winner, Game.Player):
                yield game
            # If the monster has won, or even when continuing
            # from the previous yield, go to the next spell
            continue
        # Monster plays next
        monster_plays(game)
        # No winner yet, or the player won: both are valid states.
        if not isinstance(game.winner, Game.Monster):
            yield game
class Stack(list):
    """A list that also answers to the stack-flavored name 'push'."""

    def push(self, obj):
        """Place obj on top of the stack; returns None, like append."""
        return list.append(self, obj)
# Declare a stack to hold the game states
stack = Stack()
# Add the first, empty game state to the stack
stack.push(Game.makegame())
# Hold the minimum mana cost found so far.
# Set this to some sane high value,
# so that the first winning value gets set as min automatically.
min_mana_cost = 999999
# Go over each game state as long as they're in the stack.
while(stack):
    # Take the most recent game state out, and act on it.
    # This follows the DFS principle: Depth-First-Search
    # Go visiting the nodes and leaves of a tree before all its branches
    game = stack.pop()
    # OPTIMIZATION (branch-and-bound pruning):
    # Iterate only over those game states that have a possibility
    # of generating the min value. Mana spent only ever grows down a
    # branch, so anything already above the best total can be dropped.
    if game.player.mana_spent > min_mana_cost:
        continue
    # Check if the current game state has a winner, and it's the player.
    # If it is, see if its mana cost is a new minimum.
    if game.winner is not None:
        if type(game.winner) == Game.Player:
            min_mana_cost = min(min_mana_cost, game.player.mana_spent)
        continue
    # go over each successor state possible for this game state
    for state in game_states(game):
        # game_states already filters out monster wins, so every state
        # is worth revisiting
        stack.push(state)
# Part 1 answer: least total mana spent while still winning the fight
print(min_mana_cost)
| |
#!/usr/bin/python
"""arranges results mgzs into a dataframe for further analyses
"""
import matplotlib as mpl
# we do this because sometimes we run this without an X-server, and this backend doesn't need
# one. We set warn=False because the notebook uses a different backend and will spout out a big
# warning to that effect; that's unnecessarily alarming, so we hide it.
mpl.use('svg', warn=False)
import argparse
import warnings
import seaborn as sns
import pandas as pd
import numpy as np
import os
import nibabel as nib
import itertools
import re
import h5py
from matplotlib import pyplot as plt
from . import stimuli as sfp_stimuli
def _load_mgz(path):
    """load and reshape an mgz so it's 1d instead of 3d

    this will also make an mgz 2d instead of 4d, but we also want to rearrange
    the data some, which this doesn't do
    """
    data = nib.load(path).get_data()
    # byteswap/newbyteorder works around pandas's big-endian issues, see
    # http://pandas.pydata.org/pandas-docs/version/0.19.1/gotchas.html#byte-ordering-issues
    return data.byteswap().newbyteorder().squeeze()
def _arrange_helper(prf_dir, hemi, name, template, varea_mask, eccen_mask):
    """Load one mgz, mask it, and return a ('shortname-hemi', data) pair.

    Small helper meant to be called in a generator by _arrange_mgzs_into_dict.
    """
    data = _load_mgz(template % (prf_dir, hemi, name))
    # keep only the voxels inside the requested visual areas / eccentricities
    mask = (varea_mask[hemi]) & (eccen_mask[hemi])
    if data.ndim == 1:
        data = data[mask]
    elif data.ndim == 2:
        data = data[mask, :]
    # shorten the name: last path component, else last '_' or '-' segment
    if os.sep in name:
        short_name = os.path.split(name)[-1]
    elif '_' in name:
        short_name = name.split('_')[-1]
    elif '-' in name:
        short_name = name.split('-')[-1]
    else:
        short_name = name
    return "%s-%s" % (short_name, hemi), data
def _load_mat_file(path, results_names, varea_mask, eccen_mask):
    """load and reshape data from .mat file, so it's either 1 or 2d instead of 3 or 4d

    this will open the mat file (we assume it's saved by matlab v7.3 and so use h5py to do so). we
    then need to know the name of the mat_field (e.g., 'models', 'modelmd', 'modelse') to
    grab. then, some times we don't want to grab the entire corresponding array but only some
    subset (e.g., we don't want all bootstraps, all stimulus classes, we only want one stimulus
    class, all bootstraps), so index, if non-None, specifies the index along the second dimension
    (that's the one that indexes the stimulus classes) to take.

    path: str, path to the results .mat file (matlab v7.3 / HDF5 format).
    results_names: iterable of (var, index) pairs; var is a field of the saved
        'results' struct and index is an iterable of indices (None = take all).
    varea_mask, eccen_mask: dicts with 'lh'/'rh' boolean arrays for subsetting voxels.

    returns a dict mapping '<name>-<hemi>' to the masked arrays.
    """
    mgzs = {}
    with h5py.File(path, 'r') as f:
        for var, index in results_names:
            tmp_ref = f['results'][var]
            if tmp_ref.shape == (2, 1):
                # this is the case for all the models fields of the .mat file (modelse, modelmd,
                # models). [0, 0] contains the hrf, and [1, 0] contains the actual results.
                res = f[tmp_ref[1, 0]][:]
            else:
                # that reference thing is only necessary for those models fields, because I think
                # they're matlab structs
                res = tmp_ref[:]
            for idx in index:
                res_name = var
                if idx is None:
                    tmp = res
                else:
                    tmp = res[:, idx]
                    # tag the key with which stimulus class this slice came from
                    res_name += '_%02d' % idx
                tmp = tmp.squeeze()
                # in this case, the data is stimulus classes or bootstraps by voxels, and we want
                # voxels first, so we transpose.
                if tmp.ndim == 2:
                    tmp = tmp.transpose()
                # because of how bidsGetPreprocData.m loads in the surface files, we know this is the left
                # and right hemisphere concatenated together, in that order
                for hemi in ['lh', 'rh']:
                    if hemi == 'lh':
                        tmper = tmp[:varea_mask['lh'].shape[0]]
                    else:
                        tmper = tmp[-varea_mask['rh'].shape[0]:]
                    # keep only voxels inside the requested areas / eccentricities
                    if tmper.ndim == 1:
                        tmper = tmper[(varea_mask[hemi]) & (eccen_mask[hemi])]
                    elif tmper.ndim == 2:
                        tmper = tmper[(varea_mask[hemi]) & (eccen_mask[hemi]), :]
                    mgzs['%s-%s' % (res_name, hemi)] = tmper
    return mgzs
def _arrange_mgzs_into_dict(benson_template_path, results_path, results_names, vareas,
                            eccen_range, benson_template_names=('varea', 'angle', 'eccen'),
                            prf_data_names=('sigma',), benson_atlas_type='bayesian_retinotopy'):
    """load in the mgzs, put in a dictionary, and return that dictionary

    vareas: list of ints. which visual areas (as defined in the Benson visual area template) to
    include. all others will be discarded.

    eccen_range: 2-tuple of ints or floats. What range of eccentricities to include (as specified
    in the Benson eccentricity template).

    benson_template_names: sequence of labels that specify which output files to get from the
    Benson retinotopy. For this analysis to work, it must contain 'varea' and 'eccen'.
    (Defaults are tuples rather than lists to avoid the shared-mutable-default pitfall;
    callers may still pass lists.)
    """
    varea_name = [i for i in benson_template_names if 'varea' in i]
    eccen_name = [i for i in benson_template_names if 'eccen' in i]
    if len(varea_name) != 1 or len(eccen_name) != 1:
        # ValueError (instead of bare Exception) so callers can catch it precisely;
        # existing `except Exception` handlers still work.
        raise ValueError("Need Benson retinotopy files 'eccen' and 'varea'!")
    mgzs = {}
    varea_mask = {}
    eccen_mask = {}
    for hemi in ['lh', 'rh']:
        # boolean masks: voxels inside the requested visual areas and eccentricity band
        varea_mask[hemi] = _load_mgz(benson_template_path % (benson_atlas_type, hemi, varea_name[0]))
        varea_mask[hemi] = np.isin(varea_mask[hemi], vareas)

        eccen_mask[hemi] = _load_mgz(benson_template_path % (benson_atlas_type, hemi, eccen_name[0]))
        eccen_mask[hemi] = (eccen_mask[hemi] > eccen_range[0]) & (eccen_mask[hemi] < eccen_range[1])

    # these are all mgzs from the Benson retinotopy outputs
    for hemi, var in itertools.product(['lh', 'rh'], benson_template_names):
        k, v = _arrange_helper(benson_atlas_type, hemi, var, benson_template_path, varea_mask,
                               eccen_mask)
        mgzs[k] = v

    # these are all mgzs holding pRF fit data
    for hemi, var in itertools.product(['lh', 'rh'], prf_data_names):
        k, v = _arrange_helper('data', hemi, var, benson_template_path, varea_mask, eccen_mask)
        mgzs[k] = v

    # these all live in the results.mat file produced by GLMdenoise
    mgzs.update(_load_mat_file(results_path, results_names, varea_mask, eccen_mask))
    return mgzs
def _unfold_2d_mgz(mgz, value_name, variable_name, mgz_name, hemi=None):
tmp = pd.DataFrame(mgz)
tmp = pd.melt(tmp.reset_index(), id_vars='index')
if hemi is not None:
tmp['hemi'] = hemi
tmp = tmp.rename(columns={'index': 'voxel', 'variable': variable_name, 'value': value_name})
if 'models_' in mgz_name:
# then the value name contains which stimulus class this and the actual value_name is
# amplitude_estimate
class_idx = re.search('models_([0-9]+)', mgz_name).groups()
assert len(class_idx) == 1, "models title %s should only contain one number, to identify stimulus class!" % value_name
tmp['stimulus_class'] = int(class_idx[0])
return tmp
def _add_freq_metainfo(stim_df):
"""this function takes the stim_df and adds some metainfo based on the stimulus frequency
right now these are: stimulus_superclass (angular, radial, etc), freq_space_angle (the angle
in our 2d frequency space) and freq_space_distance (distance from the origin in our 2d
frequency space)
"""
# stimuli belong to five super classes, or paths through the frequency space: w_r=0; w_a=0;
# w_r=w_a; w_r=-w_a; and sqrt(w_r^2 + w_a^)=32. We want to be able to look at them separately,
# so we label them (this is inefficient but works). We also want to get some other identifying
# values. We do this all at once because the major time cost comes from applying this to all
# rows, not the computations themselves
def freq_identifier_logpolar(x):
if x.w_r == 0 and x.w_a == 0:
sc = 'baseline'
elif x.w_r == 0 and x.w_a != 0:
sc = 'angular'
elif x.w_r != 0 and x.w_a == 0:
sc = 'radial'
elif x.w_r == x.w_a:
sc = 'forward spiral'
elif x.w_r == -x.w_a:
sc = 'reverse spiral'
else:
sc = 'mixtures'
return sc, np.arctan2(x.w_a, x.w_r), np.sqrt(x.w_r**2 + x.w_a**2)
def freq_identifier_constant(x):
if x.w_x == 0 and x.w_y == 0:
sc = 'baseline'
elif x.w_x == 0 and x.w_y != 0:
sc = 'horizontal'
elif x.w_x != 0 and x.w_y == 0:
sc = 'vertical'
elif x.w_x == x.w_y:
sc = 'forward diagonal'
elif x.w_x == -x.w_y:
sc = 'reverse diagonal'
else:
sc = 'off-diagonal'
return sc, np.arctan2(x.w_y, x.w_x), np.sqrt(x.w_x**2 + x.w_y**2)
try:
stim_df.loc[(stim_df['w_r'].isnull()) & (stim_df['w_a'].isnull()), ['w_r', 'w_a']] = (0, 0)
properties_list = stim_df[['w_r', 'w_a']].apply(freq_identifier_logpolar, 1)
except KeyError:
stim_df.loc[(stim_df['w_x'].isnull()) & (stim_df['w_y'].isnull()), ['w_x', 'w_y']] = (0, 0)
properties_list = stim_df[['w_x', 'w_y']].apply(freq_identifier_constant, 1)
sc = pd.Series([i[0] for i in properties_list.values], properties_list.index)
ang = pd.Series([i[1] for i in properties_list.values], properties_list.index)
dist = pd.Series([i[2] for i in properties_list.values], properties_list.index)
stim_df['stimulus_superclass'] = sc
# get these between 0 and 2*pi, like the local spatial frequency angles
stim_df['freq_space_angle'] = np.mod(ang, 2*np.pi)
stim_df['freq_space_distance'] = dist
return stim_df
def _setup_mgzs_for_df(mgzs, results_names, df_mode, hemi=None,
                       benson_template_names=['varea', 'angle', 'eccen'],
                       prf_data_names=['sigma']):
    """Build one hemisphere's dataframe from the mgzs dict.

    mgzs: dict produced by _arrange_mgzs_into_dict / _load_mat_file.
    results_names: GLMdenoise result keys to unfold (e.g. 'modelmd',
        'modelse', or the per-class 'models_XX' entries).
    df_mode: 'summary' merges median/std-error value columns onto one frame;
        'full' concatenates one row per bootstrap.
    hemi: 'lh', 'rh', or None; selects which '<name>-<hemi>' keys to read.

    NOTE(review): the list defaults are only read, never mutated, so the
    mutable-default pitfall does not bite here.
    """
    df = None
    if hemi is None:
        mgz_key = '%s'
    else:
        mgz_key = '%s-{}'.format(hemi)
    for brain_name in results_names:
        if df_mode == 'summary':
            # modelmd/modelse map to median / std error amplitude estimates
            value_name = {'modelmd': 'amplitude_estimate_median',
                          'modelse': 'amplitude_estimate_std_error'}.get(brain_name)
            tmp = _unfold_2d_mgz(mgzs[mgz_key % brain_name], value_name,
                                 'stimulus_class', brain_name, hemi)
        elif df_mode == 'full':
            tmp = _unfold_2d_mgz(mgzs[mgz_key % brain_name], 'amplitude_estimate',
                                 'bootstrap_num', brain_name, hemi)
        if df is None:
            df = tmp
        else:
            if df_mode == 'summary':
                # align on (voxel, stimulus_class) and add the new value column
                df = df.set_index(['voxel', 'stimulus_class'])
                tmp = tmp.set_index(['voxel', 'stimulus_class'])
                df[value_name] = tmp[value_name]
                df = df.reset_index()
            elif df_mode == 'full':
                df = pd.concat([df, tmp])
    for brain_name_full in benson_template_names + ['R2'] + prf_data_names:
        # strip the various atlas-type prefixes so column names are uniform
        brain_name = brain_name_full.replace('inferred_', '').replace('benson14_', '')
        brain_name = brain_name.replace('all00-', '').replace('full-', '')
        if brain_name == 'R2':
            df_name = 'GLM_R2'
        elif brain_name == 'vexpl':
            df_name = 'prf_vexpl'
        else:
            df_name = brain_name
        try:
            tmp = pd.DataFrame(mgzs[mgz_key % brain_name])
            tmp = tmp.reset_index().rename(columns={'index': 'voxel', 0: df_name})
            df = df.merge(tmp)
        except ValueError:
            # see http://pandas.pydata.org/pandas-docs/version/0.19.1/gotchas.html#byte-ordering-issues
            warnings.warn("%s had that big-endian error" % brain_name)
            tmp = pd.DataFrame(mgzs[mgz_key % brain_name].byteswap().newbyteorder())
            tmp = tmp.reset_index().rename(columns={'index': 'voxel', 0: df_name})
            df = df.merge(tmp)
    return df
def _put_mgzs_dict_into_df(mgzs, stim_df, results_names, df_mode,
                           benson_template_names=['varea', 'angle', 'eccen'],
                           prf_data_names=['sigma']):
    """Combine both hemispheres into one dataframe and join stimulus metadata.

    Builds a per-hemisphere dataframe with _setup_mgzs_for_df, re-numbers the
    right-hemisphere voxels so voxel ids stay unique, concatenates, joins
    stim_df on stimulus_class, and adds the frequency metainfo columns.
    """
    df = {}
    for hemi in ['lh', 'rh']:
        df[hemi] = _setup_mgzs_for_df(mgzs, results_names, df_mode, hemi, benson_template_names,
                                      prf_data_names)
    # because python 0-indexes, the minimum voxel number is 0. thus if we were to just add the
    # max, the min in the right hemi would be the same as the max in the left hemi
    df['rh'].voxel = df['rh'].voxel + df['lh'].voxel.max()+1
    df = pd.concat(df).reset_index(0, drop=True)
    # join the stimulus properties onto every row via stimulus_class
    df = df.set_index('stimulus_class')
    df = df.join(stim_df)
    df = df.reset_index().rename(columns={'index': 'stimulus_class'})

    # Add the stimulus frequency information
    df = _add_freq_metainfo(df)
    return df
def _find_closest_to(a, bs):
idx = np.argmin(np.abs(np.array(bs) - a))
return bs[idx]
def _round_freq_space_distance(df, core_distances=[6, 8, 11, 16, 23, 32, 45, 64, 91, 128, 181]):
df['rounded_freq_space_distance'] = df.freq_space_distance.apply(_find_closest_to,
bs=core_distances)
return df
def find_ecc_range_in_pixels(stim, mid_val=128):
    """find the min and max eccentricity of the stimulus, in pixels

    all of our stimuli have a central aperture where nothing is presented and
    an outside limit beyond which nothing is presented. this assumes the
    fixation is in the center of the stimulus, and that mid_val is the
    "nothing presented" value; we look at every pixel that differs from
    mid_val and report its smallest and largest distance from the center.

    returns min, max
    """
    if stim.ndim == 3:
        # use the first frame as representative of the stack
        stim = stim[0, :, :]
    radii = sfp_stimuli.mkR(stim.shape)
    rows, cols = np.where(stim != mid_val)
    shown_radii = radii[rows, cols]
    return shown_radii.min(), shown_radii.max()
def find_ecc_range_in_degrees(stim, stim_rad_deg, mid_val=128):
    """find the min and max eccentricity of the stimulus, in degrees

    all of our stimuli have a central aperture and an outside limit beyond
    which nothing is presented. To avoid voxels whose pRFs lie outside the
    stimulus, we need the extent of the stimulus annulus in degrees. Assumes
    fixation is at the center and mid_val is the "nothing presented" value.

    stim_rad_deg: int or float, the radius of the stimulus, in degrees.

    returns min, max
    """
    if stim.ndim == 3:
        stim = stim[0, :, :]
    pix_min, pix_max = find_ecc_range_in_pixels(stim, mid_val)
    radii = sfp_stimuli.mkR(stim.shape)
    # stim_rad_deg is the max vertical/horizontal extent, so the far corner of
    # the image sits at sqrt(2 * stim_rad_deg**2) degrees; radii.max()
    # corresponds to that corner, giving us the pixels-per-degree factor.
    pix_per_deg = radii.max() / np.sqrt(2*stim_rad_deg**2)
    return pix_min / pix_per_deg, pix_max / pix_per_deg
def calculate_stim_local_sf(stim, w_1, w_2, stim_type, eccens, angles, stim_rad_deg=12,
                            plot_flag=False, mid_val=128):
    """calculate the local spatial frequency for a specified stimulus and screen size

    stim: 2d array of floats. an example stimulus. used to determine where the stimuli are masked
    (and thus where the spatial frequency is zero).

    w_1, w_2: ints or floats. the first and second components of the stimulus's spatial
    frequency. if stim_type is 'logpolar' or 'pilot', this should be the radial and angular
    components (in that order!); if stim_type is 'constant', this should be the x and y components
    (in that order!)

    stim_type: {'logpolar', 'constant', 'pilot'}. which type of stimuli were used in the session
    we're analyzing. This matters because it changes the local spatial frequency and, since that is
    determined analytically and not directly from the stimuli, we have no way of telling otherwise.
    NOTE(review): any other value leaves dx/dy/etc unset and raises a NameError below.

    eccens, angles: lists of floats. these are the eccentricities and angles we want to find
    local spatial frequency for.

    stim_rad_deg: float, the radius of the stimulus, in degrees of visual angle

    plot_flag: boolean, optional, default False. Whether to create a plot showing the local spatial
    frequency vs eccentricity for the specified stimulus

    mid_val: int. the value of mid-grey in the stimuli, should be 127 (for pilot stimuli) or 128
    (for actual stimuli)

    returns a dataframe with one row per (eccen, angle) pair containing the local frequency in
    x/y and radial/angular coordinates, plus its magnitude and directions.
    """
    # NOTE(review): eccen_min / eccen_max are computed but never used below
    eccen_min, eccen_max = find_ecc_range_in_degrees(stim, stim_rad_deg, mid_val)
    eccen_local_freqs = []
    for i, (e, a) in enumerate(zip(eccens, angles)):
        if stim_type in ['logpolar', 'pilot']:
            dx, dy, mag, direc = sfp_stimuli.sf_cpd(stim.shape[0], stim_rad_deg*2, e, a,
                                                    stim_type=stim_type, w_r=w_1, w_a=w_2)
            dr, da, new_angle = sfp_stimuli.sf_origin_polar_cpd(stim.shape[0], stim_rad_deg*2, e,
                                                                a, stim_type=stim_type, w_r=w_1,
                                                                w_a=w_2)
        elif stim_type == 'constant':
            dx, dy, mag, direc = sfp_stimuli.sf_cpd(stim.shape[0], stim_rad_deg*2, e, a,
                                                    stim_type=stim_type, w_x=w_1, w_y=w_2)
            dr, da, new_angle = sfp_stimuli.sf_origin_polar_cpd(stim.shape[0], stim_rad_deg*2, e,
                                                                a, stim_type=stim_type, w_x=w_1,
                                                                w_y=w_2)
        eccen_local_freqs.append(pd.DataFrame(
            {'local_w_x': dx, 'local_w_y': dy, 'local_w_r': dr, 'local_w_a': da, 'eccen': e,
             'angle': a, 'local_sf_magnitude': mag, 'local_sf_xy_direction': direc,
             'local_sf_ra_direction': new_angle}, [i]))
    eccen_local_freqs = pd.concat(eccen_local_freqs)

    if plot_flag:
        plt.plot(eccen_local_freqs['eccen'], eccen_local_freqs['local_sf_magnitude'])
        ax = plt.gca()
        ax.set_title('Spatial frequency vs eccentricity')
        ax.set_xlabel('Eccentricity (degrees)')
        ax.set_ylabel('Local spatial frequency (cpd)')

    return eccen_local_freqs
def _add_local_sf_to_df(df, stim, stim_type, stim_rad_deg=12, mid_val=128):
    """Adds local spatial frequency information for all stimuli to the df

    For each unique stimulus frequency pair (w_r/w_a or w_x/w_y) we compute
    the local spatial frequency at every (eccen, angle) pair present in df,
    then join that information back onto df.
    """
    try:
        # log-polar stimuli are parameterized by w_r / w_a ...
        freqs = df.drop_duplicates(['w_r', 'w_a'])[['w_r', 'w_a', 'stimulus_superclass']]
        freq_labels = ['w_r', 'w_a']
    except KeyError:
        # ... constant gratings by w_x / w_y
        freqs = df.drop_duplicates(['w_x', 'w_y'])[['w_x', 'w_y', 'stimulus_superclass']]
        freq_labels = ['w_x', 'w_y']
    sfs = []
    # this gets us the unique pairs of (eccen, angle). It will also include a column that gives the
    # number of times each pair exists in the dataframe, but we ignore that.
    df_eccens_angles = df.groupby(['eccen', 'angle']).size().reset_index()
    for w_1, w_2, stim_class in freqs.values:
        tmp = calculate_stim_local_sf(stim, w_1, w_2, stim_type, df_eccens_angles.eccen.values,
                                      df_eccens_angles.angle.values, stim_rad_deg, mid_val=mid_val)
        tmp[freq_labels[0]] = w_1
        tmp[freq_labels[1]] = w_2
        tmp['stimulus_superclass'] = stim_class
        sfs.append(tmp)
    sfs = pd.concat(sfs)
    # join on the full stimulus identity plus pRF location
    sfs = sfs.set_index(['stimulus_superclass', freq_labels[0], freq_labels[1], 'eccen', 'angle'])
    df = df.set_index(['stimulus_superclass', freq_labels[0], freq_labels[1], 'eccen', 'angle'])
    df = df.join(sfs)
    return df.reset_index()
def _add_baseline(df):
if 'baseline' not in df.stimulus_superclass.unique():
return df.assign(baseline=0)
else:
new_df = []
for n, g in df.groupby(['varea', 'eccen']):
try:
baseline = g[g.stimulus_superclass == 'baseline'].amplitude_estimate.median()
except AttributeError:
baseline = g[g.stimulus_superclass == 'baseline'].amplitude_estimate_median.median()
new_df.append(g.assign(baseline=baseline))
return pd.concat(new_df)
def _transform_angle(x):
"""transform angle from Benson14 convention to our convention
The Benson atlases' convention for angle in visual field is: zero is the upper vertical
meridian, angle is in degrees, the left and right hemisphere both run from 0 to 180 from the
upper to lower meridian (so they increase as you go clockwise and counter-clockwise,
respectively). For our calculations, we need the following convention: zero is the right
horizontal meridian, angle is in radians (and lie between 0 and 2*pi, rather than -pi and pi),
angle increases as you go clockwise, and each angle is unique (refers to one point on the
visual field; we don't have the same number in the left and right hemispheres)
"""
ang = x.angle
if x.hemi == 'rh':
# we want to remap the right hemisphere angles to negative. Noah says this is the
# convention, but I have seen positive values there, so maybe it changed at one point.
if ang > 0:
ang = -ang
return np.mod(np.radians(ang - 90), 2*np.pi)
def _precision_dist(x, axis=None):
"""get precision from a distribution of values (inverse of variance)
"""
cis = np.percentile(x, [16, 84], axis=axis)
std_dev = abs(cis[0] - cis[1]) / 2.
return 1. / (std_dev**2)
def _append_precision_col(df):
"""calculate precision and add to the dataframe
the precision is the inverse of the variance and can be used as weights when combining across
voxels. here, for each voxel, we calculate the precision for each stimulus class's estimate and
then average across all stimulus classes to get a single precision estimate for each voxel.
"""
df = df.copy()
if 'amplitude_estimate_std_error' in df.columns:
df['precision'] = 1. / (df.amplitude_estimate_std_error ** 2)
else:
gb = df.groupby(['varea', 'voxel', 'stimulus_class'])
df = df.set_index(['varea', 'voxel', 'stimulus_class'])
df['precision'] = gb.amplitude_estimate.apply(_precision_dist)
df = df.reset_index()
gb = df.groupby(['varea', 'voxel'])
df = df.set_index(['varea', 'voxel'])
df['precision'] = gb.precision.mean()
return df.reset_index()
def _normalize_amplitude_estimate(df, norm_order=2):
"""calculates the norm of the ampltiude estimates, and normalizes by that
by default, this is the L2-norm (as calculated by np.linalg.norm). Specify norm_order to change
this, see https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html
for possible values.
"""
gb = df.groupby(['varea', 'voxel'])
df = df.set_index(['varea', 'voxel'])
if 'amplitude_estimate_median' in df.columns:
df['amplitude_estimate_norm'] = gb.amplitude_estimate_median.apply(np.linalg.norm,
norm_order)
df = df.reset_index()
for col in ['amplitude_estimate_median', 'amplitude_estimate_std_error']:
df['%s_normed' % col] = df[col] / df.amplitude_estimate_norm
else:
df['amplitude_estimate_norm'] = gb.amplitude_estimate.apply(np.linalg.norm, norm_order)
df = df.reset_index()
df['amplitude_estimate_normed'] = df.amplitude_estimate / df.amplitude_estimate_norm
return df
def main(benson_template_path, results_path, df_mode='summary', stim_type='logpolar',
         save_path=None, class_nums=range(48), vareas=[1], eccen_range=(1, 12), stim_rad_deg=12,
         benson_template_names=['inferred_varea', 'inferred_angle', 'inferred_eccen'],
         benson_atlas_type='bayesian_posterior', prf_data_names=['all00-sigma'],
         unshuffled_stim_path="../data/stimuli/task-sfp_stimuli.npy",
         unshuffled_stim_descriptions_path="../data/stimuli/task-sfp_stim_description.csv",
         mid_val=128):
    """this loads in the realigned mgz files and creates a dataframe of their values

    This only returns those voxels that lie within visual areas outlined by the Benson14 varea mgz

    this should be run after GLMdenoise and after realign.py. The mgz files you give the path to
    should be surfaces, not volumes. this will take a while to run, which is why it's recommended
    to provide save_path so the resulting dataframe can be saved.

    benson_template_path: template path to the Benson14 mgz files, containing three string
    formatting symbols (%s; one for retinotopy type [data, atlas, bayesian_posterior], one for
    hemisphere, one for variable [angle, varea, eccen, sigma]),
    e.g. /mnt/winawerlab/Projects/spatial_frequency_preferences/BIDS/derivatives/prf_solutions/sub-wlsubj042/%s/%s.%s.mgz

    results_path: path to the results.mat file (output of GLMdenoise)

    df_mode: {'summary', 'full'}. If 'summary', will load in the 'modelmd' and 'modelse' results
    fields, using those calculated summary values. If 'full', will load in the bootstrapped
    'models' results field, containing the info to calculate central tendency and spread
    directly. In both cases, 'R2' will also be loaded in.

    stim_type: {'logpolar', 'constant', 'pilot'}. which type of stimuli were used in the session
    we're analyzing. This matters because it changes the local spatial frequency and, since that is
    determined analytically and not directly from the stimuli, we have no way of telling otherwise.

    save_path: None or str. if str, will save the GLM_result_df at this location

    class_nums: list of ints. if df_mode=='full', which classes to load in. If df_mode=='summary',
    then this is ignored.

    vareas: list of ints. Which visual areas to include. the Benson14 template numbers vertices 0
    (not a visual area), -3, -2 (V3v and V2v, respectively), and 1 through 7.

    eccen_range: 2-tuple of ints or floats. What range of eccentricities to include.

    stim_rad_deg: float, the radius of the stimulus, in degrees of visual angle

    benson_template_names: list of labels that specify which output files to get from the Benson
    retinotopy. The complete list is ['varea', 'angle', 'sigma', 'eccen'] (plus either "benson14_"
    or "inferred_" beforehand). For this analysis to work, must contain 'varea' and 'eccen'.

    prf_data_names: list of labels that specify which output files to get from the pRF fits
    (without Bayesian retinotopy); we look for these in the directory found by inserting "data" as
    the first string for benson_template_path. The complete list is ['varea', 'angle', 'sigma',
    'eccen'] (plus either "all00-" or "full-" beforehand).

    unshuffled_stim_path: path to the unshuffled stimuli.

    unshuffled_stim_descriptions_path: path to the unshuffled stimulus description csv, as saved
    during the creation of the stimuli

    mid_val: int. the value of mid-grey in the stimuli, should be 127 (for pilot stimuli) or 128
    (for actual stimuli)
    """
    # This contains the information on each stimulus, allowing us to determine whether some stimuli
    # are part of the same class or a separate one.
    stim_df = pd.read_csv(unshuffled_stim_descriptions_path)
    stim_df = stim_df.dropna()
    stim_df.class_idx = stim_df.class_idx.astype(int)
    # one row per class; the original per-stimulus index becomes stimulus_index
    stim_df = stim_df.drop_duplicates('class_idx').set_index('class_idx')
    stim_df = stim_df.rename(columns={'index': 'stimulus_index'})
    # we only need one stimulus, because all of them have the same masks, which is what we're
    # interested in here
    stim = np.load(unshuffled_stim_path)[0, :, :]
    # decide which fields of the GLMdenoise results.mat to load; each entry is
    # (field_name, list_of_class_nums), with [None] meaning "no per-class split"
    if df_mode == 'summary':
        results_names = [('modelse', [None]), ('modelmd', [None])]
    elif df_mode == 'full':
        results_names = [('models', class_nums)]
    else:
        raise Exception("Don't know how to construct df with df_mode %s!" % df_mode)
    for i in benson_template_names:
        # each variable may come from the Bayesian retinotopy OR the pRF data, never both
        if i in prf_data_names:
            raise Exception("Can only load variable from either Bayesian retinotopy or prf data, "
                            "not both! %s" % i)
        # we do this because it's possible that benson_template_names contains more than just
        # "varea", e.g., "inferred_varea"
        if 'varea' in i:
            # fail fast with a clear message if the required varea template is missing
            if not os.path.isfile(benson_template_path % (benson_atlas_type, 'lh', i)):
                raise Exception("Unable to find the Benson visual areas template! Check your "
                                "benson_template_path! Checked %s" % (benson_template_path %
                                                                      (benson_atlas_type, 'lh', i)))
    mgzs = _arrange_mgzs_into_dict(benson_template_path, results_path,
                                   results_names+[('R2', [None])], vareas, eccen_range,
                                   benson_template_names, prf_data_names, benson_atlas_type)
    if save_path is not None:
        # diagnostic figure: per-hemisphere R2 histograms, saved alongside the csv
        fig, axes = plt.subplots(1, 2, figsize=(10, 5))
        for ax, hemi in zip(axes, ['lh', 'rh']):
            plot_data = mgzs['R2-%s' % hemi]
            num_nans = sum(np.isnan(plot_data))
            plot_data = plot_data[~np.isnan(plot_data)]
            sns.histplot(x=plot_data, ax=ax, kde=False)
            ax.set_title("R2 for %s, data originally contained %s NaNs" % (hemi, num_nans))
        fig.savefig(save_path.replace('.csv', '_R2.svg'))
    if results_names[0][1][0] is None:
        # then all the results_names have None as their second value in the key tuple, so we can
        # ignore it
        results_names = [i[0] for i in results_names]
    else:
        # then every entry in results_names is a string followed by a list of
        # ints. itertools.product will give us what we want, but we need to wrap the string in a
        # list so that it doesn't iterate through the individual characters in the string
        results_names = ["%s_%02d" % (k, l) for i, j in results_names for k, l in itertools.product([i], j)]
    df = _put_mgzs_dict_into_df(mgzs, stim_df, results_names, df_mode, benson_template_names,
                                prf_data_names)
    df.varea = df.varea.astype(int)
    core_dists = df[df.stimulus_superclass == 'radial'].freq_space_distance.unique()
    if stim_type in ['logpolar', 'pilot']:
        # snap frequency-space distances to the canonical radial-class values
        df = _round_freq_space_distance(df, core_dists)
    # convert Benson-convention angles to our convention (see _transform_angle)
    df['angle'] = df.apply(_transform_angle, 1)
    df = _add_local_sf_to_df(df, stim, stim_type, stim_rad_deg, mid_val)
    df = _add_baseline(df)
    df = _append_precision_col(df)
    df = _normalize_amplitude_estimate(df)
    # drop any voxel that has at least one NaN value. these should only
    # show up from interpolation (so only with sub-groupaverage) and
    # should be very few
    # NOTE(review): as written, the filter keeps any voxel with at least one
    # non-NaN value (it drops only all-NaN voxels), which is weaker than the
    # comment above suggests -- confirm which predicate is intended.
    try:
        df = df.groupby('voxel').filter(lambda x: ~np.isnan(x.amplitude_estimate_median).all())
    except AttributeError:
        # in this case, we don't have an amplitude_estimate_median, because
        # this is the full dataframe and so we skip (we don't use full for
        # sub-groupaverage)
        pass
    if save_path is not None:
        df.to_csv(save_path, index=False)
    return df
if __name__ == '__main__':
    class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter):
        # Named subclass so the help output shows argument defaults; kept as a
        # class in case extra formatting behavior is needed later.
        pass
    parser = argparse.ArgumentParser(
        description=("Load in relevant data and create a DataFrame summarizing the first-level "
                     "results for a given subject. Note that this can take a rather long time."),
        formatter_class=CustomFormatter)
    parser.add_argument("--results_path", required=True,
                        help=("path to the results.mat file (output of GLMdenoise) for a single session"))
    parser.add_argument("--benson_template_path", required=True,
                        help=("template path to the Benson14 mgz files, containing three string "
                              "formatting symbols (one for retinotopy type [data, atlas, bayesian_"
                              "posterior], one for hemisphere, one for variable [angle, varea, "
                              "eccen]). Can contain any environmental variable (in all caps, "
                              "contained within curly brackets, e.g., {SUBJECTS_DIR})"))
    parser.add_argument("--stim_type", default='logpolar',
                        help=("{'logpolar', 'constant', 'pilot'}. which type of stimuli were used "
                              "in the session we're analyzing. This matters because it changes the"
                              " local spatial frequency and, since that is determined analytically"
                              " and not directly from the stimuli, we have no way of telling "
                              "otherwise."))
    parser.add_argument("--save_dir", default="data/MRI_first_level",
                        help=("directory to save the GLM result DataFrame in. The DataFrame will "
                              "be saved in a sub-directory (named for the subject) of this as a "
                              "csv with some identifying information in the path."))
    parser.add_argument("--df_mode", default='summary',
                        help=("{summary, full}. If summary, will load in the 'modelmd' and "
                              "'modelse' results fields, and use those calculated summary values. "
                              "If full, will load in the bootstrapped 'models' field, which "
                              "contains the info to calculate central tendency and spread directly"
                              ". In both cases, 'R2' will also be loaded in."))
    parser.add_argument("--class_nums", "-c", default=48, type=int,
                        help=("int. if df_mode=='full', will load classes in range(class_nums). If"
                              "df_mode=='summary', then this is ignored."))
    parser.add_argument("--vareas", "-v", nargs='+', default=[1], type=int,
                        help=("list of ints. Which visual areas to include. the Benson14 template "
                              "numbers vertices 0 (not a visual area), -3, -2 (V3v and V2v, "
                              "respectively), and 1 through 7."))
    parser.add_argument("--eccen_range", "-r", nargs=2, default=(1, 12), type=int,
                        help=("2-tuple of ints or floats. What range of eccentricities to "
                              "include."))
    parser.add_argument("--stim_rad_deg", default=12, type=float,
                        help="float, the radius of the stimulus, in degrees of visual angle")
    parser.add_argument("--benson_atlas_type", default="bayesian_posterior",
                        help=("{atlas, bayesian_posterior}. Type of Benson atlas. Will be the "
                              "first string inserted into benson_template_path"))
    parser.add_argument("--benson_template_names", nargs='+',
                        default=['inferred_varea', 'inferred_angle', 'inferred_eccen'],
                        help=("list of labels that specify which output files to get from the "
                              "Benson retinotopy. For this analysis to work, must contain 'varea'"
                              " and 'eccen'. Note that some subjects might not have sigma."))
    parser.add_argument("--prf_data_names", nargs='*',
                        default=['all00-sigma'],
                        help=("list of labels that specify which output files to get from the "
                              "pRF fits (will insert 'data' into the benson_template_path to find)"
                              "."))
    parser.add_argument("--unshuffled_stim_descriptions_path", "-d",
                        default="data/stimuli/task-sfp_stim_description.csv",
                        help=("Path to the csv file that contains the pandas Dataframe that "
                              "specifies each stimulus's frequency"))
    parser.add_argument("--unshuffled_stim_path", "-s",
                        default="data/stimuli/task-sfp_stimuli.npy",
                        help=("Path to the npy file that contains the numpy array with the stimuli"
                              " used in the experiment"))
    parser.add_argument("--save_stem", default="",
                        help=("String to prefix the filename of output csv with. Useful for making"
                              " this BIDS-like"))
    parser.add_argument("--mid_val", default=128, type=int,
                        help=("The value of mid-grey in the stimuli. Should be 127 for pilot "
                              "stimuli, 128 for real experiment"))
    args = vars(parser.parse_args())
    # save_dir/save_stem are CLI-only conveniences: pop them so the remaining
    # entries map directly onto main()'s keyword arguments.
    save_dir = args.pop('save_dir')
    save_stem = args.pop('save_stem')
    save_dict = {'df_mode': args['df_mode'], 'vareas': '-'.join(str(i) for i in args['vareas']),
                 'eccen': '-'.join(str(i) for i in args['eccen_range'])}
    save_name = "v{vareas}_e{eccen}_{df_mode}.csv".format(**save_dict)
    args['save_path'] = os.path.join(save_dir, save_stem+save_name)
    # main() expects an iterable of class numbers, not a count
    args['class_nums'] = range(args['class_nums'])
    if not os.path.isdir(os.path.dirname(args['save_path'])):
        os.makedirs(os.path.dirname(args['save_path']))
    main(**args)
| |
import os
import pytest
# Deprecation warning emitted by `pip list --no-columns`; the tests below
# assert this exact text appears on stderr whenever the flag is used.
WARN_NOCOL = ("DEPRECATION: The --no-columns option will be "
              "removed in the future.")
def test_list_command(script, data):
    """
    Test default behavior of list command.
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    result = script.pip('list')
    # default output uses the legacy "name (version)" format
    assert 'simple (1.0)' in result.stdout, str(result)
    assert 'simple2 (3.0)' in result.stdout, str(result)


def test_columns_flag(script, data):
    """
    Test the list command with the '--columns' option
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    result = script.pip('list', '--columns')
    # columns output has a header row and space-aligned "name version" rows
    # instead of the legacy "name (version)" format
    assert 'Package' in result.stdout, str(result)
    assert 'Version' in result.stdout, str(result)
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert 'simple 1.0' in result.stdout, str(result)
    assert 'simple2 3.0' in result.stdout, str(result)


def test_nocolumns_flag(script, data):
    """
    Test that --no-columns raises the deprecation warning and still outputs
    the old-style formatting.
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    result = script.pip('list', '--no-columns', expect_stderr=True)
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' in result.stdout, str(result)
    assert 'simple2 (3.0)' in result.stdout, str(result)


def test_columns_nocolumns(script, data):
    """
    Test that --no-columns has priority in --columns --no-columns.
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    result = script.pip(
        'list', '--columns', '--no-columns',
        expect_error=True,
    )
    # the later flag wins: legacy formatting plus the deprecation warning
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' in result.stdout, str(result)
    assert 'simple2 (3.0)' in result.stdout, str(result)
    assert 'simple 1.0' not in result.stdout, str(result)
    assert 'simple2 3.0' not in result.stdout, str(result)


def test_nocolumns_columns(script, data):
    """
    Test that --columns has priority in --no-columns --columns.
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    result = script.pip(
        'list', '--no-columns', '--columns',
    )
    # the later flag wins: columns formatting, no deprecation consequences
    assert 'Package' in result.stdout, str(result)
    assert 'Version' in result.stdout, str(result)
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert 'simple2 (3.0)' not in result.stdout, str(result)
    assert 'simple 1.0' in result.stdout, str(result)
    assert 'simple2 3.0' in result.stdout, str(result)
def test_local_flag(script, data):
    """
    Test the behavior of --local flag in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    result = script.pip('list', '--local')
    assert 'simple (1.0)' in result.stdout


def test_local_columns_flag(script, data):
    """
    Test the behavior of --local --columns flags in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    result = script.pip('list', '--local', '--columns')
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'simple (1.0)' not in result.stdout
    assert 'simple 1.0' in result.stdout, str(result)


def test_local_nocolumns_flag(script, data):
    """
    Test the behavior of --local --no-columns flags in the list
    command.
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    result = script.pip('list', '--local', '--no-columns', expect_stderr=True)
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' in result.stdout


def test_user_flag(script, data, virtualenv):
    """
    Test the behavior of --user flag in the list command
    """
    # install one package system-wide and one with --user; only the latter
    # should be listed by `pip list --user`
    virtualenv.system_site_packages = True
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip('install', '-f', data.find_links, '--no-index',
               '--user', 'simple2==2.0')
    result = script.pip('list', '--user')
    assert 'simple (1.0)' not in result.stdout
    assert 'simple2 (2.0)' in result.stdout


def test_user_columns_flag(script, data, virtualenv):
    """
    Test the behavior of --user --columns flags in the list command
    """
    virtualenv.system_site_packages = True
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip('install', '-f', data.find_links, '--no-index',
               '--user', 'simple2==2.0')
    result = script.pip('list', '--user', '--columns')
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'simple2 (2.0)' not in result.stdout
    assert 'simple2 2.0' in result.stdout, str(result)


def test_user_nocolumns_flag(script, data, virtualenv):
    """
    Test the behavior of --user --no-columns flags in the list command
    """
    virtualenv.system_site_packages = True
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip('install', '-f', data.find_links, '--no-index',
               '--user', 'simple2==2.0')
    result = script.pip('list', '--user', '--no-columns', expect_stderr=True)
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' not in result.stdout
    assert 'simple2 (2.0)' in result.stdout, str(result)
@pytest.mark.network
def test_uptodate_flag(script, data):
    """
    Test the behavior of --uptodate flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--uptodate',
        expect_stderr=True,
    )
    # only packages already at their newest available version are listed
    assert 'simple (1.0)' not in result.stdout  # 3.0 is latest
    assert 'pip-test-package (0.1.1,' in result.stdout  # editables included
    assert 'simple2 (3.0)' in result.stdout, str(result)


@pytest.mark.network
def test_uptodate_columns_flag(script, data):
    """
    Test the behavior of --uptodate --columns flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--uptodate',
        '--columns',
    )
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'Location' in result.stdout  # editables included
    assert 'pip-test-package (0.1.1,' not in result.stdout
    assert 'pip-test-package 0.1.1' in result.stdout, str(result)
    assert 'simple2 3.0' in result.stdout, str(result)


@pytest.mark.network
def test_uptodate_nocolumns_flag(script, data):
    """
    Test the behavior of --uptodate --no-columns flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--uptodate',
        '--no-columns', expect_stderr=True,
    )
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' not in result.stdout  # 3.0 is latest
    assert 'pip-test-package (0.1.1,' in result.stdout  # editables included
    assert 'simple2 (3.0)' in result.stdout, str(result)
@pytest.mark.network
def test_outdated_flag(script, data):
    """
    Test the behavior of --outdated flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0', 'simplewheel==1.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--outdated',
        expect_stderr=True,
    )
    # legacy outdated output also reports the newest version and its dist type
    assert 'simple (1.0) - Latest: 3.0 [sdist]' in result.stdout
    assert 'simplewheel (1.0) - Latest: 2.0 [wheel]' in result.stdout
    assert 'pip-test-package (0.1, ' in result.stdout
    assert ' Latest: 0.1.1 [sdist]' in result.stdout
    assert 'simple2' not in result.stdout, str(result)  # 3.0 is latest


@pytest.mark.network
def test_outdated_columns_flag(script, data):
    """
    Test the behavior of --outdated --columns flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0', 'simplewheel==1.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--outdated',
        '--columns',
    )
    # columns outdated output gains Latest and Type columns
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'Latest' in result.stdout
    assert 'Type' in result.stdout
    assert 'simple (1.0) - Latest: 3.0 [sdist]' not in result.stdout
    assert 'simplewheel (1.0) - Latest: 2.0 [wheel]' not in result.stdout
    assert 'simple 1.0 3.0 sdist' in result.stdout, (
        str(result)
    )
    assert 'simplewheel 1.0 2.0 wheel' in result.stdout, (
        str(result)
    )
    assert 'simple2' not in result.stdout, str(result)  # 3.0 is latest


@pytest.mark.network
def test_outdated_nocolumns_flag(script, data):
    """
    Test the behavior of --outdated --no-columns flag in the list command
    """
    script.pip(
        'install', '-f', data.find_links, '--no-index', 'simple==1.0',
        'simple2==3.0', 'simplewheel==1.0',
    )
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--outdated',
        '--no-columns', expect_stderr=True,
    )
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0) - Latest: 3.0 [sdist]' in result.stdout
    assert 'simplewheel (1.0) - Latest: 2.0 [wheel]' in result.stdout
    assert 'pip-test-package (0.1, ' in result.stdout
    assert ' Latest: 0.1.1 [sdist]' in result.stdout
    assert 'simple2' not in result.stdout, str(result)  # 3.0 is latest
@pytest.mark.network
def test_editables_flag(script, data):
    """
    Test the behavior of --editable flag in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip('list', '--editable')
    # --editable hides regular installs and lists the editable's source path
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_editables_columns_flag(script, data):
    """
    Test the behavior of --editable --columns flags in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip('list', '--editable', '--columns')
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'Location' in result.stdout
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_editables_nocolumns_flag(script, data):
    """
    Test the behavior of --editable --no-columns flags in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '--editable', '--no-columns', expect_stderr=True,
    )
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_uptodate_editables_flag(script, data):
    """
    test the behavior of --editable --uptodate flag in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index',
        '--editable', '--uptodate',
        expect_stderr=True,
    )
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_uptodate_editables_columns_flag(script, data):
    """
    test the behavior of --editable --uptodate --columns flag in the
    list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index',
        '--editable', '--uptodate', '--columns',
    )
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'Location' in result.stdout
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_uptodate_editables_nocolumns_flag(script, data):
    """
    test the behavior of --editable --uptodate --no-columns flag
    in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index', '--editable',
        '--uptodate', '--no-columns', expect_stderr=True,
    )
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_outdated_editables_flag(script, data):
    """
    test the behavior of --editable --outdated flag in the list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index',
        '--editable', '--outdated',
        expect_stderr=True,
    )
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_outdated_editables_columns_flag(script, data):
    """
    test the behavior of --editable --outdated --columns flag in the
    list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # the install output is not inspected, so don't bind it (the old `result =`
    # binding here was dead: it was immediately overwritten below)
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index',
        '--editable', '--outdated', '--columns',
    )
    assert 'Package' in result.stdout
    assert 'Version' in result.stdout
    assert 'Location' in result.stdout
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
@pytest.mark.network
def test_outdated_editables_nocolumns_flag(script, data):
    """
    test the behavior of --editable --outdated --no-columns flag in the
    list command
    """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    script.pip(
        'install', '-e',
        'git+https://github.com/pypa/pip-test-package.git'
        '@0.1#egg=pip-test-package'
    )
    result = script.pip(
        'list', '-f', data.find_links, '--no-index',
        '--editable', '--outdated', '--no-columns',
        expect_stderr=True,
    )
    assert WARN_NOCOL in result.stderr, str(result)
    assert 'simple (1.0)' not in result.stdout, str(result)
    assert os.path.join('src', 'pip-test-package') in result.stdout, (
        str(result)
    )
def test_outdated_pre(script, data):
    """Test that --outdated only offers pre-releases when --pre is given."""
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # Let's build a fake wheelhouse
    script.scratch_path.join("wheelhouse").mkdir()
    wheelhouse_path = script.scratch_path / 'wheelhouse'
    wheelhouse_path.join('simple-1.1-py2.py3-none-any.whl').write('')
    wheelhouse_path.join('simple-2.0.dev0-py2.py3-none-any.whl').write('')
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path)
    assert 'simple (1.0)' in result.stdout
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path,
                        '--outdated')
    # without --pre, the stable 1.1 is the latest candidate
    assert 'simple (1.0) - Latest: 1.1 [wheel]' in result.stdout
    result_pre = script.pip('list', '--no-index',
                            '--find-links', wheelhouse_path,
                            '--outdated', '--pre')
    # with --pre, the 2.0.dev0 pre-release becomes the latest candidate
    assert 'simple (1.0) - Latest: 2.0.dev0 [wheel]' in result_pre.stdout


def test_outdated_pre_columns(script, data):
    """ Test of interaction behavior of --pre and --columns """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # Let's build a fake wheelhouse
    script.scratch_path.join("wheelhouse").mkdir()
    wheelhouse_path = script.scratch_path / 'wheelhouse'
    wheelhouse_path.join('simple-1.1-py2.py3-none-any.whl').write('')
    wheelhouse_path.join('simple-2.0.dev0-py2.py3-none-any.whl').write('')
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path)
    assert 'simple (1.0)' in result.stdout
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path,
                        '--outdated')
    assert 'simple (1.0) - Latest: 1.1 [wheel]' in result.stdout
    result_pre = script.pip(
        'list', '--no-index', '--find-links', wheelhouse_path,
        '--outdated', '--pre', '--columns',
    )
    # columns formatting also applies when --pre is in effect
    assert 'Package' in result_pre.stdout
    assert 'Version' in result_pre.stdout
    assert 'Latest' in result_pre.stdout
    assert 'Type' in result_pre.stdout


def test_outdated_pre_nocolumns(script, data):
    """ Test of interaction behavior of --pre and --no-columns """
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    # Let's build a fake wheelhouse
    script.scratch_path.join("wheelhouse").mkdir()
    wheelhouse_path = script.scratch_path / 'wheelhouse'
    wheelhouse_path.join('simple-1.1-py2.py3-none-any.whl').write('')
    wheelhouse_path.join('simple-2.0.dev0-py2.py3-none-any.whl').write('')
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path)
    assert 'simple (1.0)' in result.stdout
    result = script.pip('list', '--no-index', '--find-links', wheelhouse_path,
                        '--outdated')
    assert 'simple (1.0) - Latest: 1.1 [wheel]' in result.stdout
    result = script.pip(
        'list', '--no-index',
        '--find-links', wheelhouse_path,
        '--outdated', '--pre', '--no-columns', expect_stderr=True
    )
    # the deprecation warning still fires when --pre is in effect
    assert WARN_NOCOL in result.stderr, str(result)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Application context management for the cr tool.
Contains all the support code to enable the shared context used by the cr tool.
This includes the configuration variables and command line handling.
"""
from __future__ import print_function
import argparse
import os
import cr
class _DumpVisitor(cr.visitor.ExportVisitor):
  """A visitor that prints all variables in a config hierarchy."""

  def __init__(self, with_source):
    super(_DumpVisitor, self).__init__({})
    # Accumulates key -> string-value pairs until the next _DumpNow flush.
    self.to_dump = {}
    # When True, flush at every node boundary with a "From <where>" header so
    # each variable is printed next to the config node it came from.
    self.with_source = with_source

  def StartNode(self):
    if self.with_source:
      self._DumpNow()
    super(_DumpVisitor, self).StartNode()

  def EndNode(self):
    # Flush before leaving the node (always when showing sources, otherwise
    # only at the outermost level), and once more after the base class has
    # popped the final node. to_dump is cleared on flush, so the extra call
    # never double-prints.
    if self.with_source or not self.stack:
      self._DumpNow()
    super(_DumpVisitor, self).EndNode()
    if not self.stack:
      self._DumpNow()

  def Visit(self, key, value):
    super(_DumpVisitor, self).Visit(key, value)
    if key in self.store:
      str_value = str(self.store[key])
      # Only record values that differ from the process environment: entries
      # merely mirroring os.environ are not interesting to print.
      if str_value != str(os.environ.get(key, None)):
        self.to_dump[key] = str_value

  def _DumpNow(self):
    # Print and clear the accumulated variables, sorted by key for stable
    # output; no-op when nothing has been collected.
    if self.to_dump:
      if self.with_source:
        print('From', self.Where())
      for key in sorted(self.to_dump.keys()):
        print(' ', key, '=', self.to_dump[key])
      self.to_dump = {}
class _ShowHelp(argparse.Action):
  """An argparse action that prints the help text.

  This mirrors argparse's built-in help action, except that it is a no-op
  while the early speculative parse of the arguments is running.
  """

  def __call__(self, parser, namespace, values, option_string=None):
    # During the speculative pre-parse we must not print or exit.
    if cr.context.speculative:
      return
    active_command = cr.Command.GetActivePlugin()
    if active_command:
      active_command.parser.print_help()
    else:
      parser.print_help()
    exit(1)
class _ArgumentParser(argparse.ArgumentParser):
  """An ArgumentParser extension that supports speculative parsing.

  In speculative mode the parser never reports errors or produces output,
  which allows an early pass over the command line to collect arguments
  that may affect which other arguments are allowed.
  """

  def error(self, message):
    # Suppress all error reporting while parsing speculatively.
    if not cr.context.speculative:
      super(_ArgumentParser, self).error(message)

  def parse_args(self):
    if not cr.context.speculative:
      return super(_ArgumentParser, self).parse_args()
    # Speculative: accept whatever parses and silently drop the rest.
    parsed = self.parse_known_args()
    return parsed[0] if parsed else None

  def parse_known_args(self, args=None, namespace=None):
    outcome = super(_ArgumentParser, self).parse_known_args(args, namespace)
    # Normalize a None result into the (namespace, remaining) shape.
    return (namespace, None) if outcome is None else outcome
# The context stack
# Stack of Context objects entered via 'with'; the top of the stack is the
# currently active cr.context.
_stack = []
class _ContextData:
  # Plain attribute holder for a Context's mutable state (args, parser, ...).
  pass
class Context(cr.config.Config):
  """The base context holder for the cr system.

  This holds the common context shared throughout cr.
  Mostly this is stored in the Config structure of variables.
  """

  def __init__(self, name='Context'):
    super(Context, self).__init__(name)
    self._data = _ContextData()

  def CreateData(self, description='', epilog=''):
    """Build the config hierarchy and the command line parser.

    Args:
      description: text shown at the top of --help output.
      epilog: text shown at the bottom of --help output.
    """
    self._data.args = None
    self._data.arguments = cr.config.Config('ARGS')
    self._data.derived = cr.config.Config('DERIVED')
    self.AddChildren(*cr.config.GLOBALS)
    self.AddChildren(
        cr.config.Config('ENVIRONMENT', literal=True, export=True).Set(
            {k: self.ParseValue(v) for k, v in os.environ.items()}),
        self._data.arguments,
        self._data.derived,
    )
    # Build the command line argument parser
    self._data.parser = _ArgumentParser(add_help=False, description=description,
                                        epilog=epilog)
    self._data.subparsers = self.parser.add_subparsers()
    # Add the global arguments
    self.AddCommonArguments(self._data.parser)
    self._data.gclient = {}

  @property
  def data(self):
    return self._data

  def __enter__(self):
    """ To support using 'with cr.base.context.Create():'"""
    _stack.append(self)
    cr.context = self
    return self

  def __exit__(self, *_):
    # Restore the previously active context, if any remain on the stack.
    _stack.pop()
    if _stack:
      cr.context = _stack[-1]
    return False

  def AddSubParser(self, source):
    """Let a plugin register its sub-command parser."""
    # The return value was previously bound to an unused local; dropped.
    source.AddArguments(self._data.subparsers)

  @classmethod
  def AddCommonArguments(cls, parser):
    """Adds the command line arguments common to all commands in cr."""
    parser.add_argument(
        '-h', '--help',
        action=_ShowHelp, nargs=0,
        help='show the help message and exit.'
    )
    parser.add_argument(
        '--dry-run', dest='CR_DRY_RUN',
        action='store_true', default=None,
        help="""
          Don't execute commands, just print them. Implies verbose.
          Overrides CR_DRY_RUN
          """
    )
    parser.add_argument(
        '-v', '--verbose', dest='CR_VERBOSE',
        action='count', default=None,
        help="""
          Print information about commands being performed.
          Repeating multiple times increases the verbosity level.
          Overrides CR_VERBOSE
          """
    )

  @property
  def args(self):
    # The argparse Namespace from the last ParseArgs call (None before).
    return self._data.args

  @property
  def arguments(self):
    return self._data.arguments

  @property
  def speculative(self):
    # NOTE: only set by ParseArgs; reading it earlier raises AttributeError.
    return self._data.speculative

  @property
  def derived(self):
    return self._data.derived

  @property
  def parser(self):
    return self._data.parser

  @property
  def remains(self):
    # Unparsed trailing arguments, with a leading '--' separator stripped.
    remains = getattr(self._data.args, '_remains', None)
    if remains and remains[0] == '--':
      remains = remains[1:]
    return remains

  @property
  def verbose(self):
    if self.autocompleting:
      return False
    # Dry-run implies verbose.
    return self.Find('CR_VERBOSE') or self.dry_run

  @property
  def dry_run(self):
    # Never execute anything while producing shell completions.
    if self.autocompleting:
      return True
    return self.Find('CR_DRY_RUN')

  @property
  def autocompleting(self):
    return 'COMP_WORD' in os.environ

  @property
  def gclient(self):
    # Lazily loaded and cached gclient configuration.
    if not self._data.gclient:
      self._data.gclient = cr.base.client.ReadGClient()
    return self._data.gclient

  def ParseArgs(self, speculative=False):
    """Parse the command line, optionally in speculative (no-error) mode."""
    cr.plugin.DynamicChoices.only_active = not speculative
    self._data.speculative = speculative
    self._data.args = self._data.parser.parse_args()
    self._data.arguments.Wipe()
    if self._data.args:
      # Only propagate arguments the user actually supplied.
      self._data.arguments.Set(
          {k: v for k, v in vars(self._data.args).items() if v is not None})

  def DumpValues(self, with_source):
    """Print all variables in this context, optionally with their origin."""
    _DumpVisitor(with_source).VisitNode(self)
def Create(description='', epilog=''):
  """Construct a Context and initialize its data structures."""
  ctx = Context()
  ctx.CreateData(description=description, epilog=epilog)
  return ctx
| |
import abc
import csv
from collections import namedtuple, defaultdict, OrderedDict, Counter
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity as sim
from sklearn.pipeline import Pipeline
# Parts of speech excluded from lemmatization (conjunctions, interjections,
# particles, prepositions and unknown tags).
STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}
# A synset record: identifier, synonym bag, hypernym bag, and their union.
Synset = namedtuple('Synset', 'id synonyms hypernyms bag')
class Inventory(object):
    """Sense inventory representation and loader."""

    def __init__(self, inventory_path):
        """
        During the construction, BaseWSD parses the given sense inventory file.

        The file is tab-separated; column 0 carries the synset id,
        column 2 the synonyms field and column 4 the hypernyms field.
        """
        # Per-instance storage.  These used to be class attributes, which
        # silently merged the contents of every Inventory ever constructed
        # into one shared dict/index.
        self.synsets = {}
        self.index = defaultdict(list)

        def field_to_bag(field):
            # "word#sid:freq, word#sid:freq, ..." -> {word: freq}
            return {word: freq for record in field.split(', ')
                    for word, freq in (self.lexeme(record),)
                    if record}

        with open(inventory_path, 'r', encoding='utf-8', newline='') as f:
            reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
            for row in reader:
                id = row[0]
                synonyms = field_to_bag(row[2])
                hypernyms = field_to_bag(row[4])
                self.synsets[id] = Synset(
                    id=id,
                    synonyms=synonyms,
                    hypernyms=hypernyms,
                    bag={**synonyms, **hypernyms}
                )
                # Invert the bag so each word maps to its candidate synsets.
                for word in self.synsets[id].bag:
                    self.index[word].append(id)

    def lexeme(self, record):
        """
        Parse the sense representations like 'word#sid:freq'.
        Actually, we do not care about the sid field because
        we use synset identifiers instead.

        Returns (word, freq); freq defaults to 1 when absent.
        """
        if '#' in record:
            word, tail = record.split('#', 1)
        else:
            word, tail = record, None
        if tail:
            if ':' in tail:
                sid, tail = tail.split(':', 1)
            else:
                sid, tail = tail, None
        if tail:
            freq = float(tail)
        else:
            freq = 1
        return word, freq
# A single word occurrence: (token, pos tag, lemma, position in sentence).
Span = namedtuple('Span', 'token pos lemma index')
class BaseWSD(object, metaclass=abc.ABCMeta):
    """
    Base class for word sense disambiguation routines. Should not be used.
    Descendant classes must implement the disambiguate_word() method.

    Note: the original code set ``__metaclass__ = abc.ABCMeta``, which is a
    Python 2 idiom and a no-op in Python 3, so the abstract contract was
    never actually enforced; the metaclass keyword fixes that.
    """

    def __init__(self, inventory):
        # inventory: an Inventory instance (None for trivial baselines).
        self.inventory = inventory

    def lemmatize(self, sentence):
        """
        This method transforms the given sentence into the dict that
        maps the word indices to their lemmas. It also excludes those
        words which part of speech is in the stop list.
        """
        return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)
                if pos not in STOP_POS}

    @abc.abstractmethod
    def disambiguate_word(self, sentence, index):
        """
        Return word sense identifier for the given word in the sentence.

        The base implementation only validates the arguments; subclasses
        call it via super() before doing the actual disambiguation.

        Raises:
            ValueError: if sentence is not a non-empty list or index is
                outside [0, len(sentence) - 1].
        """
        if not sentence or not isinstance(sentence, list):
            raise ValueError('sentence should be a list')
        if not isinstance(index, int) or index < 0 or index >= len(sentence):
            # Report the actual valid range (was off by one: used len(sentence)).
            raise ValueError('index should be in [0...%d]' % (len(sentence) - 1))

    def disambiguate(self, sentence):
        """
        Return word sense identifiers corresponding to the words
        in the given sentence.
        """
        result = OrderedDict()
        for index, span in enumerate(sentence):
            # here, span is (token, pos, lemma), but we also need index
            span = Span(*span, index)
            result[span] = self.disambiguate_word(sentence, index)
        return result
class OneBaseline(BaseWSD):
    """
    A baseline that treats every distinct word as having exactly one sense.
    Not thread-safe.
    """

    # Maps each word to the numeric identifier assigned on first sight.
    counter = {}

    def __init__(self):
        super().__init__(None)

    def disambiguate_word(self, sentence, index):
        super().disambiguate_word(sentence, index)
        token, _pos, _lemma = sentence[index]
        if token not in self.counter:
            # First occurrence: hand out the next free identifier.
            self.counter[token] = len(self.counter)
        return str(self.counter[token])
class SingletonsBaseline(BaseWSD):
    """
    A baseline that assigns every word occurrence to its own cluster.
    Not thread-safe.
    """

    # Count of identifiers handed out; the augmented assignment below makes
    # this effectively per-instance after the first call.
    counter = 0

    def __init__(self):
        super().__init__(None)

    def disambiguate_word(self, sentence, index):
        super().disambiguate_word(sentence, index)
        self.counter = self.counter + 1
        return str(self.counter)
class SparseWSD(BaseWSD):
    """
    A simple sparse word sense disambiguation.
    """

    # Vectorization pipeline: bag-of-words dicts -> tf-idf vectors.
    # NOTE(review): this is a class attribute, so the fitted state is shared
    # by all SparseWSD instances — the most recently constructed one wins.
    sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])

    def __init__(self, inventory):
        super().__init__(inventory)
        # Fit the vectorizer on every synset bag in the inventory.
        self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])

    def disambiguate_word(self, sentence, index):
        super().disambiguate_word(sentence, index)
        lemmas = self.lemmatize(sentence)
        if index not in lemmas:
            # The target word was filtered out (stop POS): nothing to assign.
            return
        svector = self.sparse.transform(Counter(lemmas.values()))  # sentence vector
        def search(query):
            """
            Map synset identifiers to the cosine similarity value.
            This function calls the function query(id) that retrieves
            the corresponding dict of words.
            """
            return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)
                            for id in self.inventory.index[lemmas[index]]})
        candidates = search(lambda id: self.inventory.synsets[id].synonyms)
        # give the hypernyms a chance if nothing is found
        if not candidates:
            candidates = search(lambda id: self.inventory.synsets[id].bag)
        if not candidates:
            return
        # Return the single best-scoring synset identifier.
        for id, _ in candidates.most_common(1):
            return id
class DenseWSD(BaseWSD):
    """
    A word sense disambiguation approach that is based on SenseGram.
    """

    class densedict(dict):
        """
        A handy dict that transforms a synset into its dense representation.
        """

        def __init__(self, synsets, sensegram):
            # synsets: id -> Synset; sensegram: callable words -> vector.
            self.synsets = synsets
            self.sensegram = sensegram

        def __missing__(self, id):
            # Compute and cache the dense vector on first access.
            value = self[id] = self.sensegram(self.synsets[id].bag.keys())
            return value

    def __init__(self, inventory, wv):
        # wv: a word-embedding model — presumably gensim-style KeyedVectors
        # (has word_vec()/`in` and optionally words_vec()); TODO confirm.
        super().__init__(inventory)
        self.wv = wv
        self.dense = self.densedict(self.inventory.synsets, self.sensegram)

    def sensegram(self, words):
        """
        This is a simple implementation of SenseGram.
        It just averages the embeddings corresponding to the given words.
        """
        vectors = self.words_vec(set(words))
        if not vectors:
            # None of the words is known to the embedding model.
            return
        return np.mean(np.vstack(vectors.values()), axis=0).reshape(1, -1)

    def words_vec(self, words, use_norm=False):
        """
        Return a dict that maps the given words to their embeddings.
        """
        if callable(getattr(self.wv, 'words_vec', None)):
            # Prefer the model's own batched lookup when it provides one.
            return self.wv.words_vec(words, use_norm)
        return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}

    def disambiguate_word(self, sentence, index):
        super().disambiguate_word(sentence, index)
        lemmas = self.lemmatize(sentence)
        if index not in lemmas:
            return
        svector = self.sensegram(lemmas.values())  # sentence vector
        if svector is None:
            # No embedding coverage for the sentence: cannot disambiguate.
            return
        # map synset identifiers to the cosine similarity value
        candidates = Counter({id: sim(svector, self.dense[id]).item(0)
                              for id in self.inventory.index[lemmas[index]]
                              if self.dense[id] is not None})
        if not candidates:
            return
        # Return the single best-scoring synset identifier.
        for id, _ in candidates.most_common(1):
            return id
class LeskWSD(BaseWSD):
    """
    A word sense disambiguation approach that is based on Lesk method.

    Each candidate synset is scored by overlap with the sentence context:
    +1 per synonym mention, plus the stored weight per hypernym mention.
    """

    def __init__(self, inventory):
        super().__init__(inventory)

    def disambiguate_word(self, sentence, word_index):
        super().disambiguate_word(sentence, word_index)
        lemmas = self.lemmatize(sentence)
        if word_index not in lemmas:
            return
        target = lemmas[word_index]
        scores = dict()
        for sid in self.inventory.index[target]:
            scores[sid] = 0
            synset = self.inventory.synsets[sid]
            for context_word in lemmas.values():
                if context_word == target:
                    continue  # the target itself does not count as overlap
                if context_word in synset.synonyms:
                    scores[sid] += 1
                elif context_word in synset.hypernyms:
                    scores[sid] += synset.hypernyms[context_word]
        if scores:
            return max(scores, key=scores.get)
        return
| |
"""Fix incompatible imports and module references."""
# Authors: Collin Winter, Nick Edds

# NOTE(review): this region previously held an unresolved three-way merge
# conflict (<<<<<<< / ======= / >>>>>>>) duplicating this module verbatim
# three times, which made the file unparseable.  All copies were
# byte-identical, so the conflict is resolved by keeping a single copy.

# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain

# Maps each Python 2 module name to its Python 3 replacement.
MAPPING = {'StringIO':  'io',
           'cStringIO': 'io',
           'cPickle': 'pickle',
           '__builtin__' : 'builtins',
           'copy_reg': 'copyreg',
           'Queue': 'queue',
           'SocketServer': 'socketserver',
           'ConfigParser': 'configparser',
           'repr': 'reprlib',
           'FileDialog': 'tkinter.filedialog',
           'tkFileDialog': 'tkinter.filedialog',
           'SimpleDialog': 'tkinter.simpledialog',
           'tkSimpleDialog': 'tkinter.simpledialog',
           'tkColorChooser': 'tkinter.colorchooser',
           'tkCommonDialog': 'tkinter.commondialog',
           'Dialog': 'tkinter.dialog',
           'Tkdnd': 'tkinter.dnd',
           'tkFont': 'tkinter.font',
           'tkMessageBox': 'tkinter.messagebox',
           'ScrolledText': 'tkinter.scrolledtext',
           'Tkconstants': 'tkinter.constants',
           'Tix': 'tkinter.tix',
           'ttk': 'tkinter.ttk',
           'Tkinter': 'tkinter',
           'markupbase': '_markupbase',
           '_winreg': 'winreg',
           'thread': '_thread',
           'dummy_thread': '_dummy_thread',
           # anydbm and whichdb are handled by fix_imports2
           'dbhash': 'dbm.bsd',
           'dumbdbm': 'dbm.dumb',
           'dbm': 'dbm.ndbm',
           'gdbm': 'dbm.gnu',
           'xmlrpclib': 'xmlrpc.client',
           'DocXMLRPCServer': 'xmlrpc.server',
           'SimpleXMLRPCServer': 'xmlrpc.server',
           'httplib': 'http.client',
           'htmlentitydefs' : 'html.entities',
           'HTMLParser' : 'html.parser',
           'Cookie': 'http.cookies',
           'cookielib': 'http.cookiejar',
           'BaseHTTPServer': 'http.server',
           'SimpleHTTPServer': 'http.server',
           'CGIHTTPServer': 'http.server',
           #'test.test_support': 'test.support',
           'commands': 'subprocess',
           'UserString' : 'collections',
           'UserList' : 'collections',
           'urlparse' : 'urllib.parse',
           'robotparser' : 'urllib.robotparser',
}


def alternates(members):
    """Return a pattern alternation like "('a'|'b')" over the given names."""
    return "(" + "|".join(map(repr, members)) + ")"


def build_pattern(mapping=MAPPING):
    """Yield the lib2to3 patterns that match imports/usages of mapped modules."""
    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
    bare_names = alternates(mapping.keys())

    yield """name_import=import_name< 'import' ((%s) |
               multiple_imports=dotted_as_names< any* (%s) any* >) >
          """ % (mod_list, mod_list)
    yield """import_from< 'from' (%s) 'import' ['(']
              ( any | import_as_name< any 'as' any > |
                import_as_names< any* >)  [')'] >
          """ % mod_list
    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
               multiple_imports=dotted_as_names<
                 any* dotted_as_name< (%s) 'as' any > any* >) >
          """ % (mod_list, mod_list)

    # Find usages of module members in code e.g. thread.foo(bar)
    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names


class FixImports(fixer_base.BaseFix):

    BM_compatible = True
    keep_line_order = True
    # This is overridden in fix_imports2.
    mapping = MAPPING

    # We want to run this fixer late, so fix_import doesn't try to make stdlib
    # renames into relative imports.
    run_order = 6

    def build_pattern(self):
        return "|".join(build_pattern(self.mapping))

    def compile_pattern(self):
        # We override this, so MAPPING can be pragmatically altered and the
        # changes will be reflected in PATTERN.
        self.PATTERN = self.build_pattern()
        super(FixImports, self).compile_pattern()

    # Don't match the node if it's within another match.
    def match(self, node):
        match = super(FixImports, self).match
        results = match(node)
        if results:
            # Module usage could be in the trailer of an attribute lookup, so we
            # might have nested matches when "bare_with_attr" is present.
            if "bare_with_attr" not in results and \
                    any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False

    def start_tree(self, tree, filename):
        super(FixImports, self).start_tree(tree, filename)
        # Tracks bare module names whose usages must also be renamed.
        self.replace = {}

    def transform(self, node, results):
        """Rewrite a matched import statement or bare module usage."""
        import_mod = results.get("module_name")
        if import_mod:
            mod_name = import_mod.value
            new_name = self.mapping[mod_name]
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # marked its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys of
                # MAPPING randomly sprinkled in an import statement.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            bare_name = results["bare_with_attr"][0]
            new_name = self.replace.get(bare_name.value)
            if new_name:
                bare_name.replace(Name(new_name, prefix=bare_name.prefix))
| |
import os
import random
import shutil
import time
from unittest.mock import patch
import dask.dataframe as dd
import math
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import ray
from ray.util.dask import ray_dask_get
from ray.tests.conftest import * # noqa
from ray.experimental.data.datasource import DummyOutputDatasource
from ray.experimental.data.impl.block import Block
import ray.experimental.data.tests.util as util
def test_basic_actors(shutdown_only):
    """map() with compute="actors" runs on actors and preserves results."""
    ray.init(num_cpus=2)
    ds = ray.experimental.data.range(5)
    assert sorted(ds.map(lambda x: x + 1,
                         compute="actors").take()) == [1, 2, 3, 4, 5]
def test_basic(ray_start_regular_shared):
    """Smoke test for map(), count() and iter_rows() on a range dataset."""
    ds = ray.experimental.data.range(5)
    assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]
    assert ds.count() == 5
    assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4]
def test_write_datasource(ray_start_regular_shared):
    """write_datasource() tracks ok/failed counts and total rows written."""
    output = DummyOutputDatasource()
    ds = ray.experimental.data.range(10, parallelism=2)
    ds.write_datasource(output)
    assert output.num_ok == 1
    assert output.num_failed == 0
    assert ray.get(output.data_sink.get_rows_written.remote()) == 10

    # Disable the sink: the write must raise and be counted as a failure,
    # without changing the number of rows already written.
    ray.get(output.data_sink.set_enabled.remote(False))
    with pytest.raises(ValueError):
        ds.write_datasource(output)
    assert output.num_ok == 1
    assert output.num_failed == 1
    assert ray.get(output.data_sink.get_rows_written.remote()) == 10
def test_empty_dataset(ray_start_regular_shared):
    """Empty datasets report zero rows and unknown size/schema."""
    ds = ray.experimental.data.range(0)
    assert ds.count() == 0
    assert ds.size_bytes() is None
    assert ds.schema() is None

    # Filtering every row out also yields an unknown schema.
    ds = ray.experimental.data.range(1)
    ds = ds.filter(lambda x: x > 1)
    assert str(ds) == \
        "Dataset(num_rows=0, num_blocks=1, schema=Unknown schema)"
def test_schema(ray_start_regular_shared):
    """str() of a dataset reflects row count, block count and schema."""
    ds = ray.experimental.data.range(10)
    ds2 = ray.experimental.data.range_arrow(10)
    ds3 = ds2.repartition(5)
    ds4 = ds3.map(lambda x: {"a": "hi", "b": 1.0}).limit(5).repartition(1)
    assert str(ds) == \
        "Dataset(num_rows=10, num_blocks=10, schema=<class 'int'>)"
    assert str(ds2) == \
        "Dataset(num_rows=10, num_blocks=10, schema={value: int64})"
    assert str(ds3) == \
        "Dataset(num_rows=10, num_blocks=5, schema={value: int64})"
    assert str(ds4) == \
        "Dataset(num_rows=5, num_blocks=1, schema={a: string, b: double})"
def test_lazy_loading_exponential_rampup(ray_start_regular_shared):
    """Blocks are materialized lazily, doubling on each take() that needs more."""
    ds = ray.experimental.data.range(100, parallelism=20)
    # Materialized block count ramps 1 -> 2 -> 4 -> 8 -> 16 -> 20 (cap).
    assert len(ds._blocks._blocks) == 1
    assert ds.take(10) == list(range(10))
    assert len(ds._blocks._blocks) == 2
    assert ds.take(20) == list(range(20))
    assert len(ds._blocks._blocks) == 4
    assert ds.take(30) == list(range(30))
    assert len(ds._blocks._blocks) == 8
    assert ds.take(50) == list(range(50))
    assert len(ds._blocks._blocks) == 16
    assert ds.take(100) == list(range(100))
    assert len(ds._blocks._blocks) == 20
def test_limit(ray_start_regular_shared):
    """limit(i) returns exactly the first i rows, for every i."""
    ds = ray.experimental.data.range(100, parallelism=20)
    for i in range(100):
        assert ds.limit(i).take(200) == list(range(i))
def test_convert_types(ray_start_regular_shared):
    """map() converts between plain-object and Arrow row representations."""
    plain_ds = ray.experimental.data.range(1)
    arrow_ds = plain_ds.map(lambda x: {"a": x})
    assert arrow_ds.take() == [{"a": 0}]
    assert "ArrowRow" in arrow_ds.map(lambda x: str(x)).take()[0]

    arrow_ds = ray.experimental.data.range_arrow(1)
    assert arrow_ds.map(lambda x: "plain_{}".format(x["value"])).take() \
        == ["plain_0"]
    assert arrow_ds.map(lambda x: {"a": (x["value"],)}).take() == \
        [{"a": (0,)}]
def test_from_items(ray_start_regular_shared):
    """from_items() preserves the input values and their order."""
    ds = ray.experimental.data.from_items(["hello", "world"])
    assert ds.take() == ["hello", "world"]
def test_repartition(ray_start_regular_shared):
    """repartition() changes the block count while preserving the data."""
    ds = ray.experimental.data.range(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.sum() == 190
    assert ds._block_sizes() == [2] * 10

    ds2 = ds.repartition(5)
    assert ds2.num_blocks() == 5
    assert ds2.sum() == 190
    # TODO: would be nice to re-distribute these more evenly
    # NOTE(review): the comparison below discards its result — likely a
    # missing `assert`; left as-is since the TODO above suggests the
    # expected sizes do not hold yet.
    ds2._block_sizes() == [10, 10, 0, 0, 0]

    ds3 = ds2.repartition(20)
    assert ds3.num_blocks() == 20
    assert ds3.sum() == 190
    # NOTE(review): result discarded here as well — likely a missing `assert`.
    ds2._block_sizes() == [2] * 10 + [0] * 10

    large = ray.experimental.data.range(10000, parallelism=10)
    large = large.repartition(20)
    assert large._block_sizes() == [500] * 20
def test_repartition_arrow(ray_start_regular_shared):
    """repartition() on Arrow datasets changes blocks while preserving rows."""
    ds = ray.experimental.data.range_arrow(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.count() == 20
    assert ds._block_sizes() == [2] * 10

    ds2 = ds.repartition(5)
    assert ds2.num_blocks() == 5
    assert ds2.count() == 20
    # NOTE(review): the two bare comparisons below discard their result —
    # likely missing `assert`s; left as-is pending confirmation that the
    # expected block sizes actually hold.
    ds2._block_sizes() == [10, 10, 0, 0, 0]

    ds3 = ds2.repartition(20)
    assert ds3.num_blocks() == 20
    assert ds3.count() == 20
    ds2._block_sizes() == [2] * 10 + [0] * 10

    large = ray.experimental.data.range_arrow(10000, parallelism=10)
    large = large.repartition(20)
    assert large._block_sizes() == [500] * 20
def test_from_pandas(ray_start_regular_shared):
    """from_pandas() preserves the rows of the source DataFrames, in order."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.experimental.data.from_pandas([ray.put(df1), ray.put(df2)])
    values = [(r["one"], r["two"]) for r in ds.take(6)]
    rows = [(r.one, r.two) for _, r in pd.concat([df1, df2]).iterrows()]
    assert values == rows
def test_to_pandas(ray_start_regular_shared):
    """to_pandas() round-trips an Arrow range dataset into an equal DataFrame."""
    n = 5
    df = pd.DataFrame({"value": list(range(n))})
    ds = ray.experimental.data.range_arrow(n)
    dfds = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert df.equals(dfds)
def test_pandas_roundtrip(ray_start_regular_shared):
    """from_pandas() followed by to_pandas() reproduces the original data."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.experimental.data.from_pandas([ray.put(df1), ray.put(df2)])
    dfds = pd.concat(ray.get(ds.to_pandas()))
    assert pd.concat([df1, df2]).equals(dfds)
def test_parquet_read(ray_start_regular_shared, tmp_path):
    """read_parquet() answers metadata queries lazily, then reads on take()."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    table = pa.Table.from_pandas(df1)
    pq.write_table(table, os.path.join(str(tmp_path), "test1.parquet"))
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    table = pa.Table.from_pandas(df2)
    pq.write_table(table, os.path.join(str(tmp_path), "test2.parquet"))
    ds = ray.experimental.data.read_parquet(str(tmp_path))

    # Test metadata-only parquet ops: none of these should trigger a read,
    # so the materialized block count stays at 1.
    assert len(ds._blocks._blocks) == 1
    assert ds.count() == 6
    assert ds.size_bytes() > 0
    assert ds.schema() is not None
    input_files = ds.input_files()
    assert len(input_files) == 2, input_files
    assert "test1.parquet" in str(input_files)
    assert "test2.parquet" in str(input_files)
    assert str(ds) == \
        "Dataset(num_rows=6, num_blocks=2, " \
        "schema={one: int64, two: string})", ds
    assert repr(ds) == \
        "Dataset(num_rows=6, num_blocks=2, " \
        "schema={one: int64, two: string})", ds
    assert len(ds._blocks._blocks) == 1

    # Forces a data read.
    values = [[s["one"], s["two"]] for s in ds.take()]
    assert len(ds._blocks._blocks) == 2
    assert sorted(values) == [[1, "a"], [2, "b"], [3, "c"], [4, "e"], [5, "f"],
                              [6, "g"]]
def test_parquet_write(ray_start_regular_shared, tmp_path):
    """write_parquet() should emit one file per block with the original rows."""
    frames = [
        pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}),
        pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}),
    ]
    combined = pd.concat(frames)
    ds = ray.experimental.data.from_pandas([ray.put(f) for f in frames])
    out_dir = os.path.join(tmp_path, "test_parquet_dir")
    os.mkdir(out_dir)
    ds.write_parquet(out_dir)
    written = [
        pd.read_parquet(os.path.join(out_dir, "data0.parquet")),
        pd.read_parquet(os.path.join(out_dir, "data1.parquet")),
    ]
    assert combined.equals(pd.concat(written))
def test_convert_to_pyarrow(ray_start_regular_shared, tmp_path):
    """A simple-range dataset converts to Dask and round-trips via parquet."""
    ds = ray.experimental.data.range(100)
    # sum of 0..99 == 4950, computed through the Dask bridge.
    assert ds.to_dask().sum().compute()[0] == 4950
    out_dir = os.path.join(tmp_path, "test_parquet_dir")
    os.mkdir(out_dir)
    ds.write_parquet(out_dir)
    assert ray.experimental.data.read_parquet(out_dir).count() == 100
def test_pyarrow(ray_start_regular_shared):
    """Check map/filter/flat_map over an Arrow-backed dataset of record dicts."""
    ds = ray.experimental.data.range_arrow(5)
    assert ds.map(lambda x: {"b": x["value"] + 2}).take() == \
        [{"b": 2}, {"b": 3}, {"b": 4}, {"b": 5}, {"b": 6}]
    assert ds.map(lambda x: {"b": x["value"] + 2}) \
        .filter(lambda x: x["b"] % 2 == 0).take() == \
        [{"b": 2}, {"b": 4}, {"b": 6}]
    assert ds.filter(lambda x: x["value"] == 0) \
        .flat_map(lambda x: [{"b": x["value"] + 2}, {"b": x["value"] + 20}]) \
        .take() == [{"b": 2}, {"b": 20}]
def test_uri_parser():
    """_parse_paths should detect local vs. s3 filesystems and strip schemes."""
    from ray.experimental.data.read_api import _parse_paths

    fs, path = _parse_paths("/local/path")
    assert (path, fs.type_name) == ("/local/path", "local")

    fs, path = _parse_paths("./")
    assert (path, fs.type_name) == ("./", "local")

    fs, path = _parse_paths("s3://bucket/dir")
    assert (path, fs.type_name) == ("bucket/dir", "s3")

    fs, path = _parse_paths(["s3://bucket/dir_1", "s3://bucket/dir_2"])
    assert (path, fs.type_name) == (["bucket/dir_1", "bucket/dir_2"], "s3")

    # Mixed filesystems and empty input are rejected.
    with pytest.raises(ValueError):
        _parse_paths(["s3://bucket/dir_1", "/path/local"])
    with pytest.raises(ValueError):
        _parse_paths([])
def test_read_binary_files(ray_start_regular_shared):
    """Read binary files and verify contents plus metadata-only ops."""
    with util.gen_bin_files(10) as (_, paths):
        ds = ray.experimental.data.read_binary_files(paths, parallelism=10)
        for i, item in enumerate(ds.iter_rows()):
            # Use a context manager so the handle is closed deterministically
            # (previously the handle returned by open() was leaked).
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
        # Test metadata ops.
        assert ds.count() == 10
        assert "bytes" in str(ds.schema()), ds
        assert "bytes" in str(ds), ds
def test_read_binary_files_with_paths(ray_start_regular_shared):
    """With include_paths=True each row is a (path, bytes) pair."""
    with util.gen_bin_files(10) as (_, paths):
        ds = ray.experimental.data.read_binary_files(
            paths, include_paths=True, parallelism=10)
        for i, (path, item) in enumerate(ds.iter_rows()):
            assert path == paths[i]
            # Close the file handle promptly (the original leaked it).
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
def test_read_binary_files_with_fs(ray_start_regular_shared):
    """Reading with an explicit pyarrow filesystem yields the same bytes."""
    with util.gen_bin_files(10) as (tempdir, paths):
        # All the paths are absolute, so we want the root file system.
        fs, _ = pa.fs.FileSystem.from_uri("/")
        ds = ray.experimental.data.read_binary_files(
            paths, filesystem=fs, parallelism=10)
        for i, item in enumerate(ds.iter_rows()):
            # Close the file handle promptly (the original leaked it).
            with open(paths[i], "rb") as f:
                expected = f.read()
            assert expected == item
def test_iter_batches_basic(ray_start_regular_shared):
    """Exercise iter_batches(): output formats, batch sizing, drop_last,
    and block prefetching over a four-block pandas-backed dataset."""
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": [5, 6, 7]})
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": [8, 9, 10]})
    df4 = pd.DataFrame({"one": [10, 11, 12], "two": [11, 12, 13]})
    dfs = [df1, df2, df3, df4]
    ds = ray.experimental.data.from_pandas(
        [ray.put(df1), ray.put(df2),
         ray.put(df3), ray.put(df4)])

    # Default: one pandas DataFrame per block.
    for batch, df in zip(ds.iter_batches(), dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)

    # pyarrow.Table format.
    for batch, df in zip(ds.iter_batches(batch_format="pyarrow"), dfs):
        assert isinstance(batch, pa.Table)
        assert batch.equals(pa.Table.from_pandas(df))

    # blocks format: raw Block objects.
    for batch, df in zip(ds.iter_batches(batch_format="_blocks"), dfs):
        assert isinstance(batch, Block)
        assert batch.to_pandas().equals(df)

    # Batch size that divides every block evenly.
    batch_size = 2
    batches = list(ds.iter_batches(batch_size=batch_size))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))

    # Batch size larger than block.
    batch_size = 4
    batches = list(ds.iter_batches(batch_size=batch_size))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))

    # Batch size drop partial.
    batch_size = 5
    batches = list(ds.iter_batches(batch_size=batch_size, drop_last=True))
    assert all(len(batch) == batch_size for batch in batches)
    assert (len(batches) == (len(df1) + len(df2) + len(df3) + len(df4)) //
            batch_size)
    assert pd.concat(
        batches, ignore_index=True).equals(
            pd.concat(dfs, ignore_index=True)[:10])

    # Batch size don't drop partial: last batch holds the remainder.
    batch_size = 5
    batches = list(ds.iter_batches(batch_size=batch_size, drop_last=False))
    assert all(len(batch) == batch_size for batch in batches[:-1])
    assert (len(batches[-1]) == (len(df1) + len(df2) + len(df3) + len(df4)) %
            batch_size)
    assert (len(batches) == math.ceil(
        (len(df1) + len(df2) + len(df3) + len(df4)) / batch_size))
    assert pd.concat(
        batches, ignore_index=True).equals(pd.concat(dfs, ignore_index=True))

    # Prefetch.
    for batch, df in zip(ds.iter_batches(prefetch_blocks=1), dfs):
        assert isinstance(batch, pd.DataFrame)
        assert batch.equals(df)
def test_iter_batches_grid(ray_start_regular_shared):
    """Randomized grid test of iter_batches() slicing/combining semantics."""
    # Tests slicing, batch combining, and partial batch dropping logic over
    # a grid of dataset, batching, and dropping configurations.
    # Grid: num_blocks x num_rows_block_1 x ... x num_rows_block_N x
    # batch_size x drop_last
    # Seed is printed so a failing configuration can be reproduced.
    seed = int(time.time())
    print(f"Seeding RNG for test_iter_batches_grid with: {seed}")
    random.seed(seed)
    max_num_blocks = 20
    max_num_rows_per_block = 20
    num_blocks_samples = 3
    block_sizes_samples = 3
    batch_size_samples = 3

    for num_blocks in np.random.randint(
            1, max_num_blocks + 1, size=num_blocks_samples):
        block_sizes_list = [
            np.random.randint(1, max_num_rows_per_block + 1, size=num_blocks)
            for _ in range(block_sizes_samples)
        ]
        for block_sizes in block_sizes_list:
            # Create the dataset with the given block sizes.
            dfs = []
            running_size = 0
            for block_size in block_sizes:
                dfs.append(
                    pd.DataFrame({
                        "value": list(
                            range(running_size, running_size + block_size))
                    }))
                running_size += block_size
            num_rows = running_size
            ds = ray.experimental.data.from_pandas([ray.put(df) for df in dfs])
            for batch_size in np.random.randint(
                    1, num_rows + 1, size=batch_size_samples):
                for drop_last in (False, True):
                    batches = list(
                        ds.iter_batches(
                            batch_size=batch_size, drop_last=drop_last))
                    if num_rows % batch_size == 0 or not drop_last:
                        # Number of batches should be equal to
                        # num_rows / batch_size, rounded up.
                        assert len(batches) == math.ceil(num_rows / batch_size)
                        # Concatenated batches should equal the DataFrame
                        # representation of the entire dataset.
                        assert pd.concat(
                            batches, ignore_index=True).equals(
                                pd.concat(
                                    ray.get(ds.to_pandas()),
                                    ignore_index=True))
                    else:
                        # Number of batches should be equal to
                        # num_rows / batch_size, rounded down.
                        assert len(batches) == num_rows // batch_size
                        # Concatenated batches should equal the DataFrame
                        # representation of the dataset with the partial batch
                        # remainder sliced off.
                        assert pd.concat(
                            batches, ignore_index=True).equals(
                                pd.concat(
                                    ray.get(ds.to_pandas()), ignore_index=True)
                                [:batch_size * (num_rows // batch_size)])
                    if num_rows % batch_size == 0 or drop_last:
                        assert all(
                            len(batch) == batch_size for batch in batches)
                    else:
                        assert all(
                            len(batch) == batch_size for batch in batches[:-1])
                        assert len(batches[-1]) == num_rows % batch_size
def test_lazy_loading_iter_batches_exponential_rampup(
        ray_start_regular_shared):
    """iter_batches() materializes blocks with an exponential ramp-up
    (1, 2, 4, ...) instead of eagerly fetching everything up front."""
    ds = ray.experimental.data.range(32, parallelism=8)
    # Expected number of materialized blocks after each yielded batch.
    expected_num_blocks = [1, 2, 4, 4, 8, 8, 8, 8]
    for _, expected in zip(ds.iter_batches(), expected_num_blocks):
        assert len(ds._blocks._blocks) == expected
def test_map_batch(ray_start_regular_shared, tmp_path):
    """map_batches(): input validation, pandas/pyarrow batch formats, batch
    sizing, list return values, and rejection of invalid return types."""
    # Test input validation
    ds = ray.experimental.data.range(5)
    with pytest.raises(ValueError):
        ds.map_batches(
            lambda x: x + 1, batch_format="pyarrow", batch_size=-1).take()

    # Test pandas
    df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]})
    table = pa.Table.from_pandas(df)
    pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))
    ds = ray.experimental.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(lambda df: df + 1, batch_size=1).take()
    values = [s["one"] for s in ds_list]
    assert values == [2, 3, 4]
    values = [s["two"] for s in ds_list]
    assert values == [3, 4, 5]

    # Test Pyarrow (identity mapping; values unchanged)
    ds = ray.experimental.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(
        lambda pa: pa, batch_size=1, batch_format="pyarrow").take()
    values = [s["one"] for s in ds_list]
    assert values == [1, 2, 3]
    values = [s["two"] for s in ds_list]
    assert values == [2, 3, 4]

    # Test batch
    size = 300
    ds = ray.experimental.data.range(size)
    ds_list = ds.map_batches(lambda df: df + 1, batch_size=17).take(limit=size)
    for i in range(size):
        # The pandas column is "0", and it originally has rows from 0~299.
        # After the map batch, it should have 1~300.
        row = ds_list[i]
        assert row["0"] == i + 1
    assert ds.count() == 300

    # Test the lambda returns different types than the batch_format
    # pandas => list block
    ds = ray.experimental.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(lambda df: [1], batch_size=1).take()
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # pyarrow => list block
    ds = ray.experimental.data.read_parquet(str(tmp_path))
    ds_list = ds.map_batches(
        lambda df: [1], batch_size=1, batch_format="pyarrow").take()
    assert ds_list == [1, 1, 1]
    assert ds.count() == 3

    # Test the wrong return value raises an exception.
    ds = ray.experimental.data.read_parquet(str(tmp_path))
    with pytest.raises(ValueError):
        ds_list = ds.map_batches(
            lambda df: 1, batch_size=2, batch_format="pyarrow").take()
def test_split(ray_start_regular_shared):
    """split(n) partitions blocks evenly and preserves the total sum."""
    ds = ray.experimental.data.range(20, parallelism=10)
    assert ds.num_blocks() == 10
    assert ds.sum() == 190
    assert ds._block_sizes() == [2] * 10

    # (number of splits, expected block count per split dataset)
    cases = [
        (5, [2] * 5),
        (3, [4, 3, 3]),
        (1, [10]),
        (10, [1] * 10),
        (11, [1] * 10 + [0]),
    ]
    for num_splits, expected_blocks in cases:
        shards = ds.split(num_splits)
        assert expected_blocks == [len(shard._blocks) for shard in shards]
        assert 190 == sum(shard.sum() for shard in shards)
def test_split_hints(ray_start_regular_shared):
    """split() with locality hints should co-locate blocks with actors on the
    same node where possible, while keeping the split sizes balanced."""

    @ray.remote
    class Actor(object):
        # Minimal actor; only its node placement (mocked below) matters.
        def __init__(self):
            pass

    def assert_split_assignment(block_node_ids, actor_node_ids,
                                expected_split_result):
        """Helper function to setup split hints test.

        Args:
            block_node_ids: a list of blocks with their locations. For
                example ["node1", "node2"] represents two blocks with
                "node1", "node2" as their location respectively.
            actor_node_ids: a list of actors with their locations. For
                example ["node1", "node2"] represents two actors with
                "node1", "node2" as their location respectively.
            expected_split_result: a list of allocation result, each entry
                in the list stores the block_index in the split dataset.
                For example, [[0, 1], [2]] represents the split result has
                two datasets, datasets[0] contains block 0 and 1; and
                datasets[1] contains block 2.
        """
        num_blocks = len(block_node_ids)
        ds = ray.experimental.data.range(num_blocks, parallelism=num_blocks)
        blocks = list(ds._blocks)
        assert len(block_node_ids) == len(blocks)
        actors = [Actor.remote() for i in range(len(actor_node_ids))]
        # Mock both object locations and actor placement so the test does
        # not depend on real cluster topology.
        with patch("ray.experimental.get_object_locations") as location_mock:
            with patch("ray.state.actors") as state_mock:
                block_locations = {}
                for i, node_id in enumerate(block_node_ids):
                    if node_id:
                        block_locations[blocks[i]] = {"node_ids": [node_id]}
                location_mock.return_value = block_locations
                actor_state = {}
                for i, node_id in enumerate(actor_node_ids):
                    actor_state[actors[i]._actor_id.hex()] = {
                        "Address": {
                            "NodeID": node_id
                        }
                    }
                state_mock.return_value = actor_state

                datasets = ds.split(len(actors), actors)
                assert len(datasets) == len(actors)
                for i in range(len(actors)):
                    assert {blocks[j]
                            for j in expected_split_result[i]} == set(
                                datasets[i]._blocks)

    assert_split_assignment(["node2", "node1", "node1"], ["node1", "node2"],
                            [[1, 2], [0]])
    assert_split_assignment(["node1", "node1", "node1"], ["node1", "node2"],
                            [[2, 1], [0]])
    assert_split_assignment(["node2", "node2", None], ["node1", "node2"],
                            [[0, 2], [1]])
    assert_split_assignment(["node2", "node2", None], [None, None],
                            [[2, 1], [0]])
    assert_split_assignment(["n1", "n2", "n3", "n1", "n2"], ["n1", "n2"],
                            [[0, 2, 3], [1, 4]])
    assert_split_assignment(["n1", "n2"], ["n1", "n2", "n3"], [[0], [1], []])

    # perfect split:
    #
    # split 300 blocks
    #   with node_ids interleaving between "n0", "n1", "n2"
    #
    # to 3 actors
    #   with has node_id "n1", "n2", "n0"
    #
    # expect that block 1, 4, 7... are assigned to actor with node_id n1
    #             block 2, 5, 8... are assigned to actor with node_id n2
    #             block 0, 3, 6... are assigned to actor with node_id n0
    assert_split_assignment(
        ["n0", "n1", "n2"] * 100, ["n1", "n2", "n0"],
        [range(1, 300, 3),
         range(2, 300, 3),
         range(0, 300, 3)])

    # even split regardless of locality:
    #
    # split 301 blocks
    #   with block 0 to block 50 on "n0",
    #        block 51 to block 300 on "n1"
    #
    # to 3 actors
    #   with node_ids "n1", "n2", "n0"
    #
    # expect that block 200 to block 300 are assigned to actor with node_id n1
    #             block 100 to block 199 are assigned to actor with node_id n2
    #             block 0 to block 99 are assigned to actor with node_id n0
    assert_split_assignment(["n0"] * 50 + ["n1"] * 251, ["n1", "n2", "n0"], [
        range(200, 301),
        range(100, 200),
        list(range(0, 50)) + list(range(50, 100))
    ])
def test_from_dask(ray_start_regular_shared):
    """from_dask() must preserve all partitions and rows."""
    source = pd.DataFrame({"one": list(range(100)), "two": list(range(100))})
    ddf = dd.from_pandas(source, npartitions=10)
    ds = ray.experimental.data.from_dask(ddf)
    result = pd.concat(ray.get(ds.to_pandas()))
    assert source.equals(result)
def test_to_dask(ray_start_regular_shared):
    """to_dask() output must match under explicit and implicit schedulers."""
    frames = [
        pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]}),
        pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}),
    ]
    combined = pd.concat(frames)
    ds = ray.experimental.data.from_pandas([ray.put(f) for f in frames])
    ddf = ds.to_dask()
    # Explicit Dask-on-Ray
    assert combined.equals(ddf.compute(scheduler=ray_dask_get))
    # Implicit Dask-on-Ray.
    assert combined.equals(ddf.compute())
def test_json_read(ray_start_regular_shared, tmp_path):
    """read_json() over single files, multiple files, directories, and mixes
    of directories and files; checks data equality and metadata ops."""
    # Single file.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(tmp_path, "test1.json")
    df1.to_json(path1, orient="records", lines=True)
    ds = ray.experimental.data.read_json(path1)
    assert df1.equals(ray.get(ds.to_pandas())[0])
    # Test metadata ops.
    assert ds.count() == 3
    assert ds.input_files() == [path1]
    assert ds.schema() is None

    # Two files, parallelism=2.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "test2.json")
    df2.to_json(path2, orient="records", lines=True)
    ds = ray.experimental.data.read_json([path1, path2], parallelism=2)
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert pd.concat([df1, df2]).equals(dsdf)
    # Test metadata ops.
    for block, meta in zip(ds._blocks, ds._blocks.get_metadata()):
        # BUG FIX: this comparison previously lacked the ``assert`` keyword,
        # making it a no-op statement that never checked anything.
        assert ray.get(block).size_bytes() == meta.size_bytes

    # Three files, parallelism=2.
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    path3 = os.path.join(tmp_path, "test3.json")
    df3.to_json(path3, orient="records", lines=True)
    df = pd.concat([df1, df2, df3], ignore_index=True)
    ds = ray.experimental.data.read_json([path1, path2, path3], parallelism=2)
    dsdf = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert df.equals(dsdf)

    # Directory, two files.
    path = os.path.join(tmp_path, "test_json_dir")
    os.mkdir(path)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(path, "data0.json")
    df1.to_json(path1, orient="records", lines=True)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(path, "data1.json")
    df2.to_json(path2, orient="records", lines=True)
    ds = ray.experimental.data.read_json(path)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(path)

    # Two directories, three files.
    path1 = os.path.join(tmp_path, "test_json_dir1")
    path2 = os.path.join(tmp_path, "test_json_dir2")
    os.mkdir(path1)
    os.mkdir(path2)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    file_path1 = os.path.join(path1, "data0.json")
    df1.to_json(file_path1, orient="records", lines=True)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    file_path2 = os.path.join(path2, "data1.json")
    df2.to_json(file_path2, orient="records", lines=True)
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    file_path3 = os.path.join(path2, "data2.json")
    df3.to_json(file_path3, orient="records", lines=True)
    ds = ray.experimental.data.read_json([path1, path2])
    df = pd.concat([df1, df2, df3])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(path1)
    shutil.rmtree(path2)

    # Directory and file, two files.
    dir_path = os.path.join(tmp_path, "test_json_dir")
    os.mkdir(dir_path)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(dir_path, "data0.json")
    df1.to_json(path1, orient="records", lines=True)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "data1.json")
    df2.to_json(path2, orient="records", lines=True)
    ds = ray.experimental.data.read_json([dir_path, path2])
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(dir_path)
def test_json_write(ray_start_regular_shared, tmp_path):
    """write_json() emits one data<i>.json file per block."""
    out_dir = os.path.join(tmp_path, "test_json_dir")

    # Single block.
    os.mkdir(out_dir)
    df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.experimental.data.from_pandas([ray.put(df)])
    ds.write_json(out_dir)
    file_path = os.path.join(out_dir, "data0.json")
    assert df.equals(pd.read_json(file_path))
    shutil.rmtree(out_dir)

    # Two blocks.
    os.mkdir(out_dir)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.experimental.data.from_pandas([ray.put(df), ray.put(df2)])
    ds.write_json(out_dir)
    file_path2 = os.path.join(out_dir, "data1.json")
    written = pd.concat([pd.read_json(file_path), pd.read_json(file_path2)])
    assert pd.concat([df, df2]).equals(written)
    shutil.rmtree(out_dir)
def test_csv_read(ray_start_regular_shared, tmp_path):
    """read_csv() over single files, multiple files, directories, and mixes
    of directories and files; checks data equality and metadata ops."""
    # Single file.
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(tmp_path, "test1.csv")
    df1.to_csv(path1, index=False)
    ds = ray.experimental.data.read_csv(path1)
    dsdf = ray.get(ds.to_pandas())[0]
    assert df1.equals(dsdf)
    # Test metadata ops.
    assert ds.count() == 3
    assert ds.input_files() == [path1]
    assert ds.schema() is None

    # Two files, parallelism=2.
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "test2.csv")
    df2.to_csv(path2, index=False)
    ds = ray.experimental.data.read_csv([path1, path2], parallelism=2)
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    df = pd.concat([df1, df2])
    assert df.equals(dsdf)
    # Test metadata ops.
    for block, meta in zip(ds._blocks, ds._blocks.get_metadata()):
        # BUG FIX: this comparison previously lacked the ``assert`` keyword,
        # making it a no-op statement that never checked anything.
        assert ray.get(block).size_bytes() == meta.size_bytes

    # Three files, parallelism=2.
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    path3 = os.path.join(tmp_path, "test3.csv")
    df3.to_csv(path3, index=False)
    ds = ray.experimental.data.read_csv([path1, path2, path3], parallelism=2)
    df = pd.concat([df1, df2, df3], ignore_index=True)
    dsdf = pd.concat(ray.get(ds.to_pandas()), ignore_index=True)
    assert df.equals(dsdf)

    # Directory, two files.
    path = os.path.join(tmp_path, "test_csv_dir")
    os.mkdir(path)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(path, "data0.csv")
    df1.to_csv(path1, index=False)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(path, "data1.csv")
    df2.to_csv(path2, index=False)
    ds = ray.experimental.data.read_csv(path)
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(path)

    # Two directories, three files.
    path1 = os.path.join(tmp_path, "test_csv_dir1")
    path2 = os.path.join(tmp_path, "test_csv_dir2")
    os.mkdir(path1)
    os.mkdir(path2)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    file_path1 = os.path.join(path1, "data0.csv")
    df1.to_csv(file_path1, index=False)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    file_path2 = os.path.join(path2, "data1.csv")
    df2.to_csv(file_path2, index=False)
    df3 = pd.DataFrame({"one": [7, 8, 9], "two": ["h", "i", "j"]})
    file_path3 = os.path.join(path2, "data2.csv")
    df3.to_csv(file_path3, index=False)
    ds = ray.experimental.data.read_csv([path1, path2])
    df = pd.concat([df1, df2, df3])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(path1)
    shutil.rmtree(path2)

    # Directory and file, two files.
    dir_path = os.path.join(tmp_path, "test_csv_dir")
    os.mkdir(dir_path)
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    path1 = os.path.join(dir_path, "data0.csv")
    df1.to_csv(path1, index=False)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    path2 = os.path.join(tmp_path, "data1.csv")
    df2.to_csv(path2, index=False)
    ds = ray.experimental.data.read_csv([dir_path, path2])
    df = pd.concat([df1, df2])
    dsdf = pd.concat(ray.get(ds.to_pandas()))
    assert df.equals(dsdf)
    shutil.rmtree(dir_path)
def test_csv_write(ray_start_regular_shared, tmp_path):
    """write_csv() emits one data<i>.csv file per block."""
    out_dir = os.path.join(tmp_path, "test_csv_dir")

    # Single block.
    os.mkdir(out_dir)
    df = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    ds = ray.experimental.data.from_pandas([ray.put(df)])
    ds.write_csv(out_dir)
    file_path = os.path.join(out_dir, "data0.csv")
    assert df.equals(pd.read_csv(file_path))
    shutil.rmtree(out_dir)

    # Two blocks.
    os.mkdir(out_dir)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    ds = ray.experimental.data.from_pandas([ray.put(df), ray.put(df2)])
    ds.write_csv(out_dir)
    file_path2 = os.path.join(out_dir, "data1.csv")
    written = pd.concat([pd.read_csv(file_path), pd.read_csv(file_path2)])
    assert pd.concat([df, df2]).equals(written)
    shutil.rmtree(out_dir)
if __name__ == "__main__":
    import sys

    # Allow running this test module directly; delegates to pytest.
    sys.exit(pytest.main(["-v", __file__]))
| |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Configuration options controlling locking behaviour: inter-process file
# locking can be disabled entirely, and the lock-file directory is
# configurable (defaulting to the NOVA_LOCK_PATH environment variable).
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("NOVA_LOCK_PATH"),
               help=('Directory to use for lock files.'))
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    """Override the default value of the lock_path config option."""
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # File object holding the OS-level lock; opened lazily in __enter__.
        self.lockfile = None
        # Path of the lock file on disk.
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release; an IOError here is logged rather than raised
        # so the exiting code path is not masked by a cleanup failure.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Platform-specific subclasses implement the non-blocking acquire.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific subclasses implement the release.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    """Windows variant using msvcrt.locking() on one byte of the lock file."""

    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    """POSIX variant using non-blocking fcntl.lockf() file locks."""

    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Pick the platform-appropriate lock implementation, importing the matching
# locking primitive only on the platform where it exists.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# Registry of in-process semaphores keyed by lock name; entries disappear
# automatically once no thread holds a reference to them.
_semaphores = weakref.WeakValueDictionary()
# Guards creation/lookup in _semaphores.
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # Look up (or lazily create) the per-name in-process semaphore.
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    # Join prefix and name with exactly one '-' separator.
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem
        finally:
            local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # try/finally so the release message is logged even when the
            # wrapped function raises.
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
        return inner
    return wrap
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """
    # functools.partial keeps synchronized()'s remaining signature intact.
    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
        python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to
    all your tests in an environment variable. The temporary dir will be
    deleted afterwards and the return value will be preserved.
    """
    scratch_dir = tempfile.mkdtemp()
    os.environ["NOVA_LOCK_PATH"] = scratch_dir
    try:
        # argv[0] is this script itself; the rest is the command to run.
        return subprocess.call(argv[1:])
    finally:
        shutil.rmtree(scratch_dir, ignore_errors=True)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import sys
import socket
try:
from urllib.parse import quote, urlparse
from socketserver import ThreadingMixIn
from http.server import HTTPServer
except ImportError:
from urllib import quote
from urlparse import urlparse
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer
from werkzeug.serving import WSGIRequestHandler
from werkzeug.wrappers import Request as _Request
from werkzeug.datastructures import Headers
from isso.compat import string_types
def host(environ):  # pragma: no cover
    """
    Reconstruct the request host URL from a WSGI environment.

    A modified version of the URL reconstruction recipe from
    http://www.python.org/dev/peps/pep-0333/#url-reconstruction
    """
    scheme = environ['wsgi.url_scheme']
    parts = [scheme, '://']

    # Prefer the Host header; fall back to the server name.
    parts.append(environ.get('HTTP_HOST') or environ['SERVER_NAME'])

    # Append the port unless it is the scheme's default one.
    default_port = '443' if scheme == 'https' else '80'
    if environ['SERVER_PORT'] != default_port:
        parts.append(':' + environ['SERVER_PORT'])

    parts.append(quote(environ.get('SCRIPT_NAME', '')))
    return ''.join(parts)
def urlsplit(name):
    """
    Parse :param:`name` into (netloc, port, ssl)
    """
    if not isinstance(name, string_types):
        name = str(name)

    # Ensure a scheme so urlparse fills netloc/port correctly.
    if not name.startswith(('http://', 'https://')):
        name = 'http://' + name

    parsed = urlparse(name)
    secure = parsed.scheme == 'https'
    if secure and parsed.port is None:
        return parsed.netloc, 443, True

    hostname = parsed.netloc.rsplit(':')[0]
    return hostname, parsed.port or 80, secure
def urljoin(netloc, port, ssl):
    """Assemble a URL from ``(netloc, port, ssl)`` -- the counter-part of
    :func:`urlsplit`. The port is omitted when it is the scheme default
    (443 for https, 80 for http).
    """
    scheme = "https" if ssl else "http"
    default_port = 443 if ssl else 80
    url = scheme + "://" + netloc
    if port != default_port:
        url += ":%i" % port
    return url
def origin(hosts):
    """Return a function mapping a WSGI environ to a valid HTTP Origin
    drawn from *hosts*, or ``http://invalid.local`` when no hosts are
    configured.
    """
    allowed = [urlsplit(h) for h in hosts]

    def func(environ):
        if not allowed:
            return "http://invalid.local"
        loc = environ.get("HTTP_ORIGIN", environ.get("HTTP_REFERER", None))
        if loc is not None:
            split = urlsplit(loc)
            if split in allowed:
                return split and urljoin(*split)
        # Unknown or missing origin: fall back to the first configured host.
        return urljoin(*allowed[0])

    return func
class SubURI(object):
    """WSGI middleware honouring the ``X-Script-Name`` header: moves the
    sub-URI prefix from ``PATH_INFO`` into ``SCRIPT_NAME`` so the wrapped
    app can be mounted below a path prefix."""

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        prefix = environ.get('HTTP_X_SCRIPT_NAME')
        if prefix:
            environ['SCRIPT_NAME'] = prefix
            path = environ['PATH_INFO']
            if path.startswith(prefix):
                # Strip the prefix so routing sees the app-relative path.
                environ['PATH_INFO'] = path[len(prefix):]
        return self.app(environ, start_response)
class CORSMiddleware(object):
    """WSGI middleware that decorates every response with CORS
    (Cross-Origin Resource Sharing) headers and answers pre-flight
    ``OPTIONS`` requests directly."""

    methods = ("HEAD", "GET", "POST", "PUT", "DELETE")

    def __init__(self, app, origin, allowed=None, exposed=None):
        self.app = app
        self.origin = origin    # callable(environ) -> origin string
        self.allowed = allowed  # iterable of allowed request headers, or None
        self.exposed = exposed  # iterable of exposed response headers, or None

    def __call__(self, environ, start_response):

        def start_response_with_cors(status, response_headers, exc_info=None):
            hdrs = Headers(response_headers)
            hdrs.add("Access-Control-Allow-Origin", self.origin(environ))
            hdrs.add("Access-Control-Allow-Credentials", "true")
            hdrs.add("Access-Control-Allow-Methods", ", ".join(self.methods))
            if self.allowed:
                hdrs.add("Access-Control-Allow-Headers", ", ".join(self.allowed))
            if self.exposed:
                hdrs.add("Access-Control-Expose-Headers", ", ".join(self.exposed))
            return start_response(status, hdrs.to_list(), exc_info)

        # Pre-flight request: short-circuit without invoking the app.
        if environ.get("REQUEST_METHOD") == "OPTIONS":
            start_response_with_cors("200 Ok", [("Content-Type", "text/plain")])
            return []

        return self.app(environ, start_response_with_cors)
class LegacyWerkzeugMiddleware(object):
    # Compatibility shim for werkzeug 0.8, which may emit byte-string
    # header names -- https://github.com/posativ/isso/pull/170

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):

        def to_native(value, charset=sys.getdefaultencoding(), errors='strict'):
            # Pass through None and text unchanged; decode byte strings.
            if value is None or isinstance(value, str):
                return value
            return value.decode(charset, errors)

        def fix_headers(status, headers, exc_info=None):
            decoded = [(to_native(name), value) for name, value in headers]
            return start_response(status, decoded, exc_info)

        return self.app(environ, fix_headers)
class Request(_Request):
    """werkzeug request subclass with a bounded request body size."""

    # Assuming UTF-8, comments with 65536 characters would consume
    # 128 kb memory. The remaining 128 kb cover additional parameters
    # and WSGI headers.
    max_content_length = 256 * 1024
class SocketWSGIRequestHandler(WSGIRequestHandler):
    """WSGI request handler for UNIX domain sockets: a UNIX socket has no
    (host, port) peer address, so a placeholder is substituted before
    werkzeug builds the WSGI environ."""

    def run_wsgi(self):
        # REMOTE_ADDR is meaningless on a UNIX socket; fake one.
        self.client_address = ("<local>", 0)
        super(SocketWSGIRequestHandler, self).run_wsgi()
class SocketHTTPServer(ThreadingMixIn, HTTPServer):
    """
    A simple SocketServer to serve werkzeug's WSGIRequestHandler over a
    UNIX domain socket.

    Note: ``ThreadingMixIn`` must precede ``HTTPServer`` in the base
    list; with the order reversed, ``BaseServer.process_request`` wins
    the MRO lookup and requests are served sequentially even though
    ``multithread`` is set.
    """

    multithread = True
    multiprocess = False
    allow_reuse_address = 1
    # Listen on a UNIX domain socket instead of TCP.
    address_family = socket.AF_UNIX
    request_queue_size = 128

    def __init__(self, sock, app):
        # ``sock`` is the UNIX socket path to bind to.
        HTTPServer.__init__(self, sock, SocketWSGIRequestHandler)
        self.app = app
        # Attributes expected by werkzeug's serving machinery.
        self.ssl_context = None
        self.shutdown_signal = False
| |
# -*- coding: utf-8 -*-
from heinzel.core import models
from heinzel.core import exceptions
from heinzel.tests.utils import Fixture, runtests
from model_examples import Car, Brand, Manufacturer, Driver, Key
# Make the ORM aware of every model class these tests use.
models.register([Manufacturer, Brand, Car, Driver, Key])
class BasicAssumptions(Fixture):
    """Smoke test: builds the shared fixture data set (manufacturers,
    brands, cars, drivers, keys) and wires up their relations without
    asserting anything -- it passes if no exception is raised."""

    def runTest(self):
        # Manufacturer -> Brand -> Car hierarchy for BMW.
        bmwgroup = Manufacturer(name="Bayerische Motoren Werke AG")
        bmwgroup.save()
        bmw = Brand(name="BMW")
        bmw.save()
        bmw.manufacturer = bmwgroup
        bmw3er = Car(name="3er")
        bmw7er = Car(name="7er")
        bmwX3 = Car(name="X3")
        bmw3er.save()
        bmw7er.save()
        bmwX3.save()
        bmw3er.brand = bmw
        bmw7er.brand = bmw
        bmwX3.brand = bmw
        # Same hierarchy for Daimler / Mercedes Benz.
        daimlergroup = Manufacturer(name="Daimler AG")
        daimlergroup.save()
        benz = Brand(name="Mercedes Benz")
        benz.save()
        benz.manufacturer = daimlergroup
        benzT = Car(name="T Modell")
        benzGLK = Car(name="GLK")
        benzSLK = Car(name="SLK")
        benzT.save()
        benzGLK.save()
        benzSLK.save()
        benzT.brand = benz
        benzGLK.brand = benz
        benzSLK.brand = benz
        # Drivers and their many-to-many car relations (assignment and
        # .add() are both exercised).
        harry = Driver(first_name="Harry", last_name="Klein")
        ayrton = Driver(first_name="Ayrton", last_name="Senna")
        harry.save()
        ayrton.save()
        harry.cars = [benzT, bmw3er, bmwX3]
        ayrton.cars.add([benzGLK, benzSLK, bmw7er, bmwX3])
        # Keys with owners.
        benzTkey = Key(serial=12)
        benzTkey.save()
        bmwX3key = Key(serial=10039)
        bmwX3key.save()
        benzTkey.owner = harry
        bmwX3key.owner = ayrton
class all(Fixture):
    """Tests Manager.all(): after building the fixture data set, the
    manager must return every saved instance in insertion order."""

    def runTest(self):
        # --- fixture setup (identical to BasicAssumptions) ---
        bmwgroup = Manufacturer(name="Bayerische Motoren Werke AG")
        bmwgroup.save()
        bmw = Brand(name="BMW")
        bmw.save()
        bmw.manufacturer = bmwgroup
        bmw3er = Car(name="3er")
        bmw7er = Car(name="7er")
        bmwX3 = Car(name="X3")
        bmw3er.save()
        bmw7er.save()
        bmwX3.save()
        bmw3er.brand = bmw
        bmw7er.brand = bmw
        bmwX3.brand = bmw
        daimlergroup = Manufacturer(name="Daimler AG")
        daimlergroup.save()
        benz = Brand(name="Mercedes Benz")
        benz.save()
        benz.manufacturer = daimlergroup
        benzT = Car(name="T Modell")
        benzGLK = Car(name="GLK")
        benzSLK = Car(name="SLK")
        benzT.save()
        benzGLK.save()
        benzSLK.save()
        benzT.brand = benz
        benzGLK.brand = benz
        benzSLK.brand = benz
        harry = Driver(first_name="Harry", last_name="Klein")
        ayrton = Driver(first_name="Ayrton", last_name="Senna")
        harry.save()
        ayrton.save()
        harry.cars = [benzT, bmw3er, bmwX3]
        ayrton.cars.add([benzGLK, benzSLK, bmw7er, bmwX3])
        benzTkey = Key(serial=12)
        benzTkey.save()
        bmwX3key = Key(serial=10039)
        bmwX3key.save()
        benzTkey.owner = harry
        bmwX3key.owner = ayrton
        #db setup complete
        # all returns all instances of a Model
        allcars = list(Car.objects.all())
        self.assert_(allcars == [bmw3er, bmw7er, bmwX3, benzT, benzGLK, benzSLK])
        alldrivers = list(Driver.objects.all())
        self.assert_(alldrivers == [harry, ayrton])
class filter(Fixture):
    """
    The Manager.filter method facilitates searching the database with
    filtering conditions. Available filters are:
        startswith
        endswith
        contains
        gt
        gte
        lt
        lte
        exact
        in
        between
    """

    def runTest(self):
        # --- fixture setup (identical to BasicAssumptions) ---
        bmwgroup = Manufacturer(name="Bayerische Motoren Werke AG")
        bmwgroup.save()
        bmw = Brand(name="BMW")
        bmw.save()
        bmw.manufacturer = bmwgroup
        bmw3er = Car(name="3er")
        bmw7er = Car(name="7er")
        bmwX3 = Car(name="X3")
        bmw3er.save()
        bmw7er.save()
        bmwX3.save()
        bmw3er.brand = bmw
        bmw7er.brand = bmw
        bmwX3.brand = bmw
        daimlergroup = Manufacturer(name="Daimler AG")
        daimlergroup.save()
        benz = Brand(name="Mercedes Benz")
        benz.save()
        benz.manufacturer = daimlergroup
        benzT = Car(name="T Modell")
        benzGLK = Car(name="GLK")
        benzSLK = Car(name="SLK")
        benzT.save()
        benzGLK.save()
        benzSLK.save()
        benzT.brand = benz
        benzGLK.brand = benz
        benzSLK.brand = benz
        harry = Driver(first_name="Harry", last_name="Klein")
        ayrton = Driver(first_name="Ayrton", last_name="Senna")
        harry.save()
        ayrton.save()
        harry.cars = [benzT, bmw3er, bmwX3]
        ayrton.cars.add([benzGLK, benzSLK, bmw7er, bmwX3])
        benzTkey = Key(serial=12)
        benzTkey.save()
        bmwX3key = Key(serial=10039)
        bmwX3key.save()
        benzTkey.owner = harry
        bmwX3key.owner = ayrton
        # Ten extra keys with serials 0..9 for the numeric filters.
        keys = []
        for x in range(10):
            k = Key(serial=x)
            k.save()
            keys.append(k)
        # Extra drivers for the string filters.
        drivernames = (
            ("Mina", "da Silva Sanches"),
            ("Gundula", "Gause"),
            ("Sandra", "Maischberger"),
            ("Bruno", "Gaspard"),
            ("Hermi", "Tock"),
            ("Willi", "Gunther"),
            ("Gisbert", "Geier")
        )
        drivers = []
        for f, l in drivernames:
            d = Driver(first_name=f, last_name=l)
            d.save()
            drivers.append(d)
        #db setup complete
        # make a search for certain attributes of a Model
        # startswith
        qs = Driver.objects.filter(last_name__startswith="G")
        self.assert_(list(qs) == [drivers[1], drivers[3], drivers[5], drivers[6]])
        # endswith
        qs = Car.objects.filter(name__endswith="er")
        self.assert_(list(qs) == [bmw3er, bmw7er])
        self.assert_(list(qs)[0].name == "3er")
        self.assert_(list(qs)[1].name == "7er")
        # contains
        qs = Driver.objects.filter(last_name__contains="aspa")
        self.assert_(list(qs) == [drivers[3]] and list(qs)[0].first_name == "Bruno")
        # gt
        qs = Key.objects.filter(serial__gt=8)
        self.assert_(list(qs) == [benzTkey, bmwX3key, keys[9]])
        # gte
        qs = Key.objects.filter(serial__gte=8)
        self.assert_(list(qs) == [benzTkey, bmwX3key, keys[8], keys[9]])
        # lt
        qs = Key.objects.filter(serial__lt=3)
        self.assert_(list(qs) == [keys[0], keys[1], keys[2]])
        # lte
        qs = Key.objects.filter(serial__lte=3)
        self.assert_(list(qs) == [keys[0], keys[1], keys[2], keys[3]])
        # exact
        qs = Car.objects.filter(name__exact="3er")
        self.assert_(list(qs) == [bmw3er])
        # in
        qs = Key.objects.filter(serial__in=(1,3,5))
        self.assert_(list(qs) == [keys[1], keys[3], keys[5]])
        # between -- note: per these expectations, `between` excludes both
        # bounds and tolerates them being given in reversed order.
        qs = Key.objects.filter(serial__between=(3, 1))
        self.assert_(list(qs) == [keys[2]])
        qs = Key.objects.filter(serial__between=(3, 5))
        self.assert_(list(qs) == [keys[4]])
class exclude(Fixture):
    # TODO: exercise Manager.exclude -- not yet implemented.
    def runTest(self):
        pass
class get(Fixture):
    # TODO: exercise Manager.get -- not yet implemented.
    def runTest(self):
        pass
class create(Fixture):
    # TODO: exercise Manager.create -- not yet implemented.
    def runTest(self):
        pass
class get_or_create(Fixture):
    # TODO: exercise Manager.get_or_create -- not yet implemented.
    def runTest(self):
        pass
class rollback(Fixture):
    # TODO: exercise transaction rollback -- not yet implemented.
    def runTest(self):
        pass
if __name__ == "__main__":
    # Tests to run; most are commented out until their bodies are
    # implemented (see the stub classes above).
    alltests = (
        all,
        #filter,
        # exclude,
        # get,
        # create,
        # get_or_create,
        #! implement
        # rollback,
    )
    runtests(tests=alltests, verbosity=3)
| |
"""
ulid/ulid
~~~~~~~~~
Object representation of a ULID.
"""
import binascii
import datetime
import typing
import uuid
from . import base32, hints
__all__ = ['Timestamp', 'Randomness', 'ULID']
#: Type hint that defines multiple primitive types and itself for comparing MemoryView instances.
MemoryViewPrimitive = typing.Union['MemoryView', hints.Primitive]  # pylint: disable=invalid-name


class MemoryView:
    """
    Wraps a buffer object, typically :class:`~bytes`, with a :class:`~memoryview` and provides easy
    type comparisons and conversions between presentation formats.
    """

    # Instances hold only the wrapped memoryview; __slots__ keeps them small.
    __slots__ = ['memory']

    def __init__(self, buffer: hints.Buffer) -> None:
        self.memory = memoryview(buffer)

    # The rich comparisons below coerce ``other`` to the matching
    # representation (raw memory, int, float or Base32 str) before
    # comparing; unsupported operand types defer via NotImplemented so
    # the reflected operation can be tried.
    def __eq__(self, other: MemoryViewPrimitive) -> hints.Bool:  # type: ignore[override]
        if isinstance(other, MemoryView):
            return self.memory == other.memory
        if isinstance(other, (bytes, bytearray, memoryview)):
            return self.memory == other
        if isinstance(other, int):
            return self.int == other
        if isinstance(other, float):
            return self.float == other
        if isinstance(other, str):
            return self.str == other
        return NotImplemented

    def __ne__(self, other: MemoryViewPrimitive) -> hints.Bool:  # type: ignore[override]
        if isinstance(other, MemoryView):
            return self.memory != other.memory
        if isinstance(other, (bytes, bytearray, memoryview)):
            return self.memory != other
        if isinstance(other, int):
            return self.int != other
        if isinstance(other, float):
            return self.float != other
        if isinstance(other, str):
            return self.str != other
        return NotImplemented

    # Ordering against a memoryview goes through ``tobytes()`` because
    # memoryview itself does not support ordering comparisons.
    def __lt__(self, other: MemoryViewPrimitive) -> hints.Bool:
        if isinstance(other, MemoryView):
            return self.int < other.int
        if isinstance(other, (bytes, bytearray)):
            return self.bytes < other
        if isinstance(other, memoryview):
            return self.bytes < other.tobytes()
        if isinstance(other, int):
            return self.int < other
        if isinstance(other, float):
            return self.float < other
        if isinstance(other, str):
            return self.str < other
        return NotImplemented

    def __gt__(self, other: MemoryViewPrimitive) -> hints.Bool:
        if isinstance(other, MemoryView):
            return self.int > other.int
        if isinstance(other, (bytes, bytearray)):
            return self.bytes > other
        if isinstance(other, memoryview):
            return self.bytes > other.tobytes()
        if isinstance(other, int):
            return self.int > other
        if isinstance(other, float):
            return self.float > other
        if isinstance(other, str):
            return self.str > other
        return NotImplemented

    def __le__(self, other: MemoryViewPrimitive) -> hints.Bool:
        if isinstance(other, MemoryView):
            return self.int <= other.int
        if isinstance(other, (bytes, bytearray)):
            return self.bytes <= other
        if isinstance(other, memoryview):
            return self.bytes <= other.tobytes()
        if isinstance(other, int):
            return self.int <= other
        if isinstance(other, float):
            return self.float <= other
        if isinstance(other, str):
            return self.str <= other
        return NotImplemented

    def __ge__(self, other: MemoryViewPrimitive) -> hints.Bool:
        if isinstance(other, MemoryView):
            return self.int >= other.int
        if isinstance(other, (bytes, bytearray)):
            return self.bytes >= other
        if isinstance(other, memoryview):
            return self.bytes >= other.tobytes()
        if isinstance(other, int):
            return self.int >= other
        if isinstance(other, float):
            return self.float >= other
        if isinstance(other, str):
            return self.str >= other
        return NotImplemented

    def __hash__(self) -> hints.Int:
        return hash(self.memory)

    def __bytes__(self) -> hints.Bytes:
        return self.bytes

    def __float__(self) -> hints.Float:
        return self.float

    def __int__(self) -> hints.Int:
        return self.int

    def __index__(self) -> hints.Int:
        return self.int

    def __repr__(self) -> hints.Str:
        return '<{}({!r})>'.format(self.__class__.__name__, str(self))

    def __str__(self) -> hints.Str:
        return self.str

    # Pickle support: serialise via the Base32 string representation.
    def __getstate__(self) -> hints.Str:
        return self.str

    def __setstate__(self, state: hints.Str) -> None:
        self.memory = memoryview(base32.decode(state))

    @property
    def bin(self) -> hints.Str:
        """
        Computes the binary string value of the underlying :class:`~memoryview`.

        :return: Memory in binary string form
        :rtype: :class:`~str`
        """
        return bin(self.int)

    @property
    def bytes(self) -> hints.Bytes:
        """
        Computes the bytes value of the underlying :class:`~memoryview`.

        :return: Memory in bytes form
        :rtype: :class:`~bytes`
        """
        return self.memory.tobytes()

    @property
    def float(self) -> hints.Float:
        """
        Computes the float value of the underlying :class:`~memoryview` in big-endian byte order.

        :return: Bytes in float form.
        :rtype: :class:`~float`
        """
        return float(self.int)

    @property
    def hex(self) -> hints.Str:
        """
        Computes the hexadecimal string value of the underlying :class:`~memoryview`.

        :return: Memory in hexadecimal string form
        :rtype: :class:`~str`
        """
        return '0x' + binascii.hexlify(self.bytes).decode()

    @property
    def int(self) -> hints.Int:
        """
        Computes the integer value of the underlying :class:`~memoryview` in big-endian byte order.

        :return: Bytes in integer form.
        :rtype: :class:`~int`
        """
        return int.from_bytes(self.memory, byteorder='big')

    @property
    def oct(self) -> hints.Str:
        """
        Computes the octal string value of the underlying :class:`~memoryview`.

        :return: Memory in octal string form
        :rtype: :class:`~str`
        """
        return oct(self.int)

    @property
    def str(self) -> hints.Str:
        """
        Computes the string value of the underlying :class:`~memoryview` in Base32 encoding.

        .. note:: The base implementation here will call :func:`~ulid.base32.encode` which
        performs analysis on the bytes to determine how it should be decoded. This is going to
        be slightly slower than calling the explicit `encode_*` methods so each model that
        derives from this class can/should override and specify the explicit function to call.

        :return: Bytes in Base32 string form.
        :rtype: :class:`~str`
        :raises ValueError: if underlying :class:`~memoryview` cannot be encoded
        """
        return base32.encode(self.memory)
class Timestamp(MemoryView):
    """
    Represents the timestamp portion of a ULID.

    * Unix time (time since epoch) in milliseconds.
    * First 48 bits of ULID when in binary format.
    * First 10 characters of ULID when in string format.
    """

    __slots__ = MemoryView.__slots__

    @property
    def str(self) -> hints.Str:
        """
        Computes the string value of the timestamp from the underlying :class:`~memoryview` in Base32 encoding.

        :return: Timestamp in Base32 string form.
        :rtype: :class:`~str`
        :raises ValueError: if underlying :class:`~memoryview` cannot be encoded
        """
        return base32.encode_timestamp(self.memory)

    @property
    def timestamp(self) -> hints.Float:
        """
        Computes the Unix time (seconds since epoch) from its :class:`~memoryview`.

        :return: Timestamp in Unix time (seconds since epoch) form.
        :rtype: :class:`~float`
        """
        return self.int / 1000.0

    @property
    def datetime(self) -> hints.Datetime:
        """
        Creates a :class:`~datetime.datetime` instance (assumes UTC) from the Unix time value of the timestamp
        with millisecond precision.

        :return: Timestamp in datetime form.
        :rtype: :class:`~datetime.datetime`
        """
        milli = self.int
        # Split into whole seconds and the millisecond remainder
        # (expressed as microseconds for datetime).
        micro = milli % 1000 * 1000
        sec = milli // 1000
        timezone = datetime.timezone.utc
        # `fromtimestamp(..., tz=utc)` replaces the deprecated
        # `utcfromtimestamp` + `replace(tzinfo=...)` pair and produces the
        # identical timezone-aware datetime.
        return datetime.datetime.fromtimestamp(sec, tz=timezone).replace(microsecond=micro)
class Randomness(MemoryView):
    """
    Represents the randomness portion of a ULID.

    * Cryptographically secure random values.
    * Last 80 bits of ULID when in binary format.
    * Last 16 characters of ULID when in string format.
    """

    __slots__ = MemoryView.__slots__

    @property
    def str(self) -> hints.Str:
        """
        Computes the string value of the randomness from the underlying :class:`~memoryview` in Base32 encoding.

        :return: Randomness in Base32 string form.
        :rtype: :class:`~str`
        :raises ValueError: if underlying :class:`~memoryview` cannot be encoded
        """
        return base32.encode_randomness(self.memory)
class ULID(MemoryView):
    """
    Represents a ULID.

    * 128 bits in binary format.
    * 26 characters in string format.
    * 16 octets.
    * Network byte order, big-endian, most significant bit first.
    """

    __slots__ = MemoryView.__slots__

    @property
    def str(self) -> hints.Str:
        """
        Computes the string value of the ULID from its :class:`~memoryview` in Base32 encoding.

        :return: ULID in Base32 string form.
        :rtype: :class:`~str`
        :raises ValueError: if underlying :class:`~memoryview` cannot be encoded
        """
        return base32.encode_ulid(self.memory)

    def timestamp(self) -> Timestamp:
        """
        Creates a :class:`~ulid.ulid.Timestamp` instance that maps to the first 48 bits of this ULID.

        :return: Timestamp from first 48 bits.
        :rtype: :class:`~ulid.ulid.Timestamp`
        """
        return Timestamp(self.memory[:6])

    def randomness(self) -> Randomness:
        """
        Creates a :class:`~ulid.ulid.Randomness` instance that maps to the last 80 bits of this ULID.

        :return: Randomness from last 80 bits.
        :rtype: :class:`~ulid.ulid.Randomness`
        """
        return Randomness(self.memory[6:])

    @property
    def uuid(self) -> hints.UUID:
        """
        Creates a :class:`~uuid.UUID` instance of the ULID from its :class:`~bytes` representation.

        :return: UUID built from the raw ULID bytes (the version/variant
            bits are whatever the ULID happens to contain).
        :rtype: :class:`~uuid.UUID`
        """
        return uuid.UUID(bytes=self.bytes)
| |
import math
import csv
import numpy
import matplotlib.pyplot as plt
import numpy as np
# Max distance (gaze-coordinate units) between the 4 seed points of a
# candidate fixation (used by small_fixation).
small_threshold = 10
# Max distance for a further sample to be merged into an ongoing fixation
# (used by distance_calculation / large_fixation).
large_threshold = 50
def read_raw(filename):
    """
    Reads in SMI eye data txt files.

    Assumes whitespace-separated columns where column 0 is the timestamp
    and columns 3 and 4 are the x/y gaze coordinates (decimal commas are
    converted to dots). Lines starting with '#' or 'T' and lines
    containing 'MSG' are skipped.

    returns: time, x, y
    """
    time = []
    x = []
    y = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        lines = f.readlines()
    # NOTE(review): the final line is skipped, exactly as in the original
    # (`range(len(line)-1)`) -- confirm whether trailing data matters.
    for raw in lines[:-1]:
        if raw[0] != '#' and (raw[0] != 'T') and not ('MSG' in raw):
            fields = raw.replace(',', '.').split()
            time.append(float(fields[0]))
            x.append(float(fields[3]))
            y.append(float(fields[4]))
    return time, x, y
def small_fixation(x, y):
    """Return 1 when the first four gaze points all lie within
    ``small_threshold`` of one another (a fixation seed), else 0."""
    pairs = ((i, k) for i in range(4) for k in range(4))
    within = all(math.hypot(x[i] - x[k], y[i] - y[k]) <= small_threshold
                 for i, k in pairs)
    return 1 if within else 0
def large_fixation(time, x, y, s):
    """
    Grow a fixation starting from the four seed samples at index ``s``.

    Subsequent samples are appended while they stay within
    ``large_threshold`` of every point already in the fixation and within
    the time budget. Returns the collected times/coordinates and the
    index ``w`` where scanning stopped.
    """
    fixation_time = [time[s], time[s+1], time[s+2], time[s+3]]
    fixation_x = [x[s], x[s+1], x[s+2], x[s+3]]
    fixation_y = [y[s], y[s+1], y[s+2], y[s+3]]
    w = s
    for i in range(s+4, len(x)-3):
        # detect the time that between new gaze point and first fixation point is shorter than 250ms
        # NOTE(review): the comment says 250 ms but the constant is
        # 25000000 raw timestamp units -- confirm the timestamp unit.
        if time[i]-fixation_time[0] > 25000000:  # 25000000
            w = i
            break
        else:
            # print(time[i]-fixation_time[0])
            if distance_calculation(x, y, i, fixation_x, fixation_y) == 1:
                w = i
                fixation_time.append(time[i])
                fixation_x.append(x[i])
                fixation_y.append(y[i])
            # If three point are not detected fixation in succession, fixation detection finish
            # NOTE(review): despite the comment, only the next two samples
            # (i+1, i+2) are tested before giving up.
            elif (distance_calculation(x, y, i+1, fixation_x, fixation_y) == 0) and (distance_calculation(x, y, i+2, fixation_x, fixation_y) == 0):
                w = i
                break
    return fixation_time, fixation_x, fixation_y, w
def distance_calculation(x, y, i, fixation_x, fixation_y):
    """Return 1 when point ``i`` lies within ``large_threshold`` of every
    point already assigned to the fixation, else 0."""
    for fx, fy in zip(fixation_x, fixation_y):
        if math.hypot(x[i] - fx, y[i] - fy) > large_threshold:
            # One stored point is too far away -> not part of the fixation.
            return 0
    return 1
def calculate_ave(fixation_time, fixation_x, fixation_y):
    """Collapse a fixation's samples into one record.

    Returns (mean time, duration, mean x, mean y), where the duration is
    last-sample time minus first-sample time.
    """
    duration = fixation_time[-1] - fixation_time[0]
    mean_t = numpy.mean(fixation_time)
    mean_x = numpy.mean(fixation_x)
    mean_y = numpy.mean(fixation_y)
    return mean_t, duration, mean_x, mean_y
def filter_fixations(time, x, y):
    """
    Scan the gaze samples, collapse detected fixations into averaged
    records, and return parallel lists (time, duration, x, y).
    """
    fixation_value = []
    q = 0
    # NOTE(review): ``e`` is unused -- ``q`` is the real cursor (it is
    # also advanced inside large_fixation); the ``for e`` bound merely
    # caps the number of iterations.
    for e in range(len(x)-4):
        if q > len(x)-4:
            break
        # first_x, first_y are the next four candidate seed points
        first_x = [x[q], x[q+1], x[q+2], x[q+3]]
        first_y = [y[q], y[q+1], y[q+2], y[q+3]]
        # print first_x, first_y
        if (small_fixation(first_x, first_y) == 1):
            (fixation_time, fixation_x, fixation_y, q) = large_fixation(time, x, y, q)
            fixation_value.append(calculate_ave(fixation_time, fixation_x, fixation_y))
            # Discard degenerate fixations whose mean x is exactly 0.
            if fixation_value[-1][2] == 0:
                fixation_value.pop()
        q = q+1
    # print fixation_value
    # Unpack the averaged records into parallel result lists.
    time = []
    fixation_duration = []
    fixation_x = []
    fixation_y = []
    for a in range(len(fixation_value)):
        # print fixation_value[a]
        time.append(fixation_value[a][0])
        fixation_duration.append(fixation_value[a][1])
        fixation_x.append(fixation_value[a][2])
        fixation_y.append(fixation_value[a][3])
    return time, fixation_duration, fixation_x, fixation_y
import pandas
import numpy.linalg as lin
from matplotlib.mlab import PCA
import numpy as np
import pandas
from math import atan2, pi, degrees
from numpy import var, mean, median
def gb(x, y):
    """Angle of the vector (x, y) in degrees, in (-180, 180]."""
    return degrees(atan2(y, x))
def get_saccade_directions(d, x, y):
    """Windowed saccade-direction counts.

    Combines per-sample length ``d`` and coordinates ``x``/``y``, slides
    a 125-sample window (step 25) over them and collects the four
    direction counts computed by :func:`calc_features` per window.
    """
    # Stack into rows of (len, x, y); this also coerces the values to a
    # common numpy dtype, matching the original behaviour.
    rows = np.vstack((np.array(d), np.array(x), np.array(y))).T
    samples = [{'len': row[0], 'x': row[1], 'y': row[2]} for row in rows]
    counts = ([], [], [], [])
    for window in slidingWindow(samples, 125, 25):
        for acc, value in zip(counts, calc_features(window)):
            acc.append(value)
    return counts
def calc_features(eyegaze):
    '''
    calculate some features on an eye fixations
    it assumes a dictionary with keys:
    t = time
    len = length of fixation
    x = x coordinate of fixation
    y = y coordinate of fixation
    '''
    data = pandas.DataFrame(eyegaze)
    #remove mean
    #diff between two points
    d_d = data.diff()
    #calculate slope
    #calculate the angles between two points (using the difference)
    angles = []
    for pt in [(d_d.x[i], d_d.y[i]) for i in range(1, len(d_d))]:
        angles.append(gb(pt[0], pt[1]))
    d_a = pandas.DataFrame(angles)
    #count angles between the given degrees
    # NOTE(review): these four range tests overlap and look inconsistent
    # (e.g. `(d_a < -45.0) | (d_a <= 45.0)` reduces to `d_a <= 45`, and
    # the second mixes 135/-18 bounds). They were presumably meant to bin
    # the angles into four direction sectors -- confirm the intended
    # boundaries before relying on these counts.
    a1c = np.count_nonzero((d_a < -45.0) | (d_a <= 45.0))
    a2c = np.count_nonzero(((d_a < 135) & (d_a <= 180)) | ((d_a > -18.0) & (d_a <= 135.0)))
    a3c = np.count_nonzero((d_a > 45.0) & (d_a < 180.0))
    a4c = np.count_nonzero((d_a >= -45.0) & (d_a > 135.0))
    return a1c, a2c, a3c, a4c
def slidingWindow(sequence, winSize, step=1):
    """Yield successive windows of length ``winSize`` over ``sequence``,
    advancing ``step`` elements each time.

    Only full-length windows are produced. Raises Exception for
    non-iterable input, non-int ``winSize``/``step``, ``step > winSize``
    or ``winSize > len(sequence)``.
    """
    # Verify the inputs
    try:
        iter(sequence)
    except TypeError:
        raise Exception("**ERROR** sequence must be iterable.")
    if not (isinstance(winSize, int) and isinstance(step, int)):
        raise Exception("**ERROR** type(winSize) and type(step) must be int.")
    if step > winSize:
        raise Exception("**ERROR** step must not be larger than winSize.")
    if winSize > len(sequence):
        raise Exception("**ERROR** winSize must not be larger than sequence length.")
    # Number of full windows. The original used float division here (a
    # Python 2 -> 3 porting regression) which emitted a trailing,
    # shorter-than-winSize window whenever (len - winSize) was not a
    # multiple of step.
    numOfChunks = ((len(sequence) - winSize) // step) + 1
    for i in range(0, numOfChunks * step, step):
        yield sequence[i:i + winSize]
| |
import numpy as np
from menpo.shape import PointCloud
from menpo.transform import tcoords_to_image_coords
from ..adjacency import mask_adjacency_array, reindex_adjacency_array
from .base import TriMesh, grid_tcoords
class TexturedTriMesh(TriMesh):
r"""
Combines a :map:`TriMesh` with a texture. Also encapsulates the texture
coordinates required to render the texture on the mesh.
Parameters
----------
points : ``(n_points, n_dims)`` `ndarray`
The array representing the points.
tcoords : ``(N, 2)`` `ndarray`
The texture coordinates for the mesh.
texture : :map:`Image`
The texture for the mesh.
trilist : ``(M, 3)`` `ndarray` or ``None``, optional
The triangle list. If ``None``, a Delaunay triangulation of
the points will be used instead.
copy: `bool`, optional
If ``False``, the points, trilist and texture will not be copied on
assignment.
In general this should only be used if you know what you are doing.
"""
def __init__(self, points, tcoords, texture, trilist=None, copy=True):
super(TexturedTriMesh, self).__init__(points, trilist=trilist,
copy=copy)
self.tcoords = PointCloud(tcoords, copy=copy)
if not copy:
self.texture = texture
else:
self.texture = texture.copy()
    @property
    def n_channels(self):
        r"""
        The number of channels of colour used (e.g. 3 for RGB).

        :type: `int`
        """
        # Delegates to the texture image's channel count.
        return self.texture.n_channels
@classmethod
def init_2d_grid(cls, shape, spacing=None, tcoords=None, texture=None):
r"""
Create a TexturedTriMesh that exists on a regular 2D grid. The first
dimension is the number of rows in the grid and the second dimension
of the shape is the number of columns. ``spacing`` optionally allows
the definition of the distance between points (uniform over points).
The spacing may be different for rows and columns.
The triangulation will be right-handed and the diagonal will go from
the top left to the bottom right of a square on the grid.
If no texture is passed a blank (black) texture is attached with
correct texture coordinates for texture mapping an image of the same
size as ``shape``.
Parameters
----------
shape : `tuple` of 2 `int`
The size of the grid to create, this defines the number of points
across each dimension in the grid. The first element is the number
of rows and the second is the number of columns.
spacing : `int` or `tuple` of 2 `int`, optional
The spacing between points. If a single `int` is provided, this
is applied uniformly across each dimension. If a `tuple` is
provided, the spacing is applied non-uniformly as defined e.g.
``(2, 3)`` gives a spacing of 2 for the rows and 3 for the
columns.
tcoords : ``(N, 2)`` `ndarray`, optional
The texture coordinates for the mesh.
texture : :map:`Image`, optional
The texture for the mesh.
Returns
-------
trimesh : :map:`TriMesh`
A TriMesh arranged in a grid.
"""
pc = TriMesh.init_2d_grid(shape, spacing=spacing)
points = pc.points
trilist = pc.trilist
# Ensure that the tcoords and texture are copied
if tcoords is not None:
tcoords = tcoords.copy()
else:
tcoords = grid_tcoords(shape)
if texture is not None:
texture = texture.copy()
else:
from menpo.image import Image
# Default texture is all black
texture = Image.init_blank(shape)
return TexturedTriMesh(points, tcoords, texture, trilist=trilist,
copy=False)
    @classmethod
    def init_from_depth_image(cls, depth_image, tcoords=None, texture=None):
        r"""
        Return a 3D textured triangular mesh from the given depth image. The
        depth image is assumed to represent height/depth values and the XY
        coordinates are assumed to unit spaced and represent image coordinates.
        This is particularly useful for visualising depth values that have been
        recovered from images.

        The optionally passed texture will be textured mapped onto the planar
        surface using the correct texture coordinates for an image of the
        same shape as ``depth_image``.

        Parameters
        ----------
        depth_image : :map:`Image` or subclass
            A single channel image that contains depth values - as commonly
            returned by RGBD cameras, for example.
        tcoords : ``(N, 2)`` `ndarray`, optional
            The texture coordinates for the mesh.
        texture : :map:`Image`, optional
            The texture for the mesh.

        Returns
        -------
        depth_cloud : ``type(cls)``
            A new 3D TriMesh with unit XY coordinates and the given depth
            values as Z coordinates. The trilist is constructed as in
            :meth:`init_2d_grid`.
        """
        from menpo.image import MaskedImage
        new_tmesh = cls.init_2d_grid(depth_image.shape, tcoords=tcoords,
                                     texture=texture)
        if isinstance(depth_image, MaskedImage):
            # Drop grid points that fall outside the image mask.
            new_tmesh = new_tmesh.from_mask(depth_image.mask.as_vector())
        # Append the depth values as the Z coordinate of each point.
        return cls(np.hstack([new_tmesh.points,
                              depth_image.as_vector(keep_channels=True).T]),
                   new_tmesh.tcoords.points,
                   new_tmesh.texture,
                   trilist=new_tmesh.trilist,
                   copy=False)
    def tcoords_pixel_scaled(self):
        r"""
        Returns a :map:`PointCloud` that is modified to be suitable for directly
        indexing into the pixels of the texture (e.g. for manual mapping
        operations). The resulting tcoords behave just like image landmarks
        do.

        The operations that are performed are:

          - Flipping the origin from bottom-left to top-left
          - Scaling the tcoords by the image shape (denormalising them)
          - Permuting the axes to match the image (row, column) convention

        Returns
        -------
        tcoords_scaled : :map:`PointCloud`
            A copy of the tcoords that behave like :map:`Image` landmarks

        Examples
        --------
        Recovering pixel values for every texture coordinate:

        >>> texture = texturedtrimesh.texture
        >>> tc_ps = texturedtrimesh.tcoords_pixel_scaled()
        >>> pixel_values_at_tcs = texture.sample(tc_ps)
        """
        # All three operations are implemented by this transform.
        return tcoords_to_image_coords(self.texture.shape).apply(self.tcoords)
def from_vector(self, flattened):
r"""
Builds a new :class:`TexturedTriMesh` given the `flattened` 1D vector.
Note that the trilist, texture, and tcoords will be drawn from self.
Parameters
----------
flattened : ``(N,)`` `ndarray`
Vector representing a set of points.
Returns
--------
trimesh : :map:`TriMesh`
A new trimesh created from the vector with ``self`` trilist.
"""
return TexturedTriMesh(flattened.reshape([-1, self.n_dims]),
self.tcoords.points, self.texture,
trilist=self.trilist)
    def from_mask(self, mask):
        """
        A 1D boolean array with the same number of elements as the number of
        points in the TexturedTriMesh. This is then broadcast across the
        dimensions of the mesh and returns a new mesh containing only those
        points that were ``True`` in the mask.

        Parameters
        ----------
        mask : ``(n_points,)`` `ndarray`
            1D array of booleans

        Returns
        -------
        mesh : :map:`TexturedTriMesh`
            A new mesh that has been masked.

        Raises
        ------
        ValueError
            If the mask length does not match the number of points.
        """
        if mask.shape[0] != self.n_points:
            raise ValueError('Mask must be a 1D boolean array of the same '
                             'number of entries as points in this '
                             'TexturedTriMesh.')
        ttm = self.copy()
        if np.all(mask):  # Fast path for all true
            return ttm
        else:
            # Recalculate the mask to remove isolated vertices
            isolated_mask = self._isolated_mask(mask)
            # Recreate the adjacency array with the updated mask
            masked_adj = mask_adjacency_array(isolated_mask, self.trilist)
            ttm.trilist = reindex_adjacency_array(masked_adj)
            # Filter points and texture coordinates in lock-step.
            ttm.points = ttm.points[isolated_mask, :]
            ttm.tcoords.points = ttm.tcoords.points[isolated_mask, :]
            return ttm
def clip_texture(self, range=(0., 1.)):
"""
Method that returns a copy of the object with the texture values
clipped in range ``(0, 1)``.
Parameters
----------
range : ``(float, float)``, optional
The clipping range.
Returns
-------
self : :map:`ColouredTriMesh`
A copy of self with its texture clipped.
"""
instance = self.copy()
instance.texture.pixels = np.clip(self.texture.pixels, *range)
return instance
    def rescale_texture(self, minimum, maximum, per_channel=True):
        r"""
        A copy of this mesh with texture linearly rescaled to fit a range.

        Parameters
        ----------
        minimum: `float`
            The minimal value of the rescaled colours
        maximum: `float`
            The maximal value of the rescaled colours
        per_channel: `boolean`, optional
            If ``True``, each channel will be rescaled independently. If
            ``False``, the scaling will be over all channels.

        Returns
        -------
        textured_mesh : ``type(self)``
            A copy of this mesh with texture linearly rescaled to fit in the
            range provided.
        """
        instance = self.copy()
        # rescale_pixels returns a new image; assign it onto the copy.
        instance.texture = instance.texture.rescale_pixels(
            minimum, maximum, per_channel=per_channel)
        return instance
def _view_3d(self, figure_id=None, new_figure=True, render_texture=True,
mesh_type='surface', ambient_light=0.0, specular_light=0.0,
colour='r', line_width=2, normals=None, normals_colour='k',
normals_line_width=2, normals_marker_style='2darrow',
normals_marker_resolution=8, normals_marker_size=None,
step=None, alpha=1.0):
r"""
Visualize the Textured TriMesh in 3D.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_texture : `bool`, optional
If ``True``, then the texture is rendered. If ``False``, then only
the TriMesh is rendered with the specified `colour`.
mesh_type : ``{'surface', 'wireframe'}``, optional
The representation type to be used for the mesh.
ambient_light : `float`, optional
The ambient light intensity. It must be in range ``[0., 1.]``.
specular_light : `float`, optional
The specular light intensity. It must be in range ``[0., 1.]``.
colour : See Below, optional
The colour of the mesh if `render_texture` is ``False``.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_width : `float`, optional
The width of the lines, if there are any.
normals : ``(n_points, 3)`` `ndarray` or ``None``, optional
If ``None``, then the normals will not be rendered. If `ndarray`,
then the provided normals will be rendered as well. Note that a
normal must be provided for each point in the TriMesh.
normals_colour : See Below, optional
The colour of the normals.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
normals_line_width : `float`, optional
The width of the lines of the normals. It only applies if `normals`
is not ``None``.
normals_marker_style : `str`, optional
The style of the markers of the normals. It only applies if `normals`
is not ``None``.
Example options ::
{2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,
2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,
arrow, axes, cone, cube, cylinder, point, sphere}
normals_marker_resolution : `int`, optional
The resolution of the markers of the normals. For spheres, for
instance, this is the number of divisions along theta and phi. It
only applies if `normals` is not ``None``.
normals_marker_size : `float` or ``None``, optional
The size of the markers. This size can be seen as a scale factor
applied to the size markers, which is by default calculated from
the inter-marker spacing. If ``None``, then an optimal marker size
value will be set automatically. It only applies if `normals` is not
``None``.
step : `int` or ``None``, optional
If `int`, then one every `step` normals will be rendered.
If ``None``, then all vertexes will be rendered. It only applies if
`normals` is not ``None``.
alpha : `float`, optional
Defines the transparency (opacity) of the object.
Returns
-------
renderer : `menpo3d.visualize.TexturedTriMeshViewer3D`
The Menpo3D rendering object.
"""
if render_texture:
try:
from menpo3d.visualize import TexturedTriMeshViewer3d
renderer = TexturedTriMeshViewer3d(figure_id, new_figure,
self.points, self.trilist,
self.texture,
self.tcoords.points)
renderer.render(
mesh_type=mesh_type, ambient_light=ambient_light,
specular_light=specular_light, normals=normals,
normals_colour=normals_colour,
normals_line_width=normals_line_width,
normals_marker_style=normals_marker_style,
normals_marker_resolution=normals_marker_resolution,
normals_marker_size=normals_marker_size, step=step,
alpha=alpha)
return renderer
except ImportError:
from menpo.visualize import Menpo3dMissingError
raise Menpo3dMissingError()
else:
try:
from menpo3d.visualize import TriMeshViewer3d
renderer = TriMeshViewer3d(figure_id, new_figure, self.points,
self.trilist)
renderer.render(
mesh_type=mesh_type, line_width=line_width, colour=colour,
normals=normals, normals_colour=normals_colour,
normals_line_width=normals_line_width,
normals_marker_style=normals_marker_style,
normals_marker_resolution=normals_marker_resolution,
normals_marker_size=normals_marker_size, step=step,
alpha=alpha)
return renderer
except ImportError:
from menpo.visualize import Menpo3dMissingError
raise Menpo3dMissingError()
    def _view_2d(self, figure_id=None, new_figure=False, image_view=True,
                 render_lines=True, line_colour='r', line_style='-',
                 line_width=1., render_markers=True, marker_style='o',
                 marker_size=5, marker_face_colour='k', marker_edge_colour='k',
                 marker_edge_width=1., render_numbering=False,
                 numbers_horizontal_align='center',
                 numbers_vertical_align='bottom',
                 numbers_font_name='sans-serif', numbers_font_size=10,
                 numbers_font_style='normal', numbers_font_weight='normal',
                 numbers_font_colour='k', render_axes=True,
                 axes_font_name='sans-serif', axes_font_size=10,
                 axes_font_style='normal', axes_font_weight='normal',
                 axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,
                 axes_y_ticks=None, figure_size=(10, 8),
                 label=None, **kwargs):
        r"""
        Visualization of the TriMesh in 2D. Currently, explicit textured TriMesh
        viewing is not supported, and therefore viewing falls back to untextured
        2D TriMesh viewing.
        Parameters
        ----------
        figure_id : `object`, optional
            The id of the figure to be used.
        new_figure : `bool`, optional
            If ``True``, a new figure is created.
        image_view : `bool`, optional
            If ``True`` the TexturedTriMesh will be viewed as if it is in the
            image coordinate system.
        render_lines : `bool`, optional
            If ``True``, the edges will be rendered.
        line_colour : See Below, optional
            The colour of the lines.
            Example options::
                {r, g, b, c, m, k, w}
                or
                (3, ) ndarray
        line_style : ``{-, --, -., :}``, optional
            The style of the lines.
        line_width : `float`, optional
            The width of the lines.
        render_markers : `bool`, optional
            If ``True``, the markers will be rendered.
        marker_style : See Below, optional
            The style of the markers. Example options ::
                {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
        marker_size : `int`, optional
            The size of the markers in points.
        marker_face_colour : See Below, optional
            The face (filling) colour of the markers.
            Example options ::
                {r, g, b, c, m, k, w}
                or
                (3, ) ndarray
        marker_edge_colour : See Below, optional
            The edge colour of the markers.
            Example options ::
                {r, g, b, c, m, k, w}
                or
                (3, ) ndarray
        marker_edge_width : `float`, optional
            The width of the markers' edge.
        render_numbering : `bool`, optional
            If ``True``, the landmarks will be numbered.
        numbers_horizontal_align : ``{center, right, left}``, optional
            The horizontal alignment of the numbers' texts.
        numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
            The vertical alignment of the numbers' texts.
        numbers_font_name : See Below, optional
            The font of the numbers. Example options ::
                {serif, sans-serif, cursive, fantasy, monospace}
        numbers_font_size : `int`, optional
            The font size of the numbers.
        numbers_font_style : ``{normal, italic, oblique}``, optional
            The font style of the numbers.
        numbers_font_weight : See Below, optional
            The font weight of the numbers.
            Example options ::
                {ultralight, light, normal, regular, book, medium, roman,
                semibold, demibold, demi, bold, heavy, extra bold, black}
        numbers_font_colour : See Below, optional
            The font colour of the numbers.
            Example options ::
                {r, g, b, c, m, k, w}
                or
                (3, ) ndarray
        render_axes : `bool`, optional
            If ``True``, the axes will be rendered.
        axes_font_name : See Below, optional
            The font of the axes.
            Example options ::
                {serif, sans-serif, cursive, fantasy, monospace}
        axes_font_size : `int`, optional
            The font size of the axes.
        axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
            The font style of the axes.
        axes_font_weight : See Below, optional
            The font weight of the axes.
            Example options ::
                {ultralight, light, normal, regular, book, medium, roman,
                semibold, demibold, demi, bold, heavy, extra bold, black}
        axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
            The limits of the x axis. If `float`, then it sets padding on the
            right and left of the TriMesh as a percentage of the TriMesh's
            width. If `tuple` or `list`, then it defines the axis limits. If
            ``None``, then the limits are set automatically.
        axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
            The limits of the y axis. If `float`, then it sets padding on the
            top and bottom of the TriMesh as a percentage of the TriMesh's
            height. If `tuple` or `list`, then it defines the axis limits. If
            ``None``, then the limits are set automatically.
        axes_x_ticks : `list` or `tuple` or ``None``, optional
            The ticks of the x axis.
        axes_y_ticks : `list` or `tuple` or ``None``, optional
            The ticks of the y axis.
        figure_size : (`float`, `float`) `tuple` or ``None``, optional
            The size of the figure in inches.
        label : `str`, optional
            The name entry in case of a legend.
        Returns
        -------
        viewer : :map:`PointGraphViewer2d`
            The viewer object.
        Raises
        ------
        warning
            2D Viewing of Textured TriMeshes is not supported, automatically
            falls back to 2D :map:`TriMesh` viewing.
        """
        import warnings
        warnings.warn(Warning('2D Viewing of Textured TriMeshes is not '
                              'supported, falling back to TriMesh viewing.'))
        return TriMesh._view_2d(
            self, figure_id=figure_id, new_figure=new_figure,
            image_view=image_view, render_lines=render_lines,
            line_colour=line_colour, line_style=line_style,
            line_width=line_width, render_markers=render_markers,
            marker_style=marker_style, marker_size=marker_size,
            marker_face_colour=marker_face_colour,
            marker_edge_colour=marker_edge_colour,
            marker_edge_width=marker_edge_width,
            render_numbering=render_numbering,
            numbers_horizontal_align=numbers_horizontal_align,
            numbers_vertical_align=numbers_vertical_align,
            numbers_font_name=numbers_font_name,
            numbers_font_size=numbers_font_size,
            numbers_font_style=numbers_font_style,
            numbers_font_weight=numbers_font_weight,
            numbers_font_colour=numbers_font_colour, render_axes=render_axes,
            axes_font_name=axes_font_name, axes_font_size=axes_font_size,
            axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,
            axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
            axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,
            figure_size=figure_size, label=label)
def __str__(self):
return '{}\ntexture_shape: {}, n_texture_channels: {}'.format(
TriMesh.__str__(self), self.texture.shape, self.texture.n_channels)
| |
from .test_utils import TestCase
from confetti import Config
from confetti import get_config_object_from_proxy
from confetti import exceptions
from confetti import Metadata
class BasicUsageTest(TestCase):
    """Basic reading/writing of nested Config values via proxy and dict APIs.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        super(BasicUsageTest, self).setUp()
        self.conf = Config(dict(
            a=dict(
                b=2
            )
        ))

    def test_getting(self):
        self.assertEqual(self.conf.root.a.b, 2)

    def test_setting(self):
        self.conf.root.a.b = 3
        self.assertEqual(self.conf.root.a.b, 3)

    def test_get_conf_from_proxy(self):
        self.assertIs(get_config_object_from_proxy(self.conf.root), self.conf)

    def test_proxy_hasattr(self):
        self.assertFalse(hasattr(self.conf.root.a, 'c'))
        self.assertIsNone(getattr(self.conf.root.a, 'c', None))

    def test_proxy_dir(self):
        self.assertEqual(dir(self.conf.root), ['a'])
        self.assertEqual(dir(self.conf.root.a), ['b'])

    def test_pop(self):
        self.assertEqual(list(self.conf['a'].keys()), ['b'])
        self.conf['a'].pop('b')
        self.assertEqual(list(self.conf['a'].keys()), [])

    def test_setting_existing_paths_through_setitem(self):
        self.conf["a"]["b"] = 3
        self.assertEqual(self.conf.root.a.b, 3)

    def test_setting_existing_paths_through_assign_path(self):
        self.conf.assign_path('a.b', 3)
        self.assertEqual(self.conf.root.a.b, 3)

    def test_setting_nonexistent_paths(self):
        with self.assertRaises(exceptions.CannotSetValue):
            self.conf['a']['c'] = 4
        with self.assertRaises(AttributeError):
            self.conf.root.a.c = 4

    def test_getting_through_getitem(self):
        self.assertIsInstance(self.conf['a'], Config)

    def test_contains(self):
        self.assertTrue("a" in self.conf)
        self.assertFalse("b" in self.conf)
        self.assertFalse("c" in self.conf["a"])
        self.assertTrue("b" in self.conf["a"])

    def test_item_not_found(self):
        with self.assertRaises(AttributeError):
            self.conf.root.a.c

    def test_keys(self):
        self.assertEqual(set(self.conf.keys()), set(['a']))
class ExtendingTest(TestCase):
    """Behaviour of ``Config.extend``/``update`` with plain values, nested
    structures, keyword arguments and other Config objects.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        super(ExtendingTest, self).setUp()
        self.conf = Config({
            "a": 1
        })

    def test_extend_single_value(self):
        self.conf.extend({"b": 2})
        self.assertEqual(self.conf.root.b, 2)

    def test_extend_keyword_arguments(self):
        self.conf.extend(b=2)
        self.assertEqual(self.conf.root.b, 2)

    def test_extend_structure(self):
        self.conf.extend({
            "b": {
                "c": {
                    "d": 2
                }
            }
        })
        self.assertEqual(self.conf.root.b.c.d, 2)

    def test_extend_structure_and_keywords(self):
        # Keywords take precedence over the dict argument.
        self.conf.extend({"b": 1, "c": 3}, b=2)
        self.assertEqual(self.conf.root.b, 2)
        self.assertEqual(self.conf.root.c, 3)

    def test_extend_preserves_nodes(self):
        self.conf.extend({"b": {"c": 2}})
        self.conf.extend({"b": {"d": 3}})
        self.assertEqual(
            self.conf.serialize_to_dict(),
            {"a": 1, "b": {"c": 2, "d": 3}}
        )

    def test_extend_config(self):
        self.conf.extend(Config({
            "b": {
                "c": {
                    "d": 2
                }
            }
        }))
        self.assertEqual(self.conf.root.b.c.d, 2)

    def test_extend_config_propagates_changes(self):
        # Extending with a Config links it, so later changes are visible.
        new_cfg = Config({'b': {'c': 2}})
        self.conf.extend(new_cfg)
        self.assertEqual(self.conf.root.b.c, 2)
        new_cfg.root.b.c = 3
        self.assertEqual(self.conf.root.b.c, 3)

    def test_extend_config_preserves_metadata(self):
        # `value // Metadata(...)` attaches metadata to a leaf.
        new_cfg = Config({'b': {'c': 2 // Metadata(x=3)}})
        self.conf.extend(new_cfg)
        self.assertEqual(self.conf.get_config('b.c').metadata, {'x': 3})

    def test_extend_config_preserves_nodes(self):
        self.conf.extend(Config({"b": {"c": 2}}))
        self.conf.extend(Config({"b": {"c": 2, "d": 3}}))
        self.assertEqual(
            self.conf.serialize_to_dict(),
            {"a": 1, "b": {"c": 2, "d": 3}}
        )

    def test_extend_config_prevents_losing_path(self):
        self.conf.extend(Config({"b": {"c": 2}}))
        with self.assertRaises(exceptions.CannotSetValue):
            self.conf.extend(Config({"b": {"d": 3}}))

    def test_update_config_preserves_nodes(self):
        self.conf.update(Config({"b": {"c": 2}}))
        self.conf.update(Config({"b": {"d": 3}}))
        self.assertEqual(
            self.conf.serialize_to_dict(),
            {"a": 1, "b": {"c": 2, "d": 3}}
        )
class HelperMethodsTest(TestCase):
    """Tests for Config traversal helpers.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        super(HelperMethodsTest, self).setUp()
        self.config = Config({
            "a": {
                "b": 2,
                "c": {
                    "d": 3,
                },
                "e": 4,
            },
            "f": 5,
        })

    def test_traverse_leaves(self):
        # Leaves are yielded as (dotted path, leaf config) pairs.
        self.assertEqual(
            sorted((path, c.get_value())
                   for path, c in self.config.traverse_leaves()),
            [("a.b", 2), ("a.c.d", 3), ("a.e", 4), ("f", 5)])
class CopyingTest(TestCase):
    """Config must deep-copy the dictionaries it is constructed from.

    Uses ``assertNotEqual`` instead of the deprecated ``assertNotEquals``
    alias, which was removed in Python 3.12.
    """

    def test_copying_nested_dictionaries(self):
        raw_conf = {"a": {"b": 2}}
        conf1 = Config(raw_conf)
        conf2 = Config(raw_conf)
        conf1["a"]["b"] += 1
        self.assertNotEqual(conf1["a"]["b"], conf2["a"]["b"])
class LinkedConfigurationTest(TestCase):
    """Behaviour of a Config embedded (linked) inside another Config.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        super(LinkedConfigurationTest, self).setUp()
        self.conf1 = Config(dict(a=1))
        self.conf2 = Config(dict(c=2))
        self.conf1.extend({"b": self.conf2})

    def test_linked_configurations(self):
        # The child Config is linked, not copied.
        self.assertIs(self.conf1['b'], self.conf2)

    def test_linked_backup_and_restore(self):
        # Backing up the parent also snapshots the linked child.
        self.conf1.backup()
        self.conf2['c'] = 3
        self.assertEqual(self.conf1.root.b.c, 3)
        self.conf1['a'] = 2
        self.conf1.restore()
        self.assertEqual(self.conf1.root.b.c, 2)

    def test_linked_backups_restore_parent_then_child(self):
        # Independent backup stacks on parent and child must both restore.
        self.conf2.backup()
        self.conf1.backup()
        self.conf2['c'] = 4
        self.assertEqual(self.conf2.root.c, 4)
        self.conf1.restore()
        self.assertEqual(self.conf2.root.c, 2)
        self.conf2['c'] = 5
        self.assertEqual(self.conf2.root.c, 5)
        self.conf2.restore()
        self.assertEqual(self.conf2.root.c, 2)
class BackupTest(TestCase):
    """Backup/restore semantics, including deep copies of mutable leaves."""

    def setUp(self):
        super(BackupTest, self).setUp()
        self.conf = Config({'a': 1, 'b': 2, 'c': []})
        self.conf.extend(Config({'d': []}))

    def test_backup_context(self):
        # Changes made inside the context are rolled back on exit.
        with self.conf.backup_context():
            self.conf.root.a = 10
            assert self.conf.root.a == 10
        assert self.conf.root.a == 1

    def test_restore_no_backup(self):
        # Restoring without a prior backup is an error.
        with self.assertRaises(exceptions.NoBackup):
            self.conf.restore()

    def test_discard(self):
        # A discarded backup can no longer be restored.
        self.conf.backup()
        self.conf.discard_backup()
        with self.assertRaises(exceptions.NoBackup):
            self.conf.restore()

    def test_backup_copy(self):
        # Mutable leaf values must be copied by backup(), not aliased.
        self.conf.backup()
        self.conf.root.c.append(0)
        self.conf.root.d.extend([2, 3])
        self.conf.restore()
        self.assertEqual(self.conf.root.c, [])
        self.assertEqual(self.conf.root.d, [])
class SerializationTest(TestCase):
    """``serialize_to_dict`` must return an independent deep copy.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        super(SerializationTest, self).setUp()
        self.dict = dict(
            a=dict(
                b=dict(
                    c=8
                )
            )
        )
        self.conf = Config(self.dict)

    def test_serialization(self):
        result = self.conf.serialize_to_dict()
        # Equal in content, but not the same objects at any level.
        self.assertIsNot(result, self.dict)
        self.assertEqual(result, self.dict)
        self.assertIsNot(result['a'], self.dict['a'])
        self.assertEqual(result['a']['b']['c'], 8)

    def test_serialization_with_assignment(self):
        self.conf.assign_path("a.b.c", 9)
        result = self.conf.serialize_to_dict()
        self.assertEqual(result['a']['b']['c'], 9)
| |
"""
#;+
#; NAME:
#; x_guis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Plotting GUIs
#; 07-Sep-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
# Import libraries
import numpy as np
import pdb
import matplotlib.pyplot as plt
#from xastropy.xutils import xdebug as xdb
#def plot_1d_arrays -- Plot a series of arrays (as many as you want!!)
#def plot_hist -- Plot a simple histogram
#### ###############################
# Simplest quick plot
# Plot a series of arrays (as many as you want!!)
def plot_1d_arrays(*args, **kwargs):
    """ Plot one or more arrays against the first one.

    Parameters
    ----------
    *args : np.ndarray
        The first array provides the x values; each subsequent array is
        plotted against it.  A single array is plotted against its index.
    outfil : str, optional
        Output filename to save the figure to.
    xlbl, ylbl : str, optional
        Labels for the x, y axes.
    xrng, yrng : list, optional
        Range of x, y limits.
    xtwo, ytwo : array_like, optional
        x/y values for a second curve (both must be given; only drawn in
        the multi-array branch, matching the original behaviour).
    scatter : bool, optional
        If set, draw a scatter plot instead of lines.

    Remaining keyword arguments are forwarded to matplotlib.
    """
    # Error checking (Python 3 print function; the old print statements
    # were a syntax error on Python 3)
    if len(args) == 0:
        print('x_guis.simple_splot: No arguments!')
        return
    if not isinstance(args[0], np.ndarray):
        print('x_guis: Input array is not a numpy.ndarray!')
        return
    plt_dict = {}
    # Outfil (popped so it is not forwarded to matplotlib)
    plt_dict['outfil'] = kwargs.pop('outfil', None)
    # Scatter plot?
    if 'scatter' in kwargs:
        kwargs['marker'] = 'o'
        kwargs.pop('scatter')
        plt_dict['flg_scatt'] = 1
    else:
        plt_dict['flg_scatt'] = 0
    # Second array? (logical `and`, not bitwise `&`)
    if ('xtwo' in kwargs) and ('ytwo' in kwargs):
        plt_dict['xtwo'] = kwargs.pop('xtwo')
        plt_dict['ytwo'] = kwargs.pop('ytwo')
        plt_dict['flg_two'] = 1
    else:
        plt_dict['flg_two'] = 0
    # Limits and labels (pop with default replaces try/except/else)
    for key in ['xrng', 'yrng', 'xlbl', 'ylbl']:
        plt_dict[key] = kwargs.pop(key, None)
    # Clear
    plt.clf()
    # Plot it right up
    if len(args) == 1:
        plt.plot(args[0].flatten())
    else:
        for kk in range(1, len(args)):
            if plt_dict['flg_scatt'] == 0:
                plt.plot(args[0].flatten(), args[kk].flatten(), **kwargs)
            else:
                plt.scatter(args[0].flatten(), args[kk].flatten(), **kwargs)
        # Second curve is only drawn in the multi-array branch
        if plt_dict['flg_two'] == 1:
            plt.plot(plt_dict['xtwo'], plt_dict['ytwo'])
    # Limits
    if plt_dict['xrng'] is not None:
        plt.xlim(plt_dict['xrng'])
    if plt_dict['yrng'] is not None:
        plt.ylim(plt_dict['yrng'])
    # Label
    if plt_dict['xlbl'] is not None:
        plt.xlabel(plt_dict['xlbl'])
    if plt_dict['ylbl'] is not None:
        plt.ylabel(plt_dict['ylbl'])
    # Output?
    if plt_dict['outfil'] is not None:
        plt.savefig(plt_dict['outfil'])
        print('Wrote figure to {:s}'.format(plt_dict['outfil']))
    # Show
    plt.show()
    return
#### ###############################
#Plot a simple histogram
def plot_hist(*args, **kwargs):
    """ Quick histogram plot of the first input array.

    Parameters
    ----------
    binsz : float, optional
        Width of the bin; defaults to std(data)/5.
    ax : matplotlib axis, optional
        Axis to draw into (useful for sub plots); defaults to pyplot.
    xrng : (float, float), optional
        Restrict the histogram (and default bin size) to this x range.
    alpha : float, optional
        Bar transparency; defaults to 1.
    noshow : boolean (False)
        Set keyword to True to not show to screen
    noclear : boolean (False)
        Set keyword to True to not clear the figure
    """
    # Error checking (Python 3 print function; the old print statements
    # were a syntax error on Python 3)
    if len(args) == 0:
        print('x_guis.simple_splot: No arguments!')
        return
    if not isinstance(args[0], np.ndarray):
        print('x_guis: Input array is not a numpy.ndarray!')
        return
    # Transparency
    if 'alpha' not in kwargs:
        kwargs['alpha'] = 1.
    # Bin size: default to one fifth of the standard deviation of the
    # (optionally range-restricted) data
    if 'binsz' not in kwargs:
        if 'xrng' in kwargs:
            tmp = args[0].flatten()
            idx = np.where((tmp > kwargs['xrng'][0]) & (tmp < kwargs['xrng'][1]))[0]
            kwargs['binsz'] = float(np.std(tmp[idx])/5.)
        else:
            kwargs['binsz'] = float(np.std(args[0].flatten())/5.)
    # Clear
    if 'noclear' not in kwargs:
        plt.clf()
    # Ax
    if 'ax' not in kwargs:
        ax = plt
    else:
        ax = kwargs['ax']
    # Plot
    if len(args) == 1:
        arr = args[0].flatten()
        # Find the range
        if 'xrng' not in kwargs:
            minv = np.amin(arr)
            maxv = np.amax(arr)
        else:
            minv = kwargs['xrng'][0]
            maxv = kwargs['xrng'][1]
        # Set the boundaries sensibly given binsz
        i0 = int(minv / kwargs['binsz']) - 1
        i1 = int(maxv / kwargs['binsz']) + 1
        rng = tuple(kwargs['binsz']*np.array([i0, i1]))
        nbin = i1 - i0
        # Histogram
        hist, edges = np.histogram(arr, range=rng, bins=nbin)
        ax.bar(edges[:-1], hist, width=kwargs['binsz'], alpha=kwargs['alpha'])
        # Labels: axes objects have set_xlabel, the pyplot module has xlabel.
        # Catch only AttributeError instead of the old bare except.
        if 'xlabel' in kwargs:
            try:
                ax.set_xlabel(kwargs['xlabel'])
            except AttributeError:
                ax.xlabel(kwargs['xlabel'])
    else:
        pdb.set_trace()  # Not ready for this yet
        for kk in range(1, len(args)):
            # was `fig.plot(...)`: `fig` was never defined (NameError)
            ax.plot(args[0].flatten(), args[kk].flatten())
    # Finish
    if 'noshow' not in kwargs:
        plt.show()
    return
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
    # Bit-flag selector for which ad-hoc test plots to run; each bit
    # enables one test block below.
    flg_test = 0
    flg_test = 1 # simple plots
    #flg_test += 2 # LLS plot
    #flg_test += 2**2 # zpeak
    #
    #flg_test += 2**9 # LLS Survey NHI
    #flg_test += 2**10 # LLS Survey ions
    # simple plots
    if (flg_test % 2**1) >= 2**0:
        xval = np.arange(10)
        yval = xval**2
        #plot_1d_arrays(xval, yval) # line
        plot_1d_arrays(xval, yval, scatter='True') #marker='o', linestyle='None') # dots
| |
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.filters
~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
from jinja2 import Markup, Environment
from jinja2._compat import text_type, implements_to_string
@pytest.mark.filter
class TestFilter():
def test_filter_calling(self, env):
rv = env.call_filter('sum', [1, 2, 3])
assert rv == 6
def test_capitalize(self, env):
tmpl = env.from_string('{{ "foo bar"|capitalize }}')
assert tmpl.render() == 'Foo bar'
def test_center(self, env):
tmpl = env.from_string('{{ "foo"|center(9) }}')
assert tmpl.render() == ' foo '
def test_default(self, env):
tmpl = env.from_string(
"{{ missing|default('no') }}|{{ false|default('no') }}|"
"{{ false|default('no', true) }}|{{ given|default('no') }}"
)
assert tmpl.render(given='yes') == 'no|False|no|yes'
def test_dictsort(self, env):
tmpl = env.from_string(
'{{ foo|dictsort }}|'
'{{ foo|dictsort(true) }}|'
'{{ foo|dictsort(false, "value") }}'
)
out = tmpl.render(foo={"aa": 0, "b": 1, "c": 2, "AB": 3})
assert out == ("[('aa', 0), ('AB', 3), ('b', 1), ('c', 2)]|"
"[('AB', 3), ('aa', 0), ('b', 1), ('c', 2)]|"
"[('aa', 0), ('b', 1), ('c', 2), ('AB', 3)]")
def test_batch(self, env):
tmpl = env.from_string("{{ foo|batch(3)|list }}|"
"{{ foo|batch(3, 'X')|list }}")
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]|"
"[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 'X', 'X']]")
def test_slice(self, env):
tmpl = env.from_string('{{ foo|slice(3)|list }}|'
'{{ foo|slice(3, "X")|list }}')
out = tmpl.render(foo=list(range(10)))
assert out == ("[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]|"
"[[0, 1, 2, 3], [4, 5, 6, 'X'], [7, 8, 9, 'X']]")
def test_escape(self, env):
tmpl = env.from_string('''{{ '<">&'|escape }}''')
out = tmpl.render()
assert out == '<">&'
def test_striptags(self, env):
tmpl = env.from_string('''{{ foo|striptags }}''')
out = tmpl.render(foo=' <p>just a small \n <a href="#">'
'example</a> link</p>\n<p>to a webpage</p> '
'<!-- <p>and some commented stuff</p> -->')
assert out == 'just a small example link to a webpage'
    def test_filesizeformat(self, env):
        # Decimal units (kB/MB/...) by default; binary units (KiB/MiB/...)
        # when the first argument is true.
        tmpl = env.from_string(
            '{{ 100|filesizeformat }}|'
            '{{ 1000|filesizeformat }}|'
            '{{ 1000000|filesizeformat }}|'
            '{{ 1000000000|filesizeformat }}|'
            '{{ 1000000000000|filesizeformat }}|'
            '{{ 100|filesizeformat(true) }}|'
            '{{ 1000|filesizeformat(true) }}|'
            '{{ 1000000|filesizeformat(true) }}|'
            '{{ 1000000000|filesizeformat(true) }}|'
            '{{ 1000000000000|filesizeformat(true) }}'
        )
        out = tmpl.render()
        assert out == (
            '100 Bytes|1.0 kB|1.0 MB|1.0 GB|1.0 TB|100 Bytes|'
            '1000 Bytes|976.6 KiB|953.7 MiB|931.3 GiB'
        )
    def test_filesizeformat_issue59(self, env):
        # Regression test: non-round sizes must be formatted correctly in
        # both decimal and binary mode.
        tmpl = env.from_string(
            '{{ 300|filesizeformat }}|'
            '{{ 3000|filesizeformat }}|'
            '{{ 3000000|filesizeformat }}|'
            '{{ 3000000000|filesizeformat }}|'
            '{{ 3000000000000|filesizeformat }}|'
            '{{ 300|filesizeformat(true) }}|'
            '{{ 3000|filesizeformat(true) }}|'
            '{{ 3000000|filesizeformat(true) }}'
        )
        out = tmpl.render()
        assert out == (
            '300 Bytes|3.0 kB|3.0 MB|3.0 GB|3.0 TB|300 Bytes|'
            '2.9 KiB|2.9 MiB'
        )
def test_first(self, env):
tmpl = env.from_string('{{ foo|first }}')
out = tmpl.render(foo=list(range(10)))
assert out == '0'
def test_float(self, env):
tmpl = env.from_string('{{ "42"|float }}|'
'{{ "ajsghasjgd"|float }}|'
'{{ "32.32"|float }}')
out = tmpl.render()
assert out == '42.0|0.0|32.32'
def test_format(self, env):
tmpl = env.from_string('''{{ "%s|%s"|format("a", "b") }}''')
out = tmpl.render()
assert out == 'a|b'
def test_indent(self, env):
tmpl = env.from_string('{{ foo|indent(2) }}|{{ foo|indent(2, true) }}')
text = '\n'.join([' '.join(['foo', 'bar'] * 2)] * 2)
out = tmpl.render(foo=text)
assert out == ('foo bar foo bar\n foo bar foo bar| '
'foo bar foo bar\n foo bar foo bar')
def test_int(self, env):
tmpl = env.from_string('{{ "42"|int }}|{{ "ajsghasjgd"|int }}|'
'{{ "32.32"|int }}|{{ "0x4d32"|int(0, 16) }}|'
'{{ "011"|int(0, 8)}}|{{ "0x33FU"|int(0, 16) }}')
out = tmpl.render()
assert out == '42|0|32|19762|9|0'
def test_join(self, env):
tmpl = env.from_string('{{ [1, 2, 3]|join("|") }}')
out = tmpl.render()
assert out == '1|2|3'
env2 = Environment(autoescape=True)
tmpl = env2.from_string(
'{{ ["<foo>", "<span>foo</span>"|safe]|join }}')
assert tmpl.render() == '<foo><span>foo</span>'
def test_join_attribute(self, env):
class User(object):
def __init__(self, username):
self.username = username
tmpl = env.from_string('''{{ users|join(', ', 'username') }}''')
assert tmpl.render(users=map(User, ['foo', 'bar'])) == 'foo, bar'
def test_last(self, env):
tmpl = env.from_string('''{{ foo|last }}''')
out = tmpl.render(foo=list(range(10)))
assert out == '9'
def test_length(self, env):
tmpl = env.from_string('''{{ "hello world"|length }}''')
out = tmpl.render()
assert out == '11'
def test_lower(self, env):
tmpl = env.from_string('''{{ "FOO"|lower }}''')
out = tmpl.render()
assert out == 'foo'
def test_pprint(self, env):
from pprint import pformat
tmpl = env.from_string('''{{ data|pprint }}''')
data = list(range(1000))
assert tmpl.render(data=data) == pformat(data)
def test_random(self, env):
tmpl = env.from_string('''{{ seq|random }}''')
seq = list(range(100))
for _ in range(10):
assert int(tmpl.render(seq=seq)) in seq
def test_reverse(self, env):
tmpl = env.from_string('{{ "foobar"|reverse|join }}|'
'{{ [1, 2, 3]|reverse|list }}')
assert tmpl.render() == 'raboof|[3, 2, 1]'
def test_string(self, env):
x = [1, 2, 3, 4, 5]
tmpl = env.from_string('''{{ obj|string }}''')
assert tmpl.render(obj=x) == text_type(x)
    def test_title(self, env):
        # Title-casing must treat apostrophes, hyphens and tabs as word
        # separators/joiners correctly, and must stringify objects first.
        tmpl = env.from_string('''{{ "foo bar"|title }}''')
        assert tmpl.render() == "Foo Bar"
        tmpl = env.from_string('''{{ "foo's bar"|title }}''')
        assert tmpl.render() == "Foo's Bar"
        tmpl = env.from_string('''{{ "foo bar"|title }}''')
        assert tmpl.render() == "Foo Bar"
        tmpl = env.from_string('''{{ "f bar f"|title }}''')
        assert tmpl.render() == "F Bar F"
        tmpl = env.from_string('''{{ "foo-bar"|title }}''')
        assert tmpl.render() == "Foo-Bar"
        tmpl = env.from_string('''{{ "foo\tbar"|title }}''')
        assert tmpl.render() == "Foo\tBar"
        tmpl = env.from_string('''{{ "FOO\tBAR"|title }}''')
        assert tmpl.render() == "Foo\tBar"
        # Objects are converted via __str__ before title-casing.
        class Foo:
            def __str__(self):
                return 'foo-bar'
        tmpl = env.from_string('''{{ data|title }}''')
        out = tmpl.render(data=Foo())
        assert out == 'Foo-Bar'
def test_truncate(self, env):
tmpl = env.from_string(
'{{ data|truncate(15, true, ">>>") }}|'
'{{ data|truncate(15, false, ">>>") }}|'
'{{ smalldata|truncate(15) }}'
)
out = tmpl.render(data='foobar baz bar' * 1000,
smalldata='foobar baz bar')
msg = 'Current output: %s' % out
assert out == 'foobar baz b>>>|foobar baz >>>|foobar baz bar', msg
def test_truncate_very_short(self, env):
tmpl = env.from_string(
'{{ "foo bar baz"|truncate(9) }}|'
'{{ "foo bar baz"|truncate(9, true) }}'
)
out = tmpl.render()
assert out == 'foo ...|foo ba...', out
def test_truncate_end_length(self, env):
tmpl = env.from_string('{{ "Joel is a slug"|truncate(9, true) }}')
out = tmpl.render()
assert out == 'Joel i...', 'Current output: %s' % out
def test_upper(self, env):
tmpl = env.from_string('{{ "foo"|upper }}')
assert tmpl.render() == 'FOO'
def test_urlize(self, env):
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize }}')
assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
'http://www.example.com/</a> bar'
def test_urlize_target_parameter(self, env):
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize(target="_blank") }}'
)
assert tmpl.render() \
== 'foo <a href="http://www.example.com/" target="_blank">'\
'http://www.example.com/</a> bar'
tmpl = env.from_string(
'{{ "foo http://www.example.com/ bar"|urlize(target=42) }}'
)
assert tmpl.render() == 'foo <a href="http://www.example.com/">'\
'http://www.example.com/</a> bar'
def test_wordcount(self, env):
tmpl = env.from_string('{{ "foo bar baz"|wordcount }}')
assert tmpl.render() == '3'
def test_block(self, env):
tmpl = env.from_string(
'{% filter lower|escape %}<HEHE>{% endfilter %}'
)
assert tmpl.render() == '<hehe>'
def test_chaining(self, env):
tmpl = env.from_string(
'''{{ ['<foo>', '<bar>']|first|upper|escape }}'''
)
assert tmpl.render() == '<FOO>'
def test_sum(self, env):
tmpl = env.from_string('''{{ [1, 2, 3, 4, 5, 6]|sum }}''')
assert tmpl.render() == '21'
def test_sum_attributes(self, env):
tmpl = env.from_string('''{{ values|sum('value') }}''')
assert tmpl.render(values=[
{'value': 23},
{'value': 1},
{'value': 18},
]) == '42'
def test_sum_attributes_nested(self, env):
tmpl = env.from_string('''{{ values|sum('real.value') }}''')
assert tmpl.render(values=[
{'real': {'value': 23}},
{'real': {'value': 1}},
{'real': {'value': 18}},
]) == '42'
def test_sum_attributes_tuple(self, env):
tmpl = env.from_string('''{{ values.items()|sum('1') }}''')
assert tmpl.render(values={
'foo': 23,
'bar': 1,
'baz': 18,
}) == '42'
def test_abs(self, env):
tmpl = env.from_string('''{{ -1|abs }}|{{ 1|abs }}''')
assert tmpl.render() == '1|1', tmpl.render()
def test_round_positive(self, env):
tmpl = env.from_string('{{ 2.7|round }}|{{ 2.1|round }}|'
"{{ 2.1234|round(3, 'floor') }}|"
"{{ 2.1|round(0, 'ceil') }}")
assert tmpl.render() == '3.0|2.0|2.123|3.0', tmpl.render()
def test_round_negative(self, env):
tmpl = env.from_string('{{ 21.3|round(-1)}}|'
"{{ 21.3|round(-1, 'ceil')}}|"
"{{ 21.3|round(-1, 'floor')}}")
assert tmpl.render() == '20.0|30.0|20.0', tmpl.render()
def test_xmlattr(self, env):
tmpl = env.from_string(
"{{ {'foo': 42, 'bar': 23, 'fish': none, "
"'spam': missing, 'blub:blub': '<?>'}|xmlattr }}")
out = tmpl.render().split()
assert len(out) == 3
assert 'foo="42"' in out
assert 'bar="23"' in out
assert 'blub:blub="<?>"' in out
def test_sort1(self, env):
tmpl = env.from_string(
'{{ [2, 3, 1]|sort }}|{{ [2, 3, 1]|sort(true) }}')
assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'
def test_sort2(self, env):
tmpl = env.from_string('{{ "".join(["c", "A", "b", "D"]|sort) }}')
assert tmpl.render() == 'AbcD'
def test_sort3(self, env):
tmpl = env.from_string('''{{ ['foo', 'Bar', 'blah']|sort }}''')
assert tmpl.render() == "['Bar', 'blah', 'foo']"
    def test_sort4(self, env):
        # Sorting by attribute works for arbitrary objects.
        @implements_to_string
        class Magic(object):
            def __init__(self, value):
                self.value = value
            def __str__(self):
                return text_type(self.value)
        tmpl = env.from_string('''{{ items|sort(attribute='value')|join }}''')
        assert tmpl.render(items=map(Magic, [3, 2, 4, 1])) == '1234'
def test_groupby(self, env):
    """groupby('foo') buckets dicts by key value, ordered by grouper."""
    tmpl = env.from_string('''
    {%- for grouper, list in [{'foo': 1, 'bar': 2},
                              {'foo': 2, 'bar': 3},
                              {'foo': 1, 'bar': 1},
                              {'foo': 3, 'bar': 4}]|groupby('foo') -%}
        {{ grouper }}{% for x in list %}: {{ x.foo }}, {{ x.bar }}{% endfor %}|
    {%- endfor %}''')
    # trailing '' comes from the final '|' separator
    assert tmpl.render().split('|') == [
        "1: 1, 2: 1, 1",
        "2: 2, 3",
        "3: 3, 4",
        ""
    ]
def test_groupby_tuple_index(self, env):
    """groupby accepts an integer index for grouping tuples."""
    tmpl = env.from_string('''
    {%- for grouper, list in [('a', 1), ('a', 2), ('b', 1)]|groupby(0) -%}
        {{ grouper }}{% for x in list %}:{{ x.1 }}{% endfor %}|
    {%- endfor %}''')
    assert tmpl.render() == 'a:1:2|b:1|'
def test_groupby_multidot(self, env):
    """groupby resolves dotted attribute paths such as 'date.year'."""
    class Date(object):
        def __init__(self, day, month, year):
            self.day = day
            self.month = month
            self.year = year

    class Article(object):
        def __init__(self, title, *date):
            self.date = Date(*date)
            self.title = title

    articles = [
        Article('aha', 1, 1, 1970),
        Article('interesting', 2, 1, 1970),
        Article('really?', 3, 1, 1970),
        Article('totally not', 1, 1, 1971)
    ]
    tmpl = env.from_string('''
    {%- for year, list in articles|groupby('date.year') -%}
        {{ year }}{% for x in list %}[{{ x.title }}]{% endfor %}|
    {%- endfor %}''')
    assert tmpl.render(articles=articles).split('|') == [
        '1970[aha][interesting][really?]',
        '1971[totally not]',
        ''
    ]
def test_filtertag(self, env):
    """{% filter %} blocks apply a chained filter to their rendered body."""
    source = ("{% filter upper|replace('FOO', 'foo') %}"
              "foobar{% endfilter %}")
    assert env.from_string(source).render() == 'fooBAR'
def test_replace(self, env):
    """replace filter: arguments are coerced to text; with autoescaping
    enabled both input and replacement text are escaped in the output."""
    env = Environment()
    tmpl = env.from_string('{{ string|replace("o", 42) }}')
    assert tmpl.render(string='<foo>') == '<f4242>'

    # With autoescape=True the rendered result is HTML-escaped; the
    # previously unescaped expectations were an unescaping artifact.
    env = Environment(autoescape=True)
    tmpl = env.from_string('{{ string|replace("o", 42) }}')
    assert tmpl.render(string='<foo>') == '&lt;f4242&gt;'
    tmpl = env.from_string('{{ string|replace("<", 42) }}')
    assert tmpl.render(string='<foo>') == '42foo&gt;'
    # Markup input is trusted, but the replacement argument is escaped.
    tmpl = env.from_string('{{ string|replace("o", ">x<") }}')
    assert tmpl.render(string=Markup('foo')) == 'f&gt;x&lt;&gt;x&lt;'
def test_forceescape(self, env):
    """forceescape escapes even Markup-wrapped (already 'safe') input,
    so the rendered output must be entity-encoded."""
    tmpl = env.from_string('{{ x|forceescape }}')
    assert tmpl.render(x=Markup('<div />')) == u'&lt;div /&gt;'
def test_safe(self, env):
    """|safe marks a string trusted so autoescaping leaves it alone;
    without |safe the same literal must come out escaped."""
    env = Environment(autoescape=True)
    tmpl = env.from_string('{{ "<div>foo</div>"|safe }}')
    assert tmpl.render() == '<div>foo</div>'
    tmpl = env.from_string('{{ "<div>foo</div>" }}')
    # autoescape is on, so the untrusted literal is entity-encoded
    assert tmpl.render() == '&lt;div&gt;foo&lt;/div&gt;'
def test_urlencode(self, env):
    """urlencode quotes plain strings and encodes dicts / pair sequences
    as query strings; autoescaping then escapes the '&' separators."""
    env = Environment(autoescape=True)
    tmpl = env.from_string('{{ "Hello, world!"|urlencode }}')
    assert tmpl.render() == 'Hello%2C%20world%21'
    # non-ASCII text is UTF-8 encoded before percent-quoting
    tmpl = env.from_string('{{ o|urlencode }}')
    assert tmpl.render(o=u"Hello, world\u203d") \
        == "Hello%2C%20world%E2%80%BD"
    assert tmpl.render(o=(("f", 1),)) == "f=1"
    # with autoescape on, the '&' joining the pairs is HTML-escaped
    assert tmpl.render(o=(('f', 1), ("z", 2))) == "f=1&amp;z=2"
    assert tmpl.render(o=((u"\u203d", 1),)) == "%E2%80%BD=1"
    assert tmpl.render(o={u"\u203d": 1}) == "%E2%80%BD=1"
    assert tmpl.render(o={0: 1}) == "0=1"
def test_simple_map(self, env):
    """map('int') coerces every item before they are summed."""
    env = Environment()
    source = '{{ ["1", "2", "3"]|map("int")|sum }}'
    assert env.from_string(source).render() == '6'
def test_attribute_map(self, env):
    """map(attribute=...) extracts an attribute from each object."""
    class User(object):
        def __init__(self, name):
            self.name = name

    env = Environment()
    users = [
        User('john'),
        User('jane'),
        User('mike'),
    ]
    tmpl = env.from_string('{{ users|map(attribute="name")|join("|") }}')
    assert tmpl.render(users=users) == 'john|jane|mike'
def test_empty_map(self, env):
    """Mapping over none yields an empty sequence rather than an error."""
    env = Environment()
    rendered = env.from_string('{{ none|map("upper")|list }}').render()
    assert rendered == '[]'
def test_simple_select(self, env):
    """select('odd') keeps only the items that pass the test."""
    env = Environment()
    source = '{{ [1, 2, 3, 4, 5]|select("odd")|join("|") }}'
    assert env.from_string(source).render() == '1|3|5'
def test_bool_select(self, env):
    """select without a test keeps only truthy values."""
    env = Environment()
    tmpl = env.from_string(
        '{{ [none, false, 0, 1, 2, 3, 4, 5]|select|join("|") }}'
    )
    assert tmpl.render() == '1|2|3|4|5'
def test_simple_reject(self, env):
    """reject('odd') drops the items that pass the test."""
    env = Environment()
    source = '{{ [1, 2, 3, 4, 5]|reject("odd")|join("|") }}'
    assert env.from_string(source).render() == '2|4'
def test_bool_reject(self, env):
    """reject without a test keeps only falsy values."""
    env = Environment()
    tmpl = env.from_string(
        '{{ [none, false, 0, 1, 2, 3, 4, 5]|reject|join("|") }}'
    )
    assert tmpl.render() == 'None|False|0'
def test_simple_select_attr(self, env):
    """selectattr keeps objects whose attribute is truthy."""
    class User(object):
        def __init__(self, name, is_active):
            self.name = name
            self.is_active = is_active

    env = Environment()
    users = [
        User('john', True),
        User('jane', True),
        User('mike', False),
    ]
    tmpl = env.from_string(
        '{{ users|selectattr("is_active")|'
        'map(attribute="name")|join("|") }}'
    )
    assert tmpl.render(users=users) == 'john|jane'
def test_simple_reject_attr(self, env):
    """rejectattr drops objects whose attribute is truthy."""
    class User(object):
        def __init__(self, name, is_active):
            self.name = name
            self.is_active = is_active

    env = Environment()
    users = [
        User('john', True),
        User('jane', True),
        User('mike', False),
    ]
    tmpl = env.from_string('{{ users|rejectattr("is_active")|'
                           'map(attribute="name")|join("|") }}')
    assert tmpl.render(users=users) == 'mike'
def test_func_select_attr(self, env):
    """selectattr with a test name keeps objects whose attribute passes
    the named test ('odd' here)."""
    class User(object):
        def __init__(self, id, name):
            self.id = id
            self.name = name

    env = Environment()
    users = [
        User(1, 'john'),
        User(2, 'jane'),
        User(3, 'mike'),
    ]
    tmpl = env.from_string('{{ users|selectattr("id", "odd")|'
                           'map(attribute="name")|join("|") }}')
    assert tmpl.render(users=users) == 'john|mike'
def test_func_reject_attr(self, env):
    """rejectattr with a test name drops objects whose attribute passes
    the named test ('odd' here)."""
    class User(object):
        def __init__(self, id, name):
            self.id = id
            self.name = name

    env = Environment()
    users = [
        User(1, 'john'),
        User(2, 'jane'),
        User(3, 'mike'),
    ]
    tmpl = env.from_string('{{ users|rejectattr("id", "odd")|'
                           'map(attribute="name")|join("|") }}')
    assert tmpl.render(users=users) == 'jane'
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2014-2019 Peter Williams <peter@newton.cx>
# Licensed under the GNU General Public License, version 3 or higher.
"""
Tools relating to working with NASA's ADS.
"""
from __future__ import absolute_import, division, print_function
import json
from .util import *
from . import webutil as wu
from .bibcore import *
__all__ = 'autolearn_bibcode search_ads'.split()
def _translate_ads_name(name):
pieces = [x.strip() for x in name.split(',', 1)]
surname = pieces[0].replace(' ', '_')
# TODO use bibcore
if len(pieces) > 1:
return pieces[1] + ' ' + surname
return surname
def _autolearn_bibcode_tag(info, tag, text):
# TODO: editors?
if tag == 'T':
info['title'] = text
elif tag == 'D':
info['year'] = int(text.split('/')[-1])
elif tag == 'B':
info['abstract'] = text
elif tag == 'A':
info['authors'] = [_translate_ads_name(n) for n in text.split(';')]
elif tag == 'Y':
subdata = dict(s.strip().split(': ', 1)
for s in text.split(';'))
if 'DOI' in subdata:
info['doi'] = subdata['DOI']
if 'eprintid' in subdata:
value = subdata['eprintid']
if value.startswith('arXiv:'):
info['arxiv'] = value[6:]
def autolearn_bibcode(app, bibcode):
    """Use the ADS export API to learn metadata given a bibcode.

    We could maybe use a nicer API, but the existing code used the ADS tagged
    format, so we stuck with it in the transition to the non-classic API.
    """
    apikey = app.cfg.get_or_die('api-keys', 'ads')
    url = 'https://api.adsabs.harvard.edu/v1/export/ads'

    opener = wu.build_opener()
    opener.addheaders = [
        ('Authorization', 'Bearer:' + apikey),
        ('Content-Type', 'application/json'),
    ]

    post_data = {
        'bibcode': [bibcode],
    }

    raw_content = opener.open(url, data=json.dumps(post_data).encode('utf8'))
    payload = json.load(raw_content)

    info = {'bibcode': bibcode, 'keep': 0}  # because we're autolearning
    curtag = curtext = None

    print('[Parsing', url, '...]')

    # The export payload is ADS "tagged" format: a field starts with
    # "%X text", may continue over following lines, and a blank line
    # (or the start of the next "%" field) terminates it.
    for line in payload['export'].splitlines():
        line = line.strip()

        if not len(line):
            # blank line: flush any field currently being accumulated
            if curtag is not None:
                _autolearn_bibcode_tag(info, curtag, curtext)
                curtag = curtext = None
            continue

        if curtag is None:
            if line[0] == '%':
                # starting a new tag
                curtag = line[1]
                curtext = line[3:]
            elif line.startswith('Retrieved '):
                if not line.endswith('selected: 1.'):
                    die('matched more than one publication')
        else:
            if line[0] == '%':
                # starting a new tag, while we had one going before.
                # finish up the previous
                _autolearn_bibcode_tag(info, curtag, curtext)
                curtag = line[1]
                curtext = line[3:]
            else:
                # continuation line of the current field
                curtext += ' ' + line

    if curtag is not None:
        # flush the final field
        _autolearn_bibcode_tag(info, curtag, curtext)

    return info
# Searching
def _run_ads_search(app, searchterms, filterterms, nrows=50):
    """Run a query against the ADS search API and return the decoded JSON.

    searchterms -- list of term strings joined into the 'q' parameter
    filterterms -- list of 'fq' filter-query strings
    nrows       -- maximum number of result rows to request
    """
    # TODO: access to more API args
    apikey = app.cfg.get_or_die('api-keys', 'ads')

    q = [('q', ' '.join(searchterms))]
    for ft in filterterms:
        q.append(('fq', ft))
    q.append(('fl', 'author,bibcode,title'))  # fields list
    q.append(('rows', nrows))

    # Use HTTPS, matching the export endpoint in autolearn_bibcode();
    # the ADS API is served over TLS and plain http is not reliable.
    url = 'https://api.adsabs.harvard.edu/v1/search/query?' + wu.urlencode(q)

    opener = wu.build_opener()
    opener.addheaders = [('Authorization', 'Bearer:' + apikey)]
    return json.load(opener.open(url))
def search_ads(app, terms, raw=False, large=False):
    """Run an ADS search and pretty-print the matching publications.

    terms -- list of (kind, value) tuples; kinds: 'year', 'surname',
             'refereed', 'astro_specific'
    raw   -- if True, dump the raw JSON response to stdout and return
    large -- if True, request and display many more results
    """
    if len(terms) < 2:
        die('require at least two search terms for ADS')

    adsterms = []
    astro_specific = True  # default to this for this service

    for info in terms:
        if info[0] == 'year':
            adsterms.append('year:%d' % info[1])
        elif info[0] == 'surname':
            adsterms.append('author:"%s"' % info[1])
        elif info[0] == 'refereed':
            if info[1]:
                adsterms.append('property:refereed')
            else:
                adsterms.append('property:notrefereed')
        elif info[0] == 'astro_specific':
            astro_specific = info[1]
        else:
            die('don\'t know how to express search term %r to ADS', info)

    if astro_specific:
        filter_terms = ['database:astronomy']
    else:
        filter_terms = []

    if large:
        nrows = 1000
    else:
        nrows = 50

    try:
        r = _run_ads_search(app, adsterms, filter_terms, nrows=nrows)  # XXX more hardcoding
    except Exception as e:
        die('could not perform ADS search: %s', e)

    if raw:
        import sys
        json.dump(r, sys.stdout, ensure_ascii=False, indent=2, separators=(',', ': '))
        return

    query_row_limit = r.get('responseHeader', {}).get('params', {}).get('rows')
    if query_row_limit is not None:
        query_row_limit = int(query_row_limit)
    else:
        # default specified by ADS API docs:
        # https://github.com/adsabs/adsabs-dev-api/blob/master/search.md
        query_row_limit = 10

    maxbclen = 0
    info = []

    if large:
        ntrunc = 2147483647  # 2**31-1, probably big enough
    else:
        ntrunc = 20

    nresults = len(r['response']['docs'])

    for item in r['response']['docs'][:ntrunc]:
        # year isn't important since it's embedded in bibcode.
        if 'title' in item:
            title = item['title'][0]  # not sure why this is a list?
        else:
            title = '(no title)'  # this happens, e.g.: 1991PhDT.......161G
        bibcode = item['bibcode']
        authors = ', '.join(parse_name(_translate_ads_name(n))[1] for n in item['author'])
        maxbclen = max(maxbclen, len(bibcode))
        info.append((bibcode, title, authors))

    ofs = maxbclen + 2
    red, reset = get_color_codes(None, 'red', 'reset')

    for bc, title, authors in info:
        print('%s%*s%s ' % (red, maxbclen, bc, reset), end='')
        print_truncated(title, ofs, color='bold')
        print(' ', end='')
        print_truncated(authors, 4)

    # Only print the summary when the listing was actually truncated;
    # otherwise it would claim "showing 20 of 5 results" for small sets.
    if nresults >= ntrunc:
        print('')
        if nresults < query_row_limit:
            # fewer docs than the server row limit: nresults is the true total
            print('(showing %d of %d results)' % (ntrunc, nresults))
        else:
            # we hit the row limit; there may be more matches on the server
            print('(showing %d of at least %d results)' % (ntrunc, query_row_limit))
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
BOSH OpenStack Ironic CPI
"""
# Python 2 and 3 compatibility
from __future__ import unicode_literals
import json
import requests
import copy
import logging
try:
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
class RegistryError(Exception):
    """Base class for all Registry Exceptions."""

    def __init__(self, message):
        # keep the message for __str__/__repr__
        self.message = message

    def __repr__(self):
        return "<%s (%s)>" % (self.__class__.__name__, self.message)

    def __str__(self):
        return self.message
class Registry(object):
    """Client for the BOSH registry HTTP API.

    Wraps a requests session pointed at the registry's
    ``instances/<uuid>/settings`` endpoint and offers helpers to read,
    push, merge and delete agent settings for a single VM.
    """

    def __init__(self, config, uuid, logger=None):
        """Build the HTTP session from *config* for instance *uuid*.

        config -- dict with 'url' (may embed credentials) and optional
                  'username', 'password', 'cacert', 'timeout' keys
        uuid   -- instance identifier used to build the settings endpoint
        logger -- optional logger; a class-named one is created if omitted
        """
        url = urlparse(config['url'])
        username = config.get('username', url.username)
        password = config.get('password', url.password)
        cert = config.get('cacert')
        timeout = config.get('timeout')
        if not url.path.endswith('/'):
            basepath = url.path + '/'
        else:
            basepath = url.path
        self.endpoint = url.scheme + "://" + url.netloc + basepath
        self.endpoint = self.endpoint + 'instances/' + uuid + "/settings"
        # Reconstruct a credential-bearing URL for reference/debugging.
        self.url = url.scheme + "://"
        if username:
            self.url = self.url + username
            if password:
                self.url = self.url + ':' + password
            self.url += '@'
        self.url = self.url + url.netloc + url.path
        self.session = requests.Session()
        if username:
            self.session.auth = (username, password)
        if timeout:
            self.session.timeout = timeout
        self.session.headers.update({'Accept': 'application/json'})
        # Verify TLS only when a CA certificate was supplied.
        self.session.verify = True if cert else False
        self.session.cert = cert if cert else None
        self.logger = logger
        self.uuid = uuid
        if not logger:
            self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.debug("Initializing registry %s" % (self.__class__.__name__))

    @staticmethod
    def dict_merge(a, b):
        """Merge *b* into *a* and return the merged result.

        Scalars are replaced by *b*, lists are appended without
        duplicates, and dicts are merged recursively.

        NOTE: tuples and arbitrary objects are not handled as it is totally
        ambiguous what should happen.
        """
        # Scalar types differ between Python 2 and 3: unicode/long do not
        # exist on Python 3 and previously raised NameError there.
        try:
            scalar_types = (str, unicode, bytes, int, long, float)  # noqa: F821
        except NameError:
            scalar_types = (str, bytes, int, float)
        key = None
        try:
            if a is None or isinstance(a, scalar_types):
                # border case for first run or if a is a primitive
                a = b
            elif isinstance(a, list):
                # lists can be only appended
                if isinstance(b, list):
                    for item in b:
                        if item not in a:
                            # append each missing item (previously the whole
                            # list b was appended once per missing item)
                            a.append(item)
                else:
                    if b not in a:
                        a.append(b)
            elif isinstance(a, dict):
                # dicts must be merged
                if isinstance(b, dict):
                    for key in b:
                        if key in a:
                            a[key] = Registry.dict_merge(a[key], b[key])
                        else:
                            a[key] = b[key]
                else:
                    msg = "Cannot merge non-dict '%s' into dict '%s'"
                    raise ValueError(msg % (b, a))
            else:
                msg = "Cannot merge tuples '%s':'%s'"
                raise ValueError(msg % (b, a))
        except TypeError as e:
            msg = "TypeError in key '%s' when merging '%s' into '%s': %s"
            raise TypeError(msg % (key, b, a, e))
        return a

    def read(self):
        """GET the instance settings and return them as a dict.

        Raises RegistryError if the request fails or the registry
        reports a non-ok status.
        """
        settings = None
        try:
            self.logger.debug("Doing HTTP GET '%s'" % (self.endpoint))
            req = self.session.get(self.endpoint)
            if req.status_code == requests.codes.ok:
                result = req.json()
                settings = result['settings']
                if result['status'] != 'ok':
                    msg = "Registry error: %s" % (settings)
                    self.logger.error(msg)
                    raise RegistryError(msg)
                # 'settings' is itself a JSON-encoded string
                return json.loads(settings)
            else:
                msg = "HTTP GET '%s': %s" % (self.endpoint, req.status_code)
                self.logger.error(msg)
                raise RegistryError(msg)
        except RegistryError:
            raise
        except Exception as e:
            msg = "Error '%s': %s" % (e.__class__.__name__, str(e))
            self.logger.error(msg)
            raise RegistryError(msg)

    def push(self, settings):
        """PUT *settings* (JSON-encoded) to the registry; return them."""
        try:
            self.logger.debug("Doing HTTP PUT '%s': %s" % (self.endpoint, settings))
            req = self.session.put(self.endpoint, data=json.dumps(settings))
        except Exception as e:
            msg = "Error '%s': %s" % (e.__class__.__name__, str(e))
            self.logger.error(msg)
            raise RegistryError(msg)
        if req.status_code not in [201, 204]:
            msg = "HTTP PUT '%s': %s" % (self.endpoint, req.status_code)
            self.logger.error(msg)
            raise RegistryError(msg)
        return settings

    def merge(self, settings):
        """Merge *settings* into the stored settings and push the result."""
        old_settings = self.read()
        try:
            new_settings = self.dict_merge(old_settings, settings)
        except Exception as e:
            msg = str(e)
            self.logger.error(msg)
            raise RegistryError(msg)
        return self.push(new_settings)

    def delete(self, delete_uuid=False):
        """DELETE the instance settings; optionally the instance entry too."""
        try:
            self.logger.debug("Doing HTTP DELETE '%s'" % (self.endpoint))
            req = self.session.delete(self.endpoint)
        except Exception as e:
            msg = "Error '%s': %s" % (e.__class__.__name__, str(e))
            self.logger.error(msg)
            raise RegistryError(msg)
        if req.status_code not in [requests.codes.ok, requests.codes.no_content]:
            msg = "HTTP DELETE '%s': %s" % (self.endpoint, req.status_code)
            self.logger.error(msg)
            raise RegistryError(msg)
        if delete_uuid:
            # Drop the trailing '/settings' component to address the instance
            # itself. (The previous os.path.split() call produced a tuple --
            # and 'os' was never imported -- so this always raised.)
            url = self.endpoint.rsplit('/', 1)[0] + "/"
            try:
                self.logger.debug("Doing HTTP DELETE '%s'" % (url))
                req = self.session.delete(url)
            except Exception as e:
                msg = "Error '%s': %s" % (e.__class__.__name__, str(e))
                self.logger.error(msg)
                raise RegistryError(msg)
            if req.status_code not in [requests.codes.ok, requests.codes.no_content]:
                msg = "HTTP DELETE '%s': %s" % (url, req.status_code)
                self.logger.error(msg)
                raise RegistryError(msg)

    def create(self, agent_id, mbus, ntp=None, disk_system='/dev/sda',
               disk_ephemeral='', blobstore=None, env=None, certs=None):
        """PUT a fresh, complete settings document for this instance.

        ntp/blobstore/env default to empty containers; None sentinels are
        used instead of mutable defaults shared between calls.
        """
        # Create registry contents
        registry = {
            'agent_id': agent_id,
            'mbus': mbus,
            'trusted_certs': certs,
            'vm': {
                'name': self.uuid,
                'id': self.uuid
            },
            'blobstore': blobstore if blobstore is not None else {},
            'disks': {
                'system': disk_system,
                'ephemeral': disk_ephemeral,
                'persistent': {}
            },
            'networks': {},
            'ntp': ntp if ntp is not None else [],
            'env': env if env is not None else {},
        }
        try:
            self.logger.debug("Doing HTTP PUT '%s': %s" % (self.endpoint, registry))
            req = self.session.put(self.endpoint, data=json.dumps(registry))
        except Exception as e:
            msg = "Error '%s': %s" % (e.__class__.__name__, str(e))
            self.logger.error(msg)
            raise RegistryError(msg)
        if req.status_code not in [201, 204]:
            msg = "HTTP PUT '%s': %s" % (self.endpoint, req.status_code)
            self.logger.error(msg)
            raise RegistryError(msg)
        return registry

    def set_disk(self, disk, path, kind='persistent',
                 device_id=None, volume_id=None, lun=None, host=None, fs=None):
        """Record a disk attachment in the registry settings and merge it."""
        settings = {}
        settings['disks'] = {}
        settings['disks'][kind] = {}
        settings['disks'][kind][disk] = {}
        settings['disks'][kind][disk]['path'] = path
        if device_id is not None:
            settings['disks'][kind][disk]['device_id'] = device_id
        if volume_id is not None:
            settings['disks'][kind][disk]['volume_id'] = volume_id
        if lun is not None:
            settings['disks'][kind][disk]['lun'] = lun
        if host is not None:
            settings['disks'][kind][disk]['host_device_id'] = host
        if fs is not None:
            settings['disks'][kind][disk]['file_system_type'] = fs
        self.logger.debug("Updating disks '%s'" % (settings))
        self.merge(settings)
        return settings

    def delete_disk(self, disk, kind='persistent'):
        """Remove a disk entry from the settings and push the result."""
        self.logger.debug("Deleting disk '%s' on '%s'" % (disk, self.uuid))
        settings = self.read()
        try:
            del settings['disks'][kind][disk]
        except (KeyError, TypeError):
            # narrowed from a bare except: only missing/mis-shaped entries
            msg = "Disk %s '%s' not found on '%s'!" % (kind, disk, self.uuid)
            self.logger.error(msg)
            raise RegistryError(msg)
        return self.push(settings)

    def set_network(self, name, spec=None, dns=None, mac=None, dhcp=False):
        """Add or update a network definition in the registry settings."""
        settings = {}
        settings['networks'] = {}
        # deepcopy so the caller's spec is untouched; None sentinel avoids
        # a shared mutable default argument
        net = copy.deepcopy(spec) if spec is not None else {}
        if 'type' not in net:
            if 'ip' not in net:
                net['type'] = 'dynamic'
            else:
                net['type'] = 'manual'
        if dhcp or net['type'] == 'dynamic':
            net['use_dhcp'] = True
            net['type'] = 'dynamic'
        if dns:
            net['dns'] = dns
        if mac:
            net['mac'] = mac
        settings['networks'][name] = net
        self.logger.debug("Updating '%s' networks '%s'" % (self.uuid, settings))
        return self.merge(settings)

    def delete_network(self, name):
        """Remove a network entry from the settings and push the result."""
        self.logger.debug("Deleting network '%s' on '%s'" % (name, self.uuid))
        settings = self.read()
        try:
            del settings['networks'][name]
        except (KeyError, TypeError):
            msg = "Network '%s' not found on '%s'!" % (name, self.uuid)
            self.logger.error(msg)
            raise RegistryError(msg)
        return self.push(settings)

    def set_env_bosh(self, password=None, keep_root_password=None,
                     remove_dev_tools=None):
        """Merge BOSH-specific env flags into the registry settings."""
        settings = {}
        settings['env'] = {}
        settings['env']['bosh'] = {}
        if password:
            settings['env']['bosh']['password'] = password
        if keep_root_password is not None:
            settings['env']['bosh']['keep_root_password'] = keep_root_password
        if remove_dev_tools is not None:
            settings['env']['bosh']['remove_dev_tools'] = remove_dev_tools
        self.logger.debug("Updating bosh env settings '%s'" % (settings))
        return self.merge(settings)

    def set_env_persistent_disk_fs(self, persistent_disk_fs='ext4'):
        """Merge the persistent-disk filesystem type into the env settings."""
        settings = {}
        settings['env'] = {}
        settings['env']['persistent_disk_fs'] = persistent_disk_fs
        self.logger.debug("Updating bosh env persistent disk fs settings '%s'" % (settings))
        return self.merge(settings)
| |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from os import getcwd, remove, rmdir, mkdir, path
import tempfile
import shutil
from unittest import TestCase, main
from cogent.util.misc import flatten
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.core.alignment import DataError
from cogent.parse.rfam import (MinimalRfamParser, ChangedRnaSequence,
ChangedSequence)
from cogent.format.stockholm import stockholm_from_alignment
from cogent.struct.rna2d import ViennaStructure, wuss_to_vienna
from bfillings.infernal import (Cmalign, Cmbuild, Cmcalibrate, Cmemit, Cmscore,
Cmsearch, Cmstat, cmbuild_from_alignment,
cmbuild_from_file, cmalign_from_alignment,
cmalign_from_file, cmsearch_from_alignment,
cmsearch_from_file)
class GeneralSetUp(TestCase):
    """Shared fixtures for the Infernal application-controller tests."""

    def setUp(self):
        """Infernal general setUp method for all tests"""
        # Unaligned query sequences and a matching unaligned structure.
        self.seqs1_unaligned = {'1':'ACUGCUAGCUAGUAGCGUACGUA',
                                '2':'GCUACGUAGCUAC',
                                '3':'GCGGCUAUUAGAUCGUA'}
        self.struct1_unaligned_string = '....(((...)))....'
        self.seqs1_unaligned_gaps = {'1':'ACUGCUAGCUAGU-AGCGUAC--GUA',
                                     '2':'--GCUACGUAGCUAC',
                                     '3':'GCGGCUAUUAGAUCGUA--'}
        # Aligned reference sequences with a consensus structure.
        self.seqs2_aligned = {'a': 'UAGGCUCUGAUAUAAUAGCUCUC---------',
                              'c': '------------UGACUACGCAU---------',
                              'b': '----UAUCGCUUCGACGAUUCUCUGAUAGAGA'}
        self.seqs2_unaligned = {'a': 'UAGGCUCUGAUAUAAUAGCUCUC',
                                'c': 'UGACUACGCAU',
                                'b': 'UAUCGCUUCGACGAUUCUCUGAUAGAGA'}
        self.struct2_aligned_string = '............((.(...)))..........'
        self.struct2_aligned_dict = {'SS_cons':self.struct2_aligned_string}
        self.lines2 = stockholm_from_alignment(aln=self.seqs2_aligned,
                                               GC_annotation=self.struct2_aligned_dict)
        #self.seqs1 aligned to self.seqs2 with self.seqs2 included.
        self.seqs1_and_seqs2_aligned = \
            {'a': 'UAGGCUCUGAUAUAAUAGC-UCUC---------',
             'b': '----UAUCGCUUCGACGAU-UCUCUGAUAGAGA',
             'c': '------------UGACUAC-GCAU---------',
             '1': '-ACUGCUAGCUAGUAGCGUACGUA---------',
             '2': '----------GCUACGUAG-CUAC---------',
             '3': '-----GCGGCUAUUAG-AU-CGUA---------',
             }
        self.seqs1_and_seqs2_aligned_struct_string = \
            '............((.(....)))..........'
        #self.seqs1 aligned to self.seqs2 without self.seqs2 included.
        self.seqs1_aligned = \
            {'1': 'ACUGCUAGCUAGUAGCGUACGUA',
             '2': '---------GCUACGUAG-CUAC',
             '3': '----GCGGCUAUUAG-AU-CGUA',
             }
        self.seqs1_aligned_struct_string = \
            '...........((.(....))).'
        # Scratch directories; one path deliberately contains spaces to
        # exercise quoting in the command lines.
        self.temp_dir = tempfile.mkdtemp()
        self.temp_dir_spaces = '/tmp/test for infernal/'
        try:
            mkdir(self.temp_dir_spaces)
        except OSError:
            pass
        try:
            #create sequence files
            f = open(path.join(self.temp_dir, 'seqs1.sto'),'w')
            f.write(self.lines2)
            f.close()
            #create cm file.
            self.cmfile = path.join(self.temp_dir, 'aln2.cm')
            cm = open(self.cmfile,'w')
            cm.write(ALN1_CM)
            cm.close()
            #create alignment file used to create cm file.
            self.aln2_file = path.join(self.temp_dir, 'aln2.sto')
            af = open(self.aln2_file,'w')
            af.write(self.lines2)
            af.close()
        except OSError:
            pass
class CmalignTests(GeneralSetUp):
    """Tests for the Cmalign application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        c = Cmalign()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmalign']))
        # switching a parameter on must be reflected in the command line
        c.Parameters['-l'].on()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmalign -l']))

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        c = Cmalign(WorkingDir='/tmp/cmalign_test')
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmalign_test','/"; ','cmalign']))
        c = Cmalign()
        c.WorkingDir = '/tmp/cmalign_test2'
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmalign_test2','/"; ','cmalign']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/cmalign_test')
        rmdir('/tmp/cmalign_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # remove the tempdir and contents
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)

    def test_cmalign_from_alignment(self):
        """cmalign_from_alignment should work as expected.
        """
        #Align with cmalign_from_alignment without original alignment.
        aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs1_unaligned_gaps,moltype=RNA,include_aln=False)
        #Check correct alignment
        self.assertEqual(aln.todict(),self.seqs1_aligned)
        #Check correct struct
        self.assertEqual(wuss_to_vienna(str(struct)),
                         self.seqs1_aligned_struct_string)
        #should work with gapped seqs. Need to test this is taken care of
        # since cmalign segfaults when there are gaps in the seqs to be aligned.
        aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs1_unaligned_gaps,moltype=RNA)
        #alignment should be correct
        self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
        #structure should be correct
        self.assertEqual(wuss_to_vienna(str(struct)),
                         self.seqs1_and_seqs2_aligned_struct_string)
        #should work with ungapped seqs.
        aln, struct = cmalign_from_alignment(aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs1_unaligned_gaps,moltype=RNA)
        #alignment should be correct
        self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
        #structure should be correct
        self.assertEqual(wuss_to_vienna(str(struct)),
                         self.seqs1_and_seqs2_aligned_struct_string)
        #should return standard out
        aln, struct,stdout = cmalign_from_alignment(aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs1_unaligned_gaps,moltype=RNA,
            return_stdout=True)
        #Test that standard out is same length as expected
        self.assertEqual(len(stdout.split('\n')),
                         len(CMALIGN_STDOUT.split('\n')))

    def test_cmalign_from_file(self):
        """cmalign_from_file should work as expected.
        """
        #Align with cmalign_from_file without original alignment.
        aln,struct = cmalign_from_file(cm_file_path=self.cmfile,
                                       seqs=self.seqs1_unaligned,
                                       moltype=RNA)
        #Check correct alignment
        self.assertEqual(aln.todict(),self.seqs1_aligned)
        #Check correct struct
        self.assertEqual(wuss_to_vienna(str(struct)),
                         self.seqs1_aligned_struct_string)
        #Align with cmalign_from_file using original alignment.
        aln,struct = cmalign_from_file(cm_file_path=self.cmfile,
                                       seqs=self.seqs1_unaligned,
                                       moltype=RNA,
                                       alignment_file_path=self.aln2_file,
                                       include_aln=True)
        #alignment should be correct
        self.assertEqual(aln.todict(),self.seqs1_and_seqs2_aligned)
        #structure should be correct
        self.assertEqual(wuss_to_vienna(str(struct)),
                         self.seqs1_and_seqs2_aligned_struct_string)
class CmbuildTests(GeneralSetUp):
    """Tests for the Cmbuild application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        c = Cmbuild()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmbuild']))
        c.Parameters['-A'].on()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmbuild -A']))

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        c = Cmbuild(WorkingDir='/tmp/cmbuild_test')
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmbuild_test','/"; ','cmbuild']))
        c = Cmbuild()
        c.WorkingDir = '/tmp/cmbuild_test2'
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmbuild_test2','/"; ','cmbuild']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/cmbuild_test')
        rmdir('/tmp/cmbuild_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # remove the tempdir and contents
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)

    def test_cmbuild_from_alignment(self):
        """cmbuild_from_alignment should work as expected.
        """
        #Test unaligned seqs and unaligned struct fail.
        #DataError should be raised with Alignment is constructed
        self.assertRaises(DataError,cmbuild_from_alignment,
                          self.seqs1_unaligned,self.struct1_unaligned_string)
        #Test aligned seqs and unaligned struct fail.
        self.assertRaises(ValueError,cmbuild_from_alignment,
                          self.seqs2_aligned,self.struct1_unaligned_string)
        #Test get cm back without alignment.
        cm_res = cmbuild_from_alignment(self.seqs2_aligned,
                                        self.struct2_aligned_string)
        cm_lines = cm_res.split('\n')
        ALN1_CM_lines = ALN1_CM.split('\n')
        #Check that the same number of lines are in both CMs
        self.assertEqual(len(cm_lines),len(ALN1_CM_lines))
        #The first 13 lines are unique to the specific run. The res of the
        # CM should be the same, since built from the same data.
        self.assertEqual(cm_lines[13:],ALN1_CM_lines[13:])
        #Make sure same alignment is returned if return_alignment=True
        cm_res, cm_aln = cmbuild_from_alignment(self.seqs2_aligned,
            self.struct2_aligned_string,return_alignment=True)
        self.assertEqual(cm_aln,self.lines2)

    def test_cmbuild_from_file(self):
        """cmbuild_from_file should work as expected.
        """
        cm_res = cmbuild_from_file(self.temp_dir+'/seqs1.sto')
        cm_lines = cm_res.split('\n')
        ALN1_CM_lines = ALN1_CM.split('\n')
        #Check that the same number of lines are in both CMs
        self.assertEqual(len(cm_lines),len(ALN1_CM_lines))
        #The first 13 lines are unique to the specific run. The res of the
        # CM should be the same, since built from the same data.
        self.assertEqual(cm_lines[13:],ALN1_CM_lines[13:])
        #Make sure same alignment is returned if return_alignment=True
        cm_res, cm_aln = cmbuild_from_alignment(self.seqs2_aligned,
            self.struct2_aligned_string,return_alignment=True)
        self.assertEqual(cm_aln,self.lines2)
class CmcalibrateTests(GeneralSetUp):
    """Tests for the Cmcalibrate application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        c = Cmcalibrate()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmcalibrate']))
        c.Parameters['--mpi'].on()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmcalibrate --mpi']))

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        c = Cmcalibrate(WorkingDir='/tmp/cmcalibrate_test')
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmcalibrate_test','/"; ','cmcalibrate']))
        c = Cmcalibrate()
        c.WorkingDir = '/tmp/cmcalibrate_test2'
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmcalibrate_test2','/"; ','cmcalibrate']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/cmcalibrate_test')
        rmdir('/tmp/cmcalibrate_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # remove the tempdir and contents
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)
class CmemitTests(GeneralSetUp):
    """Tests for the Cmemit application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        c = Cmemit()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmemit']))
        c.Parameters['-u'].on()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmemit -u']))

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        c = Cmemit(WorkingDir='/tmp/cmemit_test')
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmemit_test','/"; ','cmemit']))
        c = Cmemit()
        c.WorkingDir = '/tmp/cmemit_test2'
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmemit_test2','/"; ','cmemit']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/cmemit_test')
        rmdir('/tmp/cmemit_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # remove the tempdir and contents
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)
class CmscoreTests(GeneralSetUp):
    """Tests for the Cmscore application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        c = Cmscore()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmscore']))
        c.Parameters['-l'].on()
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "',getcwd(),'/"; ','cmscore -l']))

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        c = Cmscore(WorkingDir='/tmp/cmscore_test')
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmscore_test','/"; ','cmscore']))
        c = Cmscore()
        c.WorkingDir = '/tmp/cmscore_test2'
        self.assertEqual(c.BaseCommand,
                         ''.join(['cd "','/tmp/cmscore_test2','/"; ','cmscore']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/cmscore_test')
        rmdir('/tmp/cmscore_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # remove the tempdir and contents
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)
class CmsearchTests(GeneralSetUp):
    """Tests for the Cmsearch application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        app = Cmsearch()
        expected = 'cd "' + getcwd() + '/"; ' + 'cmsearch'
        self.assertEqual(app.BaseCommand, expected)
        app.Parameters['-p'].on()
        expected = 'cd "' + getcwd() + '/"; ' + 'cmsearch -p'
        self.assertEqual(app.BaseCommand, expected)

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        app = Cmsearch(WorkingDir='/tmp/cmsearch_test')
        self.assertEqual(app.BaseCommand,
                         'cd "' + '/tmp/cmsearch_test' + '/"; ' + 'cmsearch')
        app = Cmsearch()
        app.WorkingDir = '/tmp/cmsearch_test2'
        self.assertEqual(app.BaseCommand,
                         'cd "' + '/tmp/cmsearch_test2' + '/"; ' + 'cmsearch')
        # Removing the dirs is proof that they were created on instantiation;
        # if either dir is missing, rmdir raises OSError and the test fails.
        rmdir('/tmp/cmsearch_test')
        rmdir('/tmp/cmsearch_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # Remove the tempdir and all of its contents.
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)

    def test_cmsearch_from_alignment_no_hits(self):
        """cmsearch_from_alignment should work as expected
        """
        # Searching unrelated sequences against the model yields no hits.
        res = cmsearch_from_alignment(
            aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs1_unaligned, moltype=RNA)
        self.assertEqual(res, [])

    def test_cmsearch_from_alignment(self):
        """cmsearch_from_alignment should work as expected
        """
        expected = [['a', 5, 23, 1, 19, 12.85, '-', 37],
                    ['b', 1, 19, 1, 19, 14.359999999999999, '-', 47]]
        res = cmsearch_from_alignment(
            aln=self.seqs2_aligned,
            structure_string=self.struct2_aligned_string,
            seqs=self.seqs2_unaligned, moltype=RNA)
        # Skip element 0 of each hit (sequence object) and compare the rest.
        for hit, exp in zip(res, expected):
            self.assertEqual(hit[1:], exp)

    def test_cmsearch_from_file_no_hits(self):
        """cmsearch_from_file should work as expected
        """
        res = cmsearch_from_file(cm_file_path=self.cmfile,
                                 seqs=self.seqs1_unaligned, moltype=RNA)
        self.assertEqual(res, [])

    def test_cmsearch_from_file(self):
        """cmsearch_from_file should work as expected
        """
        expected = [['a', 5, 23, 1, 19, 12.85, '-', 37],
                    ['b', 1, 19, 1, 19, 14.359999999999999, '-', 47]]
        res = cmsearch_from_file(cm_file_path=self.cmfile,
                                 seqs=self.seqs2_unaligned, moltype=RNA)
        # Skip element 0 of each hit (sequence object) and compare the rest.
        for hit, exp in zip(res, expected):
            self.assertEqual(hit[1:], exp)
class CmstatTests(GeneralSetUp):
    """Tests for the Cmstat application controller"""

    def test_base_command(self):
        """Infernal BaseCommand should return the correct BaseCommand"""
        app = Cmstat()
        expected = 'cd "' + getcwd() + '/"; ' + 'cmstat'
        self.assertEqual(app.BaseCommand, expected)
        app.Parameters['-g'].on()
        expected = 'cd "' + getcwd() + '/"; ' + 'cmstat -g'
        self.assertEqual(app.BaseCommand, expected)

    def test_changing_working_dir(self):
        """Infernal BaseCommand should change according to WorkingDir"""
        app = Cmstat(WorkingDir='/tmp/cmstat_test')
        self.assertEqual(app.BaseCommand,
                         'cd "' + '/tmp/cmstat_test' + '/"; ' + 'cmstat')
        app = Cmstat()
        app.WorkingDir = '/tmp/cmstat_test2'
        self.assertEqual(app.BaseCommand,
                         'cd "' + '/tmp/cmstat_test2' + '/"; ' + 'cmstat')
        # Removing the dirs is proof that they were created on instantiation;
        # if either dir is missing, rmdir raises OSError and the test fails.
        rmdir('/tmp/cmstat_test')
        rmdir('/tmp/cmstat_test2')

    def test_general_cleanUp(self):
        """Last test executed: cleans up all files initially created"""
        # Remove the tempdir and all of its contents.
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir_spaces)
ALN1_CM = """INFERNAL-1 [1.0rc1]
NAME aln1-1
STATES 61
NODES 18
ALPHABET 1
ELSELF -0.08926734
WBETA 1e-07
NSEQ 3
EFFNSEQ 3.000
CLEN 19
BCOM cmbuild aln1.cm aln1.sto
BDATE Sun Oct 5 18:45:35 2008
NULL 0.000 0.000 0.000 0.000
MODEL:
[ ROOT 0 ]
S 0 -1 0 1 4 -2.071 -2.210 -1.649 -2.140
IL 1 1 2 1 4 -0.556 -5.022 -1.818 -7.508 0.000 0.000 0.000 0.000
IR 2 2 3 2 3 -0.310 -2.439 -6.805 0.000 0.000 0.000 0.000
[ MATL 1 ]
ML 3 2 3 5 3 -8.003 -0.020 -6.657 -0.389 0.377 -1.236 0.597
D 4 2 3 5 3 -7.923 -3.436 -0.146
IL 5 5 3 5 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 2 ]
ML 6 5 3 8 3 -8.003 -0.020 -6.657 0.711 -1.015 -1.162 0.507
D 7 5 3 8 3 -7.923 -3.436 -0.146
IL 8 8 3 8 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 3 ]
ML 9 8 3 11 3 -8.003 -0.020 -6.657 -0.389 0.377 -1.236 0.597
D 10 8 3 11 3 -7.923 -3.436 -0.146
IL 11 11 3 11 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 4 ]
ML 12 11 3 14 3 -8.003 -0.020 -6.657 -0.392 0.246 -1.238 0.703
D 13 11 3 14 3 -7.923 -3.436 -0.146
IL 14 14 3 14 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 5 ]
ML 15 14 3 17 3 -8.003 -0.020 -6.657 -1.340 -2.411 1.644 -1.777
D 16 14 3 17 3 -7.923 -3.436 -0.146
IL 17 17 3 17 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 6 ]
ML 18 17 3 20 3 -8.003 -0.020 -6.657 0.830 0.106 -1.204 -0.492
D 19 17 3 20 3 -7.923 -3.436 -0.146
IL 20 20 3 20 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 7 ]
ML 21 20 3 23 3 -8.003 -0.020 -6.657 -1.143 -1.575 -1.925 1.560
D 22 20 3 23 3 -7.923 -3.436 -0.146
IL 23 23 3 23 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 8 ]
ML 24 23 3 26 3 -8.391 -0.018 -6.709 0.821 -1.044 -1.178 0.385
D 25 23 3 26 3 -6.905 -0.258 -2.688
IL 26 26 3 26 3 -1.925 -0.554 -4.164 0.000 0.000 0.000 0.000
[ MATR 9 ]
MR 27 26 3 29 5 -7.411 -0.031 -7.227 -7.439 -8.330 -0.726 0.967 -1.567 0.142
D 28 26 3 29 5 -5.352 -0.707 -2.978 -4.409 -2.404
IR 29 29 3 29 5 -2.408 -0.496 -5.920 -4.087 -5.193 0.000 0.000 0.000 0.000
[ MATP 10 ]
MP 30 29 3 34 6 -9.266 -9.205 -0.019 -7.982 -8.261 -8.656 -1.570 -1.865 -1.898 0.327 -1.331 -2.318 0.651 0.994 -1.872 0.282 -2.224 -0.666 1.972 -1.608 -0.242 1.187
ML 31 29 3 34 6 -6.250 -6.596 -1.310 -1.005 -6.446 -3.975 0.660 -0.612 -0.293 -0.076
MR 32 29 3 34 6 -6.988 -5.717 -1.625 -5.695 -0.829 -3.908 0.660 -0.612 -0.293 -0.076
D 33 29 3 34 6 -9.049 -7.747 -3.544 -4.226 -4.244 -0.319
IL 34 34 5 34 6 -2.579 -2.842 -0.760 -4.497 -5.274 -4.934 0.000 0.000 0.000 0.000
IR 35 35 6 35 5 -2.408 -0.496 -5.920 -4.087 -5.193 0.000 0.000 0.000 0.000
[ MATP 11 ]
MP 36 35 6 40 4 -7.331 -7.538 -0.041 -5.952 -4.114 0.397 -4.664 0.815 -4.665 -4.015 -0.462 -4.315 -3.939 3.331 -3.732 -0.830 -0.398 -3.640 -1.958 -3.517
ML 37 35 6 40 4 -3.758 -3.940 -0.507 -2.670 0.660 -0.612 -0.293 -0.076
MR 38 35 6 40 4 -4.809 -3.838 -1.706 -0.766 0.660 -0.612 -0.293 -0.076
D 39 35 6 40 4 -4.568 -4.250 -2.265 -0.520
IL 40 40 5 40 4 -1.686 -2.369 -1.117 -4.855 0.000 0.000 0.000 0.000
IR 41 41 6 41 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 12 ]
ML 42 41 6 44 5 -7.411 -0.031 -7.227 -7.439 -8.330 1.826 -2.947 -2.856 -2.413
D 43 41 6 44 5 -4.959 -0.803 -4.221 -2.596 -2.508
IL 44 44 3 44 5 -2.408 -0.496 -4.087 -5.920 -5.193 0.000 0.000 0.000 0.000
[ MATP 13 ]
MP 45 44 3 49 4 -7.331 -7.538 -0.041 -5.952 -1.592 -1.722 -1.807 0.471 -1.387 -2.146 1.822 0.774 -1.836 0.505 -2.076 -0.521 1.055 -1.515 -0.260 0.958
ML 46 44 3 49 4 -3.758 -3.940 -0.507 -2.670 0.660 -0.612 -0.293 -0.076
MR 47 44 3 49 4 -4.809 -3.838 -1.706 -0.766 0.660 -0.612 -0.293 -0.076
D 48 44 3 49 4 -4.568 -4.250 -2.265 -0.520
IL 49 49 5 49 4 -1.686 -2.369 -1.117 -4.855 0.000 0.000 0.000 0.000
IR 50 50 6 50 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 14 ]
ML 51 50 6 53 3 -8.323 -0.016 -6.977 0.481 -1.091 -0.011 0.192
D 52 50 6 53 3 -6.174 -1.687 -0.566
IL 53 53 3 53 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 15 ]
ML 54 53 3 56 3 -8.323 -0.016 -6.977 1.148 -1.570 -0.075 -1.007
D 55 53 3 56 3 -6.174 -1.687 -0.566
IL 56 56 3 56 3 -1.442 -0.798 -4.142 0.000 0.000 0.000 0.000
[ MATL 16 ]
ML 57 56 3 59 2 * 0.000 -0.726 0.967 -1.567 0.142
D 58 56 3 59 2 * 0.000
IL 59 59 3 59 2 -1.823 -0.479 0.000 0.000 0.000 0.000
[ END 17 ]
E 60 59 3 -1 0
//
"""
CMALIGN_STDOUT = """# cmalign :: align sequences to an RNA CM
# INFERNAL 1.0rc1 (June 2008)
# Copyright 2007-2009 (C) 2008 HHMI Janelia Farm Research Campus
# Freely distributed under the GNU General Public License (GPL)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# command: cmalign --withali aln1.sto -o all_aligned.sto aln1.cm seqs1.fasta
# date: Sun Oct 5 22:04:30 2008
#
# cm name algorithm config sub bands tau
# ------------------------- --------- ------ --- ----- ------
# aln1-1 opt acc global no hmm 1e-07
#
# bit scores
# ------------------
# seq idx seq name len total struct avg prob elapsed
# ------- -------- ----- -------- -------- -------- -----------
1 1 23 -9.98 5.71 0.260 00:00:00.01
2 2 13 -6.79 6.73 0.710 00:00:00.00
3 3 17 -7.43 5.86 0.754 00:00:00.01
# Alignment saved in file all_aligned.sto.
#
# CPU time: 0.02u 0.00s 00:00:00.02 Elapsed: 00:00:00
"""
# Run the module's test suite when executed as a script.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `notifications` app.

    Adds the `DefaultSubscription` model (with its `items` M2M table and a
    unique constraint on `transport`), the `EventTypeCategory` model, and a
    nullable `category` FK on `EventType`.  Statement order matters: South
    replays `forwards`/`backwards` verbatim, so do not reorder the db calls.
    """

    def forwards(self, orm):
        """Apply the migration: create tables/columns described above."""
        # Adding model 'DefaultSubscription'
        db.create_table(u'notifications_defaultsubscription', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('transport', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['notifications.Transport'])),
            ('frequency', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['notifications.SubscriptionFrequency'], null=True, blank=True)),
        ))
        db.send_create_signal('notifications', ['DefaultSubscription'])
        # Adding M2M table for field items on 'DefaultSubscription'
        m2m_table_name = db.shorten_name(u'notifications_defaultsubscription_items')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('defaultsubscription', models.ForeignKey(orm['notifications.defaultsubscription'], null=False)),
            ('eventtype', models.ForeignKey(orm['notifications.eventtype'], null=False))
        ))
        db.create_unique(m2m_table_name, ['defaultsubscription_id', 'eventtype_id'])
        # Adding unique constraint on 'DefaultSubscription', fields ['transport']
        db.create_unique(u'notifications_defaultsubscription', ['transport_id'])
        # Adding model 'EventTypeCategory'
        db.create_table(u'notifications_eventtypecategory', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('read_as', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('notifications', ['EventTypeCategory'])
        # Adding field 'EventType.category' (nullable so existing rows survive)
        db.add_column(u'notifications_eventtype', 'category',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['notifications.EventTypeCategory'], null=True),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop everything `forwards` created,
        constraints first (they must go before the tables they live on)."""
        # Removing unique constraint on 'DefaultSubscription', fields ['transport']
        db.delete_unique(u'notifications_defaultsubscription', ['transport_id'])
        # Deleting model 'DefaultSubscription'
        db.delete_table(u'notifications_defaultsubscription')
        # Removing M2M table for field items on 'DefaultSubscription'
        db.delete_table(db.shorten_name(u'notifications_defaultsubscription_items'))
        # Deleting model 'EventTypeCategory'
        db.delete_table(u'notifications_eventtypecategory')
        # Deleting field 'EventType.category'
        db.delete_column(u'notifications_eventtype', 'category_id')

    # Frozen ORM snapshot used by South to build the `orm` object passed to
    # forwards/backwards.  Auto-generated — edit via schemamigration, not by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'notifications.action': {
            'Meta': {'object_name': 'Action'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'read_as': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'notifications.attendantrole': {
            'Meta': {'unique_together': "(('role',),)", 'object_name': 'AttendantRole'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'notifications.defaultsubscription': {
            'Meta': {'unique_together': "(('transport',),)", 'object_name': 'DefaultSubscription'},
            'frequency': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['notifications.SubscriptionFrequency']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'def_subs+'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['notifications.EventType']"}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"})
        },
        'notifications.event': {
            'Meta': {'object_name': 'Event'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
            'extra_data': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'target_pk': ('django.db.models.fields.TextField', [], {}),
            'target_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event'", 'to': u"orm['contenttypes.ContentType']"}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'to': u"orm['auth.User']"})
        },
        'notifications.eventattendantsconfig': {
            'Meta': {'unique_together': "(('event_type', 'transport'),)", 'object_name': 'EventAttendantsConfig'},
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventType']"}),
            'get_attendants_methods': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"})
        },
        'notifications.eventobjectrole': {
            'Meta': {'object_name': 'EventObjectRole'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'notifications.eventobjectrolerelation': {
            'Meta': {'object_name': 'EventObjectRoleRelation'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventObjectRole']"}),
            'target_pk': ('django.db.models.fields.TextField', [], {}),
            'target_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eventobjectrolerelation'", 'to': u"orm['contenttypes.ContentType']"})
        },
        'notifications.eventtype': {
            'Meta': {'object_name': 'EventType'},
            'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Action']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventTypeCategory']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'read_as': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'target_type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'notifications.eventtypecategory': {
            'Meta': {'object_name': 'EventTypeCategory'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'read_as': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'notifications.feeditem': {
            'Meta': {'unique_together': "(('user', 'event', 'context'),)", 'object_name': 'FeedItem'},
            'context': ('django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'seen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'template_config': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.NotificationTemplateConfig']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'notifications.multiplenotificationtemplateconfig': {
            'Meta': {'unique_together': "(('transport', 'context'),)", 'object_name': 'MultipleNotificationTemplateConfig'},
            'context': ('django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
            'data': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'multiple_template_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"})
        },
        'notifications.notification': {
            'Meta': {'unique_together': "(('user', 'event'),)", 'object_name': 'Notification'},
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Event']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'template_config': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.NotificationTemplateConfig']"}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'notifications'", 'to': u"orm['auth.User']"})
        },
        'notifications.notificationtemplateconfig': {
            'Meta': {'unique_together': "(('event_type', 'transport', 'context'),)", 'object_name': 'NotificationTemplateConfig'},
            'context': ('django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
            'data': ('notifications.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.EventType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'single_template_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'template_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"})
        },
        'notifications.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'frequency': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['notifications.SubscriptionFrequency']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'subs+'", 'symmetrical': 'False', 'to': "orm['notifications.EventType']"}),
            'last_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'transport': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['notifications.Transport']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        'notifications.subscriptionfrequency': {
            'Meta': {'object_name': 'SubscriptionFrequency'},
            'delta': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'notifications.transport': {
            'Meta': {'object_name': 'Transport'},
            'allows_context': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'allows_freq_config': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cls': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'delete_sent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }
    # Apps whose frozen models above are considered complete by South.
    complete_apps = ['notifications']
| |
# Transliteration table: ASCII approximations for the 256 code points of one
# Unicode block, indexed by the code point's low byte (the index is noted in
# each entry's trailing comment).  '[?]' marks unassigned/unmappable points.
# NOTE(review): the syllable inventory looks like Unified Canadian Aboriginal
# Syllabics — confirm the exact block against the Unicode charts.
data = (
    '[?]',  # 0x00
    'e',  # 0x01
    'aai',  # 0x02
    'i',  # 0x03
    'ii',  # 0x04
    'o',  # 0x05
    'oo',  # 0x06
    'oo',  # 0x07
    'ee',  # 0x08
    'i',  # 0x09
    'a',  # 0x0a
    'aa',  # 0x0b
    'we',  # 0x0c
    'we',  # 0x0d
    'wi',  # 0x0e
    'wi',  # 0x0f
    'wii',  # 0x10
    'wii',  # 0x11
    'wo',  # 0x12
    'wo',  # 0x13
    'woo',  # 0x14
    'woo',  # 0x15
    'woo',  # 0x16
    'wa',  # 0x17
    'wa',  # 0x18
    'waa',  # 0x19
    'waa',  # 0x1a
    'waa',  # 0x1b
    'ai',  # 0x1c
    'w',  # 0x1d
    '\'',  # 0x1e
    't',  # 0x1f
    'k',  # 0x20
    'sh',  # 0x21
    's',  # 0x22
    'n',  # 0x23
    'w',  # 0x24
    'n',  # 0x25
    '[?]',  # 0x26
    'w',  # 0x27
    'c',  # 0x28
    '?',  # 0x29
    'l',  # 0x2a
    'en',  # 0x2b
    'in',  # 0x2c
    'on',  # 0x2d
    'an',  # 0x2e
    'pe',  # 0x2f
    'paai',  # 0x30
    'pi',  # 0x31
    'pii',  # 0x32
    'po',  # 0x33
    'poo',  # 0x34
    'poo',  # 0x35
    'hee',  # 0x36
    'hi',  # 0x37
    'pa',  # 0x38
    'paa',  # 0x39
    'pwe',  # 0x3a
    'pwe',  # 0x3b
    'pwi',  # 0x3c
    'pwi',  # 0x3d
    'pwii',  # 0x3e
    'pwii',  # 0x3f
    'pwo',  # 0x40
    'pwo',  # 0x41
    'pwoo',  # 0x42
    'pwoo',  # 0x43
    'pwa',  # 0x44
    'pwa',  # 0x45
    'pwaa',  # 0x46
    'pwaa',  # 0x47
    'pwaa',  # 0x48
    'p',  # 0x49
    'p',  # 0x4a
    'h',  # 0x4b
    'te',  # 0x4c
    'taai',  # 0x4d
    'ti',  # 0x4e
    'tii',  # 0x4f
    'to',  # 0x50
    'too',  # 0x51
    'too',  # 0x52
    'dee',  # 0x53
    'di',  # 0x54
    'ta',  # 0x55
    'taa',  # 0x56
    'twe',  # 0x57
    'twe',  # 0x58
    'twi',  # 0x59
    'twi',  # 0x5a
    'twii',  # 0x5b
    'twii',  # 0x5c
    'two',  # 0x5d
    'two',  # 0x5e
    'twoo',  # 0x5f
    'twoo',  # 0x60
    'twa',  # 0x61
    'twa',  # 0x62
    'twaa',  # 0x63
    'twaa',  # 0x64
    'twaa',  # 0x65
    't',  # 0x66
    'tte',  # 0x67
    'tti',  # 0x68
    'tto',  # 0x69
    'tta',  # 0x6a
    'ke',  # 0x6b
    'kaai',  # 0x6c
    'ki',  # 0x6d
    'kii',  # 0x6e
    'ko',  # 0x6f
    'koo',  # 0x70
    'koo',  # 0x71
    'ka',  # 0x72
    'kaa',  # 0x73
    'kwe',  # 0x74
    'kwe',  # 0x75
    'kwi',  # 0x76
    'kwi',  # 0x77
    'kwii',  # 0x78
    'kwii',  # 0x79
    'kwo',  # 0x7a
    'kwo',  # 0x7b
    'kwoo',  # 0x7c
    'kwoo',  # 0x7d
    'kwa',  # 0x7e
    'kwa',  # 0x7f
    'kwaa',  # 0x80
    'kwaa',  # 0x81
    'kwaa',  # 0x82
    'k',  # 0x83
    'kw',  # 0x84
    'keh',  # 0x85
    'kih',  # 0x86
    'koh',  # 0x87
    'kah',  # 0x88
    'ce',  # 0x89
    'caai',  # 0x8a
    'ci',  # 0x8b
    'cii',  # 0x8c
    'co',  # 0x8d
    'coo',  # 0x8e
    'coo',  # 0x8f
    'ca',  # 0x90
    'caa',  # 0x91
    'cwe',  # 0x92
    'cwe',  # 0x93
    'cwi',  # 0x94
    'cwi',  # 0x95
    'cwii',  # 0x96
    'cwii',  # 0x97
    'cwo',  # 0x98
    'cwo',  # 0x99
    'cwoo',  # 0x9a
    'cwoo',  # 0x9b
    'cwa',  # 0x9c
    'cwa',  # 0x9d
    'cwaa',  # 0x9e
    'cwaa',  # 0x9f
    'cwaa',  # 0xa0
    'c',  # 0xa1
    'th',  # 0xa2
    'me',  # 0xa3
    'maai',  # 0xa4
    'mi',  # 0xa5
    'mii',  # 0xa6
    'mo',  # 0xa7
    'moo',  # 0xa8
    'moo',  # 0xa9
    'ma',  # 0xaa
    'maa',  # 0xab
    'mwe',  # 0xac
    'mwe',  # 0xad
    'mwi',  # 0xae
    'mwi',  # 0xaf
    'mwii',  # 0xb0
    'mwii',  # 0xb1
    'mwo',  # 0xb2
    'mwo',  # 0xb3
    'mwoo',  # 0xb4
    'mwoo',  # 0xb5
    'mwa',  # 0xb6
    'mwa',  # 0xb7
    'mwaa',  # 0xb8
    'mwaa',  # 0xb9
    'mwaa',  # 0xba
    'm',  # 0xbb
    'm',  # 0xbc
    'mh',  # 0xbd
    'm',  # 0xbe
    'm',  # 0xbf
    'ne',  # 0xc0
    'naai',  # 0xc1
    'ni',  # 0xc2
    'nii',  # 0xc3
    'no',  # 0xc4
    'noo',  # 0xc5
    'noo',  # 0xc6
    'na',  # 0xc7
    'naa',  # 0xc8
    'nwe',  # 0xc9
    'nwe',  # 0xca
    'nwa',  # 0xcb
    'nwa',  # 0xcc
    'nwaa',  # 0xcd
    'nwaa',  # 0xce
    'nwaa',  # 0xcf
    'n',  # 0xd0
    'ng',  # 0xd1
    'nh',  # 0xd2
    'le',  # 0xd3
    'laai',  # 0xd4
    'li',  # 0xd5
    'lii',  # 0xd6
    'lo',  # 0xd7
    'loo',  # 0xd8
    'loo',  # 0xd9
    'la',  # 0xda
    'laa',  # 0xdb
    'lwe',  # 0xdc
    'lwe',  # 0xdd
    'lwi',  # 0xde
    'lwi',  # 0xdf
    'lwii',  # 0xe0
    'lwii',  # 0xe1
    'lwo',  # 0xe2
    'lwo',  # 0xe3
    'lwoo',  # 0xe4
    'lwoo',  # 0xe5
    'lwa',  # 0xe6
    'lwa',  # 0xe7
    'lwaa',  # 0xe8
    'lwaa',  # 0xe9
    'l',  # 0xea
    'l',  # 0xeb
    'l',  # 0xec
    'se',  # 0xed
    'saai',  # 0xee
    'si',  # 0xef
    'sii',  # 0xf0
    'so',  # 0xf1
    'soo',  # 0xf2
    'soo',  # 0xf3
    'sa',  # 0xf4
    'saa',  # 0xf5
    'swe',  # 0xf6
    'swe',  # 0xf7
    'swi',  # 0xf8
    'swi',  # 0xf9
    'swii',  # 0xfa
    'swii',  # 0xfb
    'swo',  # 0xfc
    'swo',  # 0xfd
    'swoo',  # 0xfe
    'swoo',  # 0xff
)
| |
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.template import Context, Template
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils import formats, six
from django.utils.deprecation import RemovedInDjango20Warning
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def get_changelist_args(modeladmin, **kwargs):
    """Build the positional argument tuple for constructing a ChangeList.

    Each ChangeList option defaults to the corresponding attribute of
    *modeladmin*; any option may be overridden via a keyword argument.
    The ModelAdmin instance itself is appended as the final argument.
    Unknown keyword arguments trigger an AssertionError.
    """
    option_names = (
        'list_display', 'list_display_links', 'list_filter',
        'date_hierarchy', 'search_fields', 'list_select_related',
        'list_per_page', 'list_max_show_all', 'list_editable',
    )
    values = [kwargs.pop(name, getattr(modeladmin, name)) for name in option_names]
    values.append(modeladmin)
    assert not kwargs, "Unexpected kwarg %s" % kwargs
    return tuple(values)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create_superuser(username=username, email='a@b.com', password='xxx')
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = ChangeList(
request, Child,
*get_changelist_args(m, list_select_related=m.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
    """
    Test that empty value display can be set on AdminSite
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    # Set a new empty display value on AdminSite.
    # The original code mutated the global default admin site without
    # restoring it, leaking '???' into every later test that renders the
    # default site; register a cleanup to restore the previous value.
    original_empty_value_display = admin.site.empty_value_display
    admin.site.empty_value_display = '???'
    self.addCleanup(
        setattr, admin.site, 'empty_value_display', original_empty_value_display
    )
    m = ChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    # The null FK must render with the site-level empty marker.
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-parent nowrap">???</td></tr></tbody>' % link
    )
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
    """
    Test that empty value display can be set in ModelAdmin or individual fields.
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    m = EmptyValueChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    # Field-level marker (&dagger;) and ModelAdmin-level marker (-empty-) both apply.
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
    )
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
    """
    Verifies that inclusion tag result_list generates a table when with
    default ModelAdmin settings.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = (
        '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
        '<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
    )
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
    """
    Regression tests for #11791: Inclusion tag result_list generates a
    table and this checks that the items are nested within the table
    element tags.
    Also a regression test for #13599, verifies that hidden fields
    when list_editable is enabled are rendered in a div outside the
    table.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    cl = ChangeList(request, Child, *get_changelist_args(m))
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    # make sure that hidden fields are in the correct place
    hiddenfields_div = (
        '<div class="hiddenfields">'
        '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
        '</div>'
    ) % new_child.id
    self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
    # make sure that list editable fields are rendered in divs correctly
    editable_name_field = (
        '<input name="form-0-name" value="name" class="vTextField" '
        'maxlength="30" type="text" id="id_form-0-name" />'
    )
    self.assertInHTML(
        '<td class="field-name">%s</td>' % editable_name_field,
        table_output,
        msg_prefix='Failed to find "name" list_editable field',
    )
def test_result_list_editable(self):
    """
    Regression test for #14312: list_editable with pagination
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/', data={'p': -1})  # Anything outside range
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    # An out-of-range page with list_editable must raise, not silently clamp.
    with self.assertRaises(IncorrectLookupParameters):
        ChangeList(request, Child, *get_changelist_args(m))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_result_list_with_allow_tags(self):
    """
    Test for deprecation of allow_tags attribute
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(2):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/')
    m = ChildAdmin(Child, custom_site)

    def custom_method(self, obj=None):
        return 'Unsafe html <br />'
    custom_method.allow_tags = True

    # Add custom method with allow_tags attribute
    m.custom_method = custom_method
    m.list_display = ['id', 'name', 'parent', 'custom_method']
    cl = ChangeList(request, Child, *get_changelist_args(m))
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl})
    table_output = template.render(context)
    # allow_tags=True must leave the returned HTML unescaped.
    custom_field_html = '<td class="field-custom_method">Unsafe html <br /></td>'
    self.assertInHTML(custom_field_html, table_output)
def test_custom_paginator(self):
    """A ModelAdmin-supplied paginator class is used by the changelist."""
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/')
    m = CustomPaginationAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Basic ManyToMany.
    """
    blues = Genre.objects.create(name='Blues')
    band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
    # Adding the same genre twice is a no-op at the m2m level but exercises
    # the duplicate-row join scenario the filter must deduplicate.
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get('/band/', data={'genres': blues.pk})
    cl = ChangeList(request, Band, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. With an intermediate model.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    # Two through-model rows for the same (group, musician) pair.
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/', data={'members': lead.pk})
    cl = ChangeList(request, Group, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
    """
    When using a ManyToMany in list_filter at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    # Filter spans FK -> m2m ('group__members'), the second-level case.
    request = self.factory.get('/concert/', data={'group__members': lead.pk})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Concert instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, custom_site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    cl = ChangeList(request, Quartet, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, custom_site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    cl = ChangeList(request, ChordsBand, *get_changelist_args(m))
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regressions tests for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Two children with the same name
    Child.objects.create(parent=parent, name='Daniel')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    cl = ChangeList(request, Parent, *get_changelist_args(m))
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regressions tests for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both children match the case-insensitive search term 'daniel'.
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    cl = ChangeList(request, Parent, *get_changelist_args(m))
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
    """
    When using a ManyToMany in search_fields at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
    cl = ChangeList(request, Concert, *get_changelist_args(m))
    # There's only one Concert instance
    self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')
    # Test default queryset
    m = ChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
    # Test custom queryset
    m = FilteredChildAdmin(Child, custom_site)
    cl = ChangeList(request, Child, *get_changelist_args(m))
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    superuser = User.objects.create_superuser(username='super', email='super@localhost', password='secret')
    self.client.force_login(superuser)
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
    # The localized form must appear; the raw str() form must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')

    # Test with user 'noparents'
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertNotContains(response, 'Parent object')

    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])

    # Test with user 'parents'
    m = DynamicListDisplayChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')

    custom_site.unregister(Child)

    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])

    # Test default implementation
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """ALL_VAR shows every row only when the total is under list_max_show_all."""
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)

    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})

    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 200
    # 200 is the max we'll pass to ChangeList
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 60)

    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 30
    # 30 is the max we'll pass to ChangeList for this test
    cl = ChangeList(request, Child, *get_changelist_args(m))
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(1, 10):
        Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)

    m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = m.changelist_view(request)
    # Each row's 'age' cell (equal to the pk here) must be the change link.
    for i in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(i,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, i))

    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    p = Parent.objects.create(name='parent')
    m = NoListDisplayLinksParentAdmin(Parent, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/parent/', superuser)
    response = m.changelist_view(request)
    link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
    self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
    """
    Regression test for #17128
    (ChangeList failing under Python 2.5 after r16319)
    """
    swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)

    model_admin = SwallowAdmin(Swallow, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering
    self.assertContains(response, six.text_type(swallow.origin))
    self.assertContains(response, six.text_type(swallow.load))
    self.assertContains(response, six.text_type(swallow.speed))
    # Reverse one-to-one relations should work.
    # swallow has no related SwallowOneToOne -> empty marker; swallow2 does.
    self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
    self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_multiuser_edit(self):
    """
    Simultaneous edits of list_editable fields on the changelist by
    different users must not result in one user's edits creating a new
    object instead of modifying the correct existing object (#11313).
    """
    # To replicate this issue, simulate the following steps:
    # 1. User1 opens an admin changelist with list_editable fields.
    # 2. User2 edits object "Foo" such that it moves to another page in
    #    the pagination order and saves.
    # 3. User1 edits object "Foo" and saves.
    # 4. The edit made by User1 does not get applied to object "Foo" but
    #    instead is used to create a new object (bug).
    # For this test, order the changelist by the 'speed' attribute and
    # display 3 objects per page (SwallowAdmin.list_per_page = 3).

    # Setup the test to reflect the DB state after step 2 where User2 has
    # edited the first swallow object's speed from '4' to '1'.
    a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
    b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
    c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
    d = Swallow.objects.create(origin='Swallow D', load=9, speed=9)

    superuser = self._create_superuser('superuser')
    self.client.force_login(superuser)
    changelist_url = reverse('admin:admin_changelist_swallow_changelist')

    # Send the POST from User1 for step 3. It's still using the changelist
    # ordering from before User2's edits in step 2.
    data = {
        'form-TOTAL_FORMS': '3',
        'form-INITIAL_FORMS': '3',
        'form-MIN_NUM_FORMS': '0',
        'form-MAX_NUM_FORMS': '1000',
        'form-0-id': str(d.pk),
        'form-1-id': str(c.pk),
        'form-2-id': str(a.pk),
        'form-0-load': '9.0',
        'form-0-speed': '9.0',
        'form-1-load': '5.0',
        'form-1-speed': '5.0',
        'form-2-load': '5.0',
        'form-2-speed': '4.0',
        '_save': 'Save',
    }
    # NOTE(review): `extra={'o': '-2'}` feeds extra WSGI environ entries, not
    # query-string parameters, so the intended '?o=-2' ordering is likely not
    # applied here — confirm whether the URL should carry the ordering instead.
    response = self.client.post(changelist_url, data, follow=True, extra={'o': '-2'})

    # The object User1 edited in step 3 is displayed on the changelist and
    # has the correct edits applied.
    self.assertContains(response, '1 swallow was changed successfully.')
    self.assertContains(response, a.origin)
    a.refresh_from_db()
    self.assertEqual(a.load, float(data['form-2-load']))
    self.assertEqual(a.speed, float(data['form-2-speed']))
    b.refresh_from_db()
    self.assertEqual(b.load, 2)
    self.assertEqual(b.speed, 2)
    c.refresh_from_db()
    self.assertEqual(c.load, float(data['form-1-load']))
    self.assertEqual(c.speed, float(data['form-1-speed']))
    d.refresh_from_db()
    self.assertEqual(d.load, float(data['form-0-load']))
    self.assertEqual(d.speed, float(data['form-0-speed']))

    # No new swallows were created.
    # NOTE(review): len(Swallow.objects.all()) loads every row; .count() would do.
    self.assertEqual(len(Swallow.objects.all()), 4)
def test_deterministic_order_for_unordered_model(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model doesn't have any default ordering defined.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')

    for counter in range(1, 51):
        UnorderedObject.objects.create(id=counter, bool=True)

    class UnorderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walks all 5 pages and verifies ids arrive strictly in the
        # expected direction (counter goes 1..50 or 50..1).
        custom_site.register(UnorderedObject, UnorderedObjectAdmin)
        model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(UnorderedObject)

    # When no order is defined at all, everything is ordered by '-pk'.
    check_results_order()

    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    UnorderedObjectAdmin.ordering = ['bool']
    check_results_order()

    # When order fields are defined, including the pk itself, use them.
    UnorderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    UnorderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model has a manager that defines a default ordering.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')

    for counter in range(1, 51):
        OrderedObject.objects.create(id=counter, bool=True, number=counter)

    class OrderedObjectAdmin(admin.ModelAdmin):
        list_per_page = 10

    def check_results_order(ascending=False):
        # Same page-walking check as the unordered-model variant above.
        custom_site.register(OrderedObject, OrderedObjectAdmin)
        model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        custom_site.unregister(OrderedObject)

    # When no order is defined at all, use the model's default ordering (i.e. 'number')
    check_results_order(ascending=True)

    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    OrderedObjectAdmin.ordering = ['bool']
    check_results_order()

    # When order fields are defined, including the pk itself, use them.
    OrderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    OrderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    OrderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    OrderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_dynamic_list_filter(self):
    """
    Regression tests for ticket #17646: dynamic list_filter support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')

    # Test with user 'noparents'
    m = DynamicListFilterChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])

    # Test with user 'parents'
    m = DynamicListFilterChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
    """get_search_fields() overrides are reflected on the ChangeList."""
    child = self._create_superuser('child')
    m = DynamicSearchFieldsChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', child)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
    """
    Regression tests for ticket #15653: ensure the number of pages
    generated for changelist views are correct.
    """
    # instantiating and setting up ChangeList object
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/')
    cl = ChangeList(request, Group, *get_changelist_args(m))
    per_page = cl.list_per_page = 10

    for page_num, objects_count, expected_page_range in [
        (0, per_page, []),
        (0, per_page * 2, list(range(2))),
        (5, per_page * 11, list(range(11))),
        # '.' is the ellipsis marker used by the pagination template tag.
        (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
        (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
    ]:
        # assuming we have exactly `objects_count` objects
        Group.objects.all().delete()
        for i in range(objects_count):
            Group.objects.create(name='test band')

        # setting page number and calculating page range
        cl.page_num = page_num
        cl.get_results(request)
        real_page_range = pagination(cl)['page_range']
        self.assertListEqual(
            expected_page_range,
            list(real_page_range),
        )
def test_object_tools_displayed_no_add_permission(self):
    """
    When ModelAdmin.has_add_permission() returns False, the object-tools
    block is still shown.
    """
    superuser = self._create_superuser('superuser')
    m = EventAdmin(Event, custom_site)
    request = self._mocked_authenticated_request('/event/', superuser)
    self.assertFalse(m.has_add_permission(request))
    response = m.changelist_view(request)
    self.assertIn('<ul class="object-tools">', response.rendered_content)
    # The "Add" button inside the object-tools shouldn't appear.
    self.assertNotIn('Add ', response.rendered_content)
class AdminLogNodeTestCase(TestCase):
    """Tests for the {% get_admin_log %} template tag."""

    def test_get_admin_log_templatetag_custom_user(self):
        """
        Regression test for ticket #20088: admin log depends on User model
        having id field as primary key.
        The old implementation raised an AttributeError when trying to use
        the id field.
        """
        context = Context({'user': CustomIdUser()})
        template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'

        template = Template(template_string)

        # Rendering should be u'' since this templatetag just logs,
        # it doesn't render any string.
        self.assertEqual(template.render(context), '')

    def test_get_admin_log_templatetag_no_user(self):
        """
        The {% get_admin_log %} tag should work without specifying a user.
        """
        user = User(username='jondoe', password='secret', email='super@example.com')
        user.save()
        ct = ContentType.objects.get_for_model(User)
        # action_flag=1 is ADDITION.
        LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)

        t = Template(
            '{% load log %}'
            '{% get_admin_log 100 as admin_log %}'
            '{% for entry in admin_log %}'
            '{{ entry|safe }}'
            '{% endfor %}'
        )
        self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(ROOT_URLCONF='admin_changelist.urls')
class SeleniumTests(AdminSeleniumTestCase):
    """Browser-driven checks of changelist JavaScript behavior."""

    available_apps = ['admin_changelist'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        # One superuser so the changelist shows exactly one row.
        User.objects.create_superuser(username='super', password='secret', email=None)

    def test_add_row_selection(self):
        """
        Ensure that the status line for selected rows gets updated correctly (#22038)
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:auth_user_changelist'))

        form_id = '#changelist-form'

        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)

        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")

        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
# --- file boundary (concatenation artifact) ---
# -*- coding: utf-8 -*-
from xxhash import xxh64_intdigest
from iscc_bench.algos.slide import sliding_window
from iscc_bench.textid.normalize import text_normalize
import numpy as np
# Fixed-seed RNG so MASKS is identical across runs and processes.
R = np.random.RandomState(seed=74)
# 64 fixed 64-bit XOR masks, one per minhash dimension.
MASKS = R.randint(0, 2 ** 64 - 1, 64, dtype=np.uint64)
# One-byte header tags for Content-ID-Text digests (plain vs. partial-content flag).
HEAD_CID_T = b"\x10"
HEAD_CID_T_PCF = b"\x11"
# Custom base58 alphabet plus translation tables between symbols and raw 0-57 values.
SYMBOLS = "C23456789rB1ZEFGTtYiAaVvMmHUPWXKDNbcdefghLjkSnopRqsJuQwxyz"
VALUES = "".join([chr(i) for i in range(58)])
C2VTABLE = str.maketrans(SYMBOLS, VALUES)
V2CTABLE = str.maketrans(VALUES, SYMBOLS)
# Character n-gram width used when shingling normalized text.
WINDOW_SIZE_CID_T = 9
# Two pre-generated lists of 64 large integer parameters for a
# permutation-based minhash variant.
# NOTE(review): not referenced anywhere in this module's visible code —
# presumably consumed by sibling benchmark modules; confirm before removing.
MINHASH_PERMUTATIONS = [
    [
        1465573649454982733,
        50671401556847464,
        464454191238539071,
        79121974684475339,
        1204172773865074508,
        1599360705907791122,
        1277912255369958294,
        160110214425958988,
        467753012659127075,
        1846500734868477684,
        337877366765600783,
        1910046430333214712,
        432991213508534293,
        458494058485982523,
        488627824275561651,
        838108366281102774,
        56421635900613245,
        41152469170208093,
        81795297597986189,
        1655634145464649340,
        625354851427597534,
        2204825841199416657,
        2234919984064510783,
        1013888525398624866,
        1955287890196929674,
        446634993784978105,
        754698625499853375,
        222042093077244875,
        871379034919191540,
        284785552184080815,
        543192509429361466,
        1161746297560325877,
        1960287370909864238,
        1846445530880870657,
        1469786218271285891,
        1448922133907529478,
        399602985777301894,
        2022057865440131188,
        1017153326444367824,
        606183561047677968,
        1598320042930326758,
        1116499924437966538,
        1125748350977128481,
        27225217426126585,
        384261112471101066,
        1033966260045922389,
        960201057416530170,
        251241047872847492,
        1271075573485035961,
        2163496639851351130,
        236290923696725726,
        1814667029972018154,
        470006580483358731,
        754988189928330202,
        1724648305364661516,
        436487060460267949,
        695653806960763463,
        2150760309693273629,
        924044207510448651,
        149809514353780767,
        1314612118644492866,
        1755300371282918056,
        518697226054308549,
        1569254193062057079,
    ],
    [
        1237857379018613025,
        1001921070941957488,
        656622217376943453,
        1238551308896951561,
        1331489939266224632,
        356372363153075238,
        502039359164234437,
        2244189427579536420,
        2261264705847976303,
        520021327101242735,
        2248861447464088572,
        1575238347461123506,
        1977164277640575068,
        443328659258048693,
        2164775033633161402,
        422864362428985564,
        1013886767037446358,
        2031519161666131866,
        960810283122122980,
        915146140710507812,
        1650364839972853328,
        417710596676127062,
        971941928151168238,
        112835823272522671,
        875463099753474542,
        2103760042641721929,
        1174817080219344992,
        956718707065671240,
        504714530717344612,
        698866224796044997,
        772686875752263236,
        508124985106117155,
        32050429651149070,
        2050193692902100504,
        113320117099579589,
        1157406858255878787,
        958938521872285562,
        1690655700081062480,
        654710156142217420,
        1374581228985750872,
        2293273111845819604,
        584158434721485467,
        1697469239658036408,
        1631417643814704636,
        759939753303206598,
        730557314242689590,
        514488987075693066,
        758386722945284999,
        205448532149163355,
        1955950223882840638,
        2178239790474234865,
        2150432790731678261,
        525986446085794211,
        243818715676446213,
        1448695519298583067,
        787612676545388580,
        1808140070502055045,
        1935597324863623502,
        990087375750577947,
        238222040856000242,
        902438855124964046,
        1573311567944127103,
        1685584091916149959,
        469424979690030816,
    ],
]
def content_id_text(text, partial=False):
    """Compute the text Content-ID code for *text*.

    The text is normalized, shingled with a sliding character window,
    hashed, minhashed, and the least significant bits of the minhash are
    packed into an 8-byte digest which is prefixed with the appropriate
    header and base-encoded.
    """
    text = text_normalize(text)
    windows = sliding_window(text, WINDOW_SIZE_CID_T, 1)
    # Hash every character shingle with xxHash64.
    features = [xxh64_intdigest("".join(w).encode("utf8")) for w in windows]
    minhash = minimum_hash(features)
    # Collect the least significant bit of each of the 64 minhash values.
    lsb = "".join(str(h & 1) for h in minhash)
    digest = int(lsb, 2).to_bytes(8, "big", signed=False)
    header = HEAD_CID_T_PCF if partial else HEAD_CID_T
    return encode(header + digest)
def minimum_hash(features):
    """Return the 64 minimum XOR-permuted hash values for *features*."""
    # Start every slot at the maximum uint64 so any feature lowers it.
    lowest = np.full(64, 2 ** 64 - 1, dtype=np.uint64)
    for feature in features:
        lowest = np.minimum(lowest, np.bitwise_xor(MASKS, feature))
    return lowest.tolist()
def minimum_hash_text(text, w=13):
    """Minimum-hash *text* directly from its *w*-character shingles."""
    shingles = ("".join(c) for c in sliding_window(text, w))
    feature_stream = (xxh64_intdigest(s.encode("utf8")) for s in shingles)
    # Start every slot at the maximum uint64 so any feature lowers it.
    lowest = np.full(64, 2 ** 64 - 1, dtype=np.uint64)
    for feature in feature_stream:
        lowest = np.minimum(lowest, np.bitwise_xor(MASKS, feature))
    return lowest.tolist()
def similarity_hash(hash_digests):
    """Fold a list of 64-bit integer digests into one 8-byte digest.

    Each output bit is set by majority vote: bit i of the result is 1
    when at least half of the input digests have bit i set.
    """
    n_bits = 64
    # Count how many digests have each bit set.
    counts = [sum((h >> i) & 1 for h in hash_digests) for i in range(n_bits)]
    threshold = len(hash_digests) * 1.0 / 2
    combined = 0
    for i, count in enumerate(counts):
        if count >= threshold:
            combined |= 1 << i
    return combined.to_bytes(8, "big", signed=False)
def encode(digest):
    """Base58-style encode a 1-, 8- or 9-byte digest using V2CTABLE.

    A 9-byte digest is split into its 1-byte head and 8-byte tail, which
    are encoded independently and concatenated.
    """
    if len(digest) == 9:
        return encode(digest[:1]) + encode(digest[1:])
    assert len(digest) in (1, 8), "Digest must be 1, 8 or 9 bytes long"
    # Interpret the digest as a big-endian unsigned integer.
    value = int.from_bytes(digest, "big")
    # `capacity` fixes the number of output symbols for this digest size.
    capacity = 256 ** len(digest)
    symbols = []
    while capacity > 0:
        symbols.append(value % 58)
        value //= 58
        capacity //= 58
    encoded = "".join(chr(c) for c in reversed(symbols))
    return str.translate(encoded, V2CTABLE)
if __name__ == "__main__":
print(content_id_text("Hello World"))
| |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains a dialog for editing options for how the given
VisTrails module is executed.
QModuleOptions
"""
from PyQt4 import QtCore, QtGui
from vistrails.core.vistrail.module_control_param import ModuleControlParam
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
import json
import unittest
###############################################################################
class QModuleOptions(QtGui.QDialog, QVistrailsPaletteInterface):
    """
    QModuleOptions is a dialog for editing module execution options:
    how looped input ports are combined, while-loop behavior, and
    persistent job caching.
    """
    def __init__(self, parent=None):
        """
        QModuleOptions(parent)
        -> None
        """
        QtGui.QDialog.__init__(self, parent)
        self.setWindowTitle("Module Execution Options")
        self.createButtons()
        self.update_module()

    def createButtons(self):
        """ createButtons() -> None
        Build all dialog widgets and connect their change signals.
        """
        self.controller = None
        self.state_changed = False
        self.module = None
        self.setLayout(QtGui.QVBoxLayout())
        # self.layout().addStrut()
        # --- row of radio buttons: how looped input ports are combined ---
        layout = QtGui.QHBoxLayout()
        type_group = QtGui.QButtonGroup(self)  # makes the radio buttons exclusive
        layout.addWidget(QtGui.QLabel("Port list combination method:"))
        self.pairwiseButton = QtGui.QRadioButton("Pairwise")
        self.pairwiseButton.setToolTip("Execute multiple looped input ports pairwise:"
                                       " [(A, B), (C, D)] -> [(A, C), (B, D)]")
        type_group.addButton(self.pairwiseButton)
        layout.addWidget(self.pairwiseButton)
        layout.setStretch(0, 0)
        self.cartesianButton = QtGui.QRadioButton("Cartesian")
        self.cartesianButton.setToolTip("Execute multiple looped input ports using cartesian product:"
                                        " [(A, B), (C, D)] -> [(A, C), (A, D), (B, C), (B, D)]")
        # Cartesian is the default combination method.
        self.cartesianButton.setChecked(True)
        type_group.addButton(self.cartesianButton)
        layout.addWidget(self.cartesianButton)
        layout.setStretch(1, 0)
        self.customButton = QtGui.QRadioButton("Custom")
        self.customButton.setToolTip("Build a custom combination using pairwise/cartesian functions")
        type_group.addButton(self.customButton)
        layout.addWidget(self.customButton)
        layout.setStretch(2, 0)
        layout.addStretch(1)
        self.layout().addLayout(layout)
        self.layout().setStretch(0, 0)
        # Tree editor for custom combinations; hidden unless "Custom" is on.
        self.portCombiner = QPortCombineTreeWidget()
        self.layout().addWidget(self.portCombiner)
        self.portCombiner.setVisible(False)
        # --- while-loop options: condition, max iterations, delay, feedback ---
        whileLayout = QtGui.QVBoxLayout()
        self.whileButton = QtGui.QCheckBox("While Loop")
        self.whileButton.setToolTip('Repeatedly execute module until a specified output port has a false value')
        whileLayout.addWidget(self.whileButton)
        whileLayout.setStretch(0, 0)
        layout = QtGui.QHBoxLayout()
        self.condLabel = QtGui.QLabel("Condition output port:")
        layout.addWidget(self.condLabel)
        layout.setStretch(0, 0)
        self.condEdit = QtGui.QLineEdit()
        self.condEdit.setToolTip('Name of output port containing the condition of the loop')
        layout.addWidget(self.condEdit)
        layout.setStretch(1, 1)
        whileLayout.addLayout(layout)
        whileLayout.setStretch(1, 0)
        layout = QtGui.QHBoxLayout()
        self.maxLabel = QtGui.QLabel("Max iterations:")
        layout.addWidget(self.maxLabel)
        layout.setStretch(0, 0)
        self.maxEdit = QtGui.QLineEdit()
        self.maxEdit.setValidator(QtGui.QIntValidator())
        self.maxEdit.setToolTip('Fail after this number of iterations have been reached (default=20)')
        layout.addWidget(self.maxEdit)
        layout.setStretch(1, 1)
        whileLayout.addLayout(layout)
        whileLayout.setStretch(2, 0)
        layout = QtGui.QHBoxLayout()
        self.delayLabel = QtGui.QLabel("Delay:")
        layout.addWidget(self.delayLabel)
        layout.setStretch(0, 0)
        self.delayEdit = QtGui.QLineEdit()
        self.delayEdit.setValidator(QtGui.QDoubleValidator(self))
        self.delayEdit.setToolTip('Delay between iterations in fractions of seconds')
        layout.addWidget(self.delayEdit)
        layout.setStretch(1, 1)
        whileLayout.addLayout(layout)
        # NOTE(review): index 2 was already set above and the indices below
        # are off by one; harmless since every stretch factor is 0, but
        # likely unintended.
        whileLayout.setStretch(2, 0)
        layout = QtGui.QHBoxLayout()
        self.feedInputLabel = QtGui.QLabel("Feedback Input port:")
        layout.addWidget(self.feedInputLabel)
        layout.setStretch(0, 0)
        self.feedInputEdit = QtGui.QLineEdit()
        self.feedInputEdit.setToolTip('Name of input port to feed the value from last iteration')
        layout.addWidget(self.feedInputEdit)
        layout.setStretch(1, 1)
        whileLayout.addLayout(layout)
        whileLayout.setStretch(3, 0)
        layout = QtGui.QHBoxLayout()
        self.feedOutputLabel = QtGui.QLabel("Feedback Output port:")
        layout.addWidget(self.feedOutputLabel)
        layout.setStretch(0, 0)
        self.feedOutputEdit = QtGui.QLineEdit()
        self.feedOutputEdit.setToolTip('Name of output port to feed to next iteration')
        layout.addWidget(self.feedOutputEdit)
        layout.setStretch(1, 1)
        whileLayout.addLayout(layout)
        whileLayout.setStretch(4, 0)
        whileLayout.addStretch(1)
        self.layout().addLayout(whileLayout)
        # --- persistent job-cache option ---
        self.jobCacheButton = QtGui.QCheckBox("Cache Output Persistently")
        self.jobCacheButton.setToolTip('Cache the module results persistently to disk. (outputs must be constants)')
        self.layout().addWidget(self.jobCacheButton)
        self.layout().setStretch(2, 0)
        self.layout().addStretch(1)
        # --- Save/Reset buttons; disabled until the user edits something ---
        self.buttonLayout = QtGui.QHBoxLayout()
        self.buttonLayout.setMargin(5)
        self.saveButton = QtGui.QPushButton('&Save', self)
        self.saveButton.setFixedWidth(100)
        self.saveButton.setEnabled(False)
        self.buttonLayout.addWidget(self.saveButton)
        self.resetButton = QtGui.QPushButton('&Reset', self)
        self.resetButton.setFixedWidth(100)
        self.resetButton.setEnabled(False)
        self.buttonLayout.addWidget(self.resetButton)
        self.layout().addLayout(self.buttonLayout)
        self.connect(self.saveButton, QtCore.SIGNAL('clicked(bool)'),
                     self.saveTriggered)
        self.connect(self.resetButton, QtCore.SIGNAL('clicked(bool)'),
                     self.resetTriggered)
        self.layout().setStretch(3, 0)
        self.update_module()
        # Any edit marks the dialog dirty; the custom/while toggles also
        # show or hide their dependent widgets.
        self.pairwiseButton.toggled.connect(self.stateChanged)
        self.cartesianButton.toggled.connect(self.stateChanged)
        self.customButton.toggled.connect(self.stateChanged)
        self.customButton.toggled.connect(self.customToggled)
        self.portCombiner.itemChanged.connect(self.stateChanged)
        self.whileButton.toggled.connect(self.stateChanged)
        self.whileButton.toggled.connect(self.whileToggled)
        self.condEdit.textChanged.connect(self.stateChanged)
        self.maxEdit.textChanged.connect(self.stateChanged)
        self.delayEdit.textChanged.connect(self.stateChanged)
        self.feedInputEdit.textChanged.connect(self.stateChanged)
        self.feedOutputEdit.textChanged.connect(self.stateChanged)
        self.jobCacheButton.toggled.connect(self.stateChanged)

    def sizeHint(self):
        """ sizeHint() -> QSize
        Return the recommended size of the configuration window
        """
        return QtCore.QSize(512, 256)

    def saveTriggered(self, checked = False):
        """ saveTriggered(checked: bool) -> None
        Update vistrail controller and module when the user clicks Save.
        """
        if self.updateVistrail():
            self.saveButton.setEnabled(False)
            self.resetButton.setEnabled(False)
            self.state_changed = False
            self.emit(QtCore.SIGNAL("stateChanged"))
            self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)

    def resetTriggered(self, checked = False):
        # Discard edits by reloading every widget from the module.
        self.update_module(self.module)

    def stateChanged(self, state=False, other=None):
        # Mark the dialog dirty and enable Save/Reset.
        self.saveButton.setEnabled(True)
        self.resetButton.setEnabled(True)
        self.state_changed = True

    def customToggled(self, state=False):
        # The combination editor is only relevant in "Custom" mode.
        self.portCombiner.setVisible(state)

    def whileToggled(self, state=False):
        # Show and clear all while-loop fields when enabling the loop;
        # hide them all when disabling.
        if state:
            self.condEdit.setVisible(True)
            self.maxEdit.setVisible(True)
            self.delayEdit.setVisible(True)
            self.feedInputEdit.setVisible(True)
            self.feedOutputEdit.setVisible(True)
            self.condLabel.setVisible(True)
            self.maxLabel.setVisible(True)
            self.delayLabel.setVisible(True)
            self.feedInputLabel.setVisible(True)
            self.feedOutputLabel.setVisible(True)
            self.condEdit.setText('')
            self.maxEdit.setText('')
            self.delayEdit.setText('')
            self.feedInputEdit.setText('')
            self.feedOutputEdit.setText('')
        else:
            self.condEdit.setVisible(False)
            self.maxEdit.setVisible(False)
            self.delayEdit.setVisible(False)
            self.feedInputEdit.setVisible(False)
            self.feedOutputEdit.setVisible(False)
            self.condLabel.setVisible(False)
            self.maxLabel.setVisible(False)
            self.delayLabel.setVisible(False)
            self.feedInputLabel.setVisible(False)
            self.feedOutputLabel.setVisible(False)

    def closeEvent(self, event):
        # Offer to save pending edits before the palette closes.
        self.askToSaveChanges()
        event.accept()

    def set_controller(self, controller):
        """ set_controller(controller) -> None
        Track the active controller and show options for the single
        selected module; disable the dialog when 0 or >1 are selected.
        """
        self.controller = controller
        if not controller:
            return
        scene = controller.current_pipeline_scene
        selected_ids = scene.get_selected_module_ids()
        modules = [controller.current_pipeline.modules[i]
                   for i in selected_ids]
        if len(modules) == 1:
            self.update_module(modules[0])
        else:
            self.update_module(None)

    def update_module(self, module=None):
        """ update_module(module: Module or None) -> None
        Reload every widget from the module's control parameters;
        with no module, reset and disable the whole dialog.
        """
        self.module = module
        if not module:
            self.pairwiseButton.setEnabled(False)
            self.cartesianButton.setEnabled(False)
            self.customButton.setEnabled(False)
            self.whileButton.setEnabled(False)
            self.condEdit.setVisible(False)
            self.maxEdit.setVisible(False)
            self.delayEdit.setVisible(False)
            self.feedInputEdit.setVisible(False)
            self.feedOutputEdit.setVisible(False)
            self.condLabel.setVisible(False)
            self.maxLabel.setVisible(False)
            self.delayLabel.setVisible(False)
            self.feedInputLabel.setVisible(False)
            self.feedOutputLabel.setVisible(False)
            self.portCombiner.setVisible(False)
            self.jobCacheButton.setEnabled(False)
            self.state_changed = False
            self.saveButton.setEnabled(False)
            self.resetButton.setEnabled(False)
            return
        # set defaults
        self.pairwiseButton.setEnabled(True)
        self.cartesianButton.setEnabled(True)
        self.cartesianButton.setChecked(True)
        self.customButton.setEnabled(True)
        self.whileButton.setEnabled(True)
        self.whileButton.setChecked(False)
        self.condEdit.setVisible(False)
        self.maxEdit.setVisible(False)
        self.delayEdit.setVisible(False)
        self.feedInputEdit.setVisible(False)
        self.feedOutputEdit.setVisible(False)
        self.condLabel.setVisible(False)
        self.maxLabel.setVisible(False)
        self.delayLabel.setVisible(False)
        self.feedInputLabel.setVisible(False)
        self.feedOutputLabel.setVisible(False)
        self.portCombiner.setVisible(False)
        self.portCombiner.setDefault(module)
        self.jobCacheButton.setEnabled(True)
        self.jobCacheButton.setChecked(False)
        # Load stored control parameters, if any.
        if module.has_control_parameter_with_name(ModuleControlParam.LOOP_KEY):
            # NOTE(review): `type`, `max` and `input` below shadow builtins.
            type = module.get_control_parameter_by_name(ModuleControlParam.LOOP_KEY).value
            self.pairwiseButton.setChecked(type=='pairwise')
            self.cartesianButton.setChecked(type=='cartesian')
            # Anything besides the two fixed methods is a custom JSON
            # combination expression edited in the port combiner.
            self.customButton.setChecked(type not in ['pairwise', 'cartesian'])
            self.portCombiner.setVisible(type not in ['pairwise', 'cartesian'])
            if type not in ['pairwise', 'cartesian']:
                self.portCombiner.setValue(type)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_COND_KEY) or \
           module.has_control_parameter_with_name(ModuleControlParam.WHILE_MAX_KEY):
            self.whileButton.setChecked(True)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_COND_KEY):
            cond = module.get_control_parameter_by_name(ModuleControlParam.WHILE_COND_KEY).value
            self.condEdit.setText(cond)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_MAX_KEY):
            max = module.get_control_parameter_by_name(ModuleControlParam.WHILE_MAX_KEY).value
            self.maxEdit.setText(max)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_DELAY_KEY):
            delay = module.get_control_parameter_by_name(ModuleControlParam.WHILE_DELAY_KEY).value
            self.delayEdit.setText(delay)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_INPUT_KEY):
            input = module.get_control_parameter_by_name(ModuleControlParam.WHILE_INPUT_KEY).value
            self.feedInputEdit.setText(input)
        if module.has_control_parameter_with_name(ModuleControlParam.WHILE_OUTPUT_KEY):
            output = module.get_control_parameter_by_name(ModuleControlParam.WHILE_OUTPUT_KEY).value
            self.feedOutputEdit.setText(output)
        if module.has_control_parameter_with_name(ModuleControlParam.JOB_CACHE_KEY):
            jobCache = module.get_control_parameter_by_name(ModuleControlParam.JOB_CACHE_KEY).value
            self.jobCacheButton.setChecked(jobCache.lower()=='true')
        self.state_changed = False
        self.saveButton.setEnabled(False)
        self.resetButton.setEnabled(False)

    def updateVistrail(self):
        """ updateVistrail() -> bool
        Write the widget state back to the module's control parameters.
        Falsy values cause the corresponding parameter to be deleted.
        """
        values = []
        if self.pairwiseButton.isChecked():
            value = 'pairwise'
        elif self.cartesianButton.isChecked():
            value = 'cartesian'
        else:
            value = self.portCombiner.getValue()
        values.append((ModuleControlParam.LOOP_KEY, value))
        # `_while and text` yields the text when looping, False otherwise;
        # False entries are removed by the loop below.
        _while = self.whileButton.isChecked()
        values.append((ModuleControlParam.WHILE_COND_KEY,
                       _while and self.condEdit.text()))
        values.append((ModuleControlParam.WHILE_MAX_KEY,
                       _while and self.maxEdit.text()))
        values.append((ModuleControlParam.WHILE_DELAY_KEY,
                       _while and self.delayEdit.text()))
        values.append((ModuleControlParam.WHILE_INPUT_KEY,
                       _while and self.feedInputEdit.text()))
        values.append((ModuleControlParam.WHILE_OUTPUT_KEY,
                       _while and self.feedOutputEdit.text()))
        jobCache = self.jobCacheButton.isChecked()
        # False (unchecked) deletes the parameter below; 'true' stores it.
        values.append((ModuleControlParam.JOB_CACHE_KEY,
                       [False, 'true'][jobCache]))
        for name, value in values:
            if value:
                # Replace the parameter only when its value actually changed.
                if (not self.module.has_control_parameter_with_name(name) or
                        value != self.module.get_control_parameter_by_name(name).value):
                    if self.module.has_control_parameter_with_name(name):
                        self.controller.delete_control_parameter(name,
                                                                 self.module.id)
                    self.controller.add_control_parameter((name, value),
                                                          self.module.id)
            elif self.module.has_control_parameter_with_name(name):
                self.controller.delete_control_parameter(name, self.module.id)
        return True

    def activate(self):
        # Bring the palette window to the front, showing it if hidden.
        if self.isVisible() == False:
            self.show()
        self.activateWindow()
# Custom QTreeWidgetItem type identifiers used to distinguish the three
# node kinds in the combination tree.
PORTITEM = 1000
DOTITEM = 1001
CROSSITEM = 1002
class PortItem(QtGui.QTreeWidgetItem):
    """Leaf tree item representing a single looped input port."""
    def __init__(self, port_name, parent=None):
        QtGui.QTreeWidgetItem.__init__(self, parent, PORTITEM)
        self.setText(0, port_name)
        # Ports are leaves: nothing may be dropped onto them.
        leaf_flags = self.flags() & ~QtCore.Qt.ItemIsDropEnabled
        self.setFlags(leaf_flags)
class DotItem(QtGui.QTreeWidgetItem):
    """Tree item representing a pairwise ("dot") product combiner."""
    def __init__(self, parent=None):
        QtGui.QTreeWidgetItem.__init__(self, parent, DOTITEM)
        self.setExpanded(True)
        self.setText(0, 'Dot')
        self.setIcon(0, CurrentTheme.DOT_PRODUCT_ICON)
class CrossItem(QtGui.QTreeWidgetItem):
    """Tree item representing a cartesian ("cross") product combiner."""
    def __init__(self, parent=None):
        QtGui.QTreeWidgetItem.__init__(self, parent, CROSSITEM)
        self.setExpanded(True)
        self.setText(0, 'Cross')
        self.setIcon(0, CurrentTheme.CROSS_PRODUCT_ICON)
class QPortCombineTreeWidget(QtGui.QTreeWidget):
    """ Tree widget for building a custom port-combination expression out
    of nested pairwise ("dot") and cartesian ("cross") products.  The
    value is (de)serialized as a nested JSON list such as
    ["cartesian", ["pairwise", "a", "b"], "c"].
    """
    def __init__(self, parent=None):
        QtGui.QTreeWidget.__init__(self, parent)
        self.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
        self.header().hide()
        self.setExpandsOnDoubleClick(False)
        self.setItemsExpandable(False)
        self.setRootIsDecorated(False)
        self.expandAll()
        self.setToolTip("Right-click to add dot/cross product. Rearrange "
                        "items to get suitable order. 'Del' key deletes "
                        "selected product.")

    def dropEvent(self, event):
        # Keep everything expanded after a drag-and-drop rearrangement.
        QtGui.QTreeWidget.dropEvent(self, event)
        self.expandAll()

    def loadNode(self, parent, node):
        # populate widget from json struct
        if isinstance(node, basestring):
            # A plain string is a port name (leaf).
            PortItem(node, parent)
        else:
            # The first element names the product type; the rest are children.
            item = DotItem(parent) if node[0] == 'pairwise' \
                   else CrossItem(parent)
            for i in node[1:]:
                self.loadNode(item, i)

    def saveNode(self, item):
        # populate json struct from widget items
        if item.type()==PORTITEM:
            return item.text(0)
        L = ['pairwise'] if item.type()==DOTITEM else ['cartesian']
        L.extend([self.saveNode(item.child(i))
                  for i in xrange(item.childCount())])
        # Drop children that collapsed to None and products with no
        # remaining children (None is filtered out again by the caller).
        L = [i for i in L if i is not None]
        if len(L)<2:
            L = None
        return L

    def setValue(self, value):
        """Rebuild the tree from a JSON-encoded combination expression."""
        self.clear()
        value = json.loads(value)
        # Skip the implicit top-level 'cartesian' wrapper at index 0.
        for v in value[1:]:
            self.loadNode(self.invisibleRootItem(), v)

    def getValue(self):
        """Serialize the tree to JSON, wrapped in a top-level 'cartesian'."""
        nodes = [self.topLevelItem(i)
                 for i in xrange(self.topLevelItemCount())]
        L = ['cartesian'] # default
        L.extend([self.saveNode(node) for node in nodes])
        L = [i for i in L if i is not None]
        if len(L)<2:
            L = None
        return json.dumps(L)

    def setDefault(self, module):
        # Populate with one leaf per iterated input port of the module.
        self.clear()
        if not module:
            return
        for port_name in module.iterated_ports:
            PortItem(port_name, self)

    def contextMenuEvent(self, event):
        # Right-click menu for inserting dot/cross product nodes.
        menu = QtGui.QMenu()
        dotAction = QtGui.QAction(CurrentTheme.DOT_PRODUCT_ICON,
                                  'Add Pairwise Product', self)
        dotAction.triggered.connect(self.addDot)
        menu.addAction(dotAction)
        crossAction = QtGui.QAction(CurrentTheme.CROSS_PRODUCT_ICON,
                                    'Add Cartesian Product', self)
        crossAction.triggered.connect(self.addCross)
        menu.addAction(crossAction)
        menu.exec_(event.globalPos())
        event.accept()

    def addDot(self):
        # Append an empty pairwise product at the top level.
        DotItem(self)

    def addCross(self):
        # Append an empty cartesian product at the top level.
        CrossItem(self)

    def keyPressEvent(self, event):
        """ keyPressEvent(event: QKeyEvent) -> None
        Capture 'Del', 'Backspace' for deleting items.
        Ctrl+C, Ctrl+V, Ctrl+A for copy, paste and select all
        """
        items = self.selectedItems()
        # Only a single selected, childless product node may be deleted;
        # port leaves and non-empty products are left alone.
        if (len(items)==1 and \
            event.key() in [QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete]) and \
           type(items[0]) in [DotItem, CrossItem] and\
           not items[0].childCount():
            item = items[0]
            if item.parent():
                item.parent().takeChild(item.parent().indexOfChild(item))
            else:
                self.takeTopLevelItem(self.indexOfTopLevelItem(item))
        else:
            QtGui.QTreeWidget.keyPressEvent(self, event)
class TestIterationGui(unittest.TestCase):
    """Round-trips a combination expression through the combiner widget."""
    def testGetSet(self):
        widget = QPortCombineTreeWidget()
        serialized = '["cartesian", ["pairwise", "a", "b"], "c"]'
        widget.setValue(serialized)
        self.assertEqual(serialized, widget.getValue())
| |
#!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets to build artifacts."""
import os.path
import sys
import jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ=None,
                          flake_retries=0, timeout_retries=0, timeout_seconds=30*60):
    """Creates jobspec for a task running under docker.

    Args:
      name: short artifact name, used in the job's shortname.
      dockerfile_dir: directory containing the Dockerfile to build and run.
      shell_command: command executed inside the container (RUN_COMMAND).
      environ: optional dict of extra environment variables forwarded into
        the container via `docker -e`; the caller's dict is never mutated.
      flake_retries/timeout_retries/timeout_seconds: retry/timeout policy.

    Returns:
      A jobset.JobSpec that runs the dockerized build.
    """
    # Use a None sentinel instead of a mutable `{}` default argument, and
    # copy so the RUN_COMMAND insertion never mutates the caller's dict.
    environ = {} if environ is None else environ.copy()
    environ['RUN_COMMAND'] = shell_command

    # Forward every environment variable into the container.
    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
                  'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
                  'OUTPUT_DIR': 'artifacts'}
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
        environ=docker_env,
        shortname='build_artifact.%s' % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries)
    return jobspec
def create_jobspec(name, cmdline, environ=None, shell=False,
                   flake_retries=0, timeout_retries=0, timeout_seconds=30*60):
    """Creates a jobspec that runs *cmdline* directly on the host."""
    return jobset.JobSpec(
        cmdline=cmdline,
        environ=environ,
        shortname='build_artifact.%s' % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        shell=shell)
# Minimum macOS version the artifacts are built against.
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'

# Compiler flag selecting 32- vs 64-bit code generation.
_ARCH_FLAG_MAP = {
    'x86': '-m32',
    'x64': '-m64'
}

# (arch, python version) -> Python installation directory prefix used by
# the Windows build script.
python_windows_version_arch_map = {
    ('x86', '2.7'): 'Python27_32bits',
    ('x64', '2.7'): 'Python27',
    ('x86', '3.4'): 'Python34_32bits',
    ('x64', '3.4'): 'Python34',
}
class PythonArtifact:
    """Builds Python artifacts (wheels) for one platform/arch/version."""

    def __init__(self, platform, arch, python_version, manylinux_build=None):
        # manylinux_build (e.g. 'cp27-cp27mu') selects the interpreter
        # inside the manylinux docker image; only supplied for linux builds.
        if manylinux_build:
            self.name = 'python%s_%s_%s_%s' % (python_version, platform, arch, manylinux_build)
        else:
            self.name = 'python%s_%s_%s' % (python_version, platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'python', python_version, platform, arch]
        self.python_version = python_version
        # Windows install-dir prefix for this (arch, version) pair.
        self.python_windows_prefix = python_windows_version_arch_map[arch, python_version]
        self.manylinux_build = manylinux_build

    def pre_build_jobspecs(self):
        # No pre-build steps are needed for Python artifacts.
        return []

    def build_jobspec(self):
        """Return the jobspec that builds this Python artifact."""
        environ = {}
        if self.platform == 'linux':
            if self.arch == 'x86':
                environ['SETARCH_CMD'] = 'linux32'
            # Inside the manylinux container, the python installations are located in
            # special places...
            environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.manylinux_build)
            environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.manylinux_build)
            # Platform autodetection for the manylinux1 image breaks so we set the
            # defines ourselves.
            # TODO(atash) get better platform-detection support in core so we don't
            # need to do this manually...
            environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
            environ['BUILD_HEALTH_CHECKING'] = 'TRUE'
            environ['BUILD_MANYLINUX_WHEEL'] = 'TRUE'
            return create_docker_jobspec(self.name,
                'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch,
                'tools/run_tests/build_artifact_python.sh',
                environ=environ,
                timeout_seconds=60*60)
        elif self.platform == 'windows':
            return create_jobspec(self.name,
                                  ['tools\\run_tests\\build_artifact_python.bat',
                                   self.python_windows_prefix,
                                   '32' if self.arch == 'x86' else '64'
                                  ],
                                  shell=True)
        else:
            # Other platforms (macos per targets()): use the versioned
            # python binary found on PATH.
            environ['PYTHON'] = 'python{}'.format(self.python_version)
            return create_jobspec(self.name,
                                  ['tools/run_tests/build_artifact_python.sh'],
                                  environ=environ)

    def __str__(self):
        return self.name
class RubyArtifact:
    """Builds ruby native gem."""

    def __init__(self, platform, arch):
        self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'ruby', platform, arch]

    def pre_build_jobspecs(self):
        """No pre-build steps are required for the ruby gem."""
        return []

    def build_jobspec(self):
        """Return the jobspec that builds the native gem."""
        if self.platform == 'windows':
            raise Exception("Not supported yet")
        if self.platform == 'linux':
            environ = {}
            if self.arch == 'x86':
                environ['SETARCH_CMD'] = 'linux32'
            return create_docker_jobspec(self.name,
                'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
                'tools/run_tests/build_artifact_ruby.sh',
                environ=environ)
        return create_jobspec(self.name,
                              ['tools/run_tests/build_artifact_ruby.sh'])
class CSharpExtArtifact:
    """Builds C# native extension library"""

    def __init__(self, platform, arch):
        self.name = 'csharp_ext_%s_%s' % (platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'csharp', platform, arch]

    def pre_build_jobspecs(self):
        """Windows needs a pre-build step; other platforms do not."""
        if self.platform != 'windows':
            return []
        return [create_jobspec('prebuild_%s' % self.name,
                               ['tools\\run_tests\\pre_build_c.bat'],
                               shell=True,
                               flake_retries=5,
                               timeout_retries=2)]

    def build_jobspec(self):
        """Return the jobspec that builds grpc_csharp_ext for this target."""
        if self.platform == 'windows':
            msbuild_target = 'Win32' if self.arch == 'x86' else self.arch
            return create_jobspec(self.name,
                                  ['tools\\run_tests\\build_artifact_csharp.bat',
                                   'vsprojects\\grpc_csharp_ext.sln',
                                   '/p:Configuration=Release',
                                   '/p:PlatformToolset=v120',
                                   '/p:Platform=%s' % msbuild_target],
                                  shell=True)
        environ = {'CONFIG': 'opt',
                   'EMBED_OPENSSL': 'true',
                   'EMBED_ZLIB': 'true',
                   'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
                   'LDFLAGS': ''}
        if self.platform == 'linux':
            return create_docker_jobspec(self.name,
                'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
                'tools/run_tests/build_artifact_csharp.sh',
                environ=environ)
        # Remaining platforms build locally with explicit arch flags.
        archflag = _ARCH_FLAG_MAP[self.arch]
        environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
        environ['LDFLAGS'] += ' %s' % archflag
        return create_jobspec(self.name,
                              ['tools/run_tests/build_artifact_csharp.sh'],
                              environ=environ)

    def __str__(self):
        return self.name
# Maps our arch names onto node-gyp's architecture spellings.
node_gyp_arch_map = {
    'x86': 'ia32',
    'x64': 'x64'
}
class NodeExtArtifact:
    """Builds Node native extension"""

    def __init__(self, platform, arch):
        self.name = 'node_ext_{0}_{1}'.format(platform, arch)
        self.platform = platform
        self.arch = arch
        # node-gyp spells architectures differently (x86 -> ia32).
        self.gyp_arch = node_gyp_arch_map[arch]
        self.labels = ['artifact', 'node', platform, arch]

    def pre_build_jobspecs(self):
        """No pre-build steps are required."""
        return []

    def build_jobspec(self):
        """Return the jobspec that builds the Node extension."""
        if self.platform == 'windows':
            return create_jobspec(self.name,
                                  ['tools\\run_tests\\build_artifact_node.bat',
                                   self.gyp_arch],
                                  shell=True)
        if self.platform == 'linux':
            return create_docker_jobspec(
                self.name,
                'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
                'tools/run_tests/build_artifact_node.sh {}'.format(self.gyp_arch))
        return create_jobspec(self.name,
                              ['tools/run_tests/build_artifact_node.sh',
                               self.gyp_arch])
class PHPArtifact:
    """Builds PHP PECL package"""

    def __init__(self, platform, arch):
        self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'php', platform, arch]

    def pre_build_jobspecs(self):
        """No pre-build steps are required."""
        return []

    def build_jobspec(self):
        """Return the jobspec that packages the PECL extension."""
        if self.platform != 'linux':
            return create_jobspec(self.name,
                                  ['tools/run_tests/build_artifact_php.sh'])
        return create_docker_jobspec(
            self.name,
            'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
            'tools/run_tests/build_artifact_php.sh')
class ProtocArtifact:
    """Builds protoc and protoc-plugin artifacts"""

    def __init__(self, platform, arch):
        self.name = 'protoc_%s_%s' % (platform, arch)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'protoc', platform, arch]

    def pre_build_jobspecs(self):
        # No pre-build steps are needed.
        return []

    def build_jobspec(self):
        """Return the jobspec that builds protoc and its plugins."""
        if self.platform != 'windows':
            cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
            ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
            if self.platform != 'macos':
                # Static runtime linkage and stripped symbols for portable
                # linux binaries.
                ldflags += ' -static-libgcc -static-libstdc++ -s'
            environ={'CONFIG': 'opt',
                     'CXXFLAGS': cxxflags,
                     'LDFLAGS': ldflags,
                     'PROTOBUF_LDFLAGS_EXTRA': ldflags}
            if self.platform == 'linux':
                return create_docker_jobspec(self.name,
                    'tools/dockerfile/grpc_artifact_protoc',
                    'tools/run_tests/build_artifact_protoc.sh',
                    environ=environ)
            else:
                # macos: libc++ plus the minimum-deployment-target flag.
                environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
                return create_jobspec(self.name,
                                      ['tools/run_tests/build_artifact_protoc.sh'],
                                      environ=environ)
        else:
            # Windows: the generator/Platform env vars select the Visual
            # Studio toolchain variant used by the .bat script.
            generator = 'Visual Studio 12 Win64' if self.arch == 'x64' else 'Visual Studio 12'
            vcplatform = 'x64' if self.arch == 'x64' else 'Win32'
            return create_jobspec(self.name,
                                  ['tools\\run_tests\\build_artifact_protoc.bat'],
                                  environ={'generator': generator,
                                           'Platform': vcplatform})

    def __str__(self):
        return self.name
def targets():
    """Gets list of supported targets.

    C#, Node and protoc artifacts are produced for every platform/arch
    combination; Python, Ruby and PHP use explicit per-platform lists.
    """
    return ([Cls(platform, arch)
             for Cls in (CSharpExtArtifact, NodeExtArtifact, ProtocArtifact)
             for platform in ('linux', 'macos', 'windows')
             for arch in ('x86', 'x64')] +
            [PythonArtifact('linux', 'x86', '2.7', 'cp27-cp27m'),
             PythonArtifact('linux', 'x86', '2.7', 'cp27-cp27mu'),
             PythonArtifact('linux', 'x64', '2.7', 'cp27-cp27m'),
             PythonArtifact('linux', 'x64', '2.7', 'cp27-cp27mu'),
             PythonArtifact('macos', 'x64', '2.7'),
             PythonArtifact('windows', 'x86', '2.7'),
             PythonArtifact('windows', 'x64', '2.7'),
             PythonArtifact('linux', 'x86', '3.4', 'cp34-cp34m'),
             PythonArtifact('linux', 'x64', '3.4', 'cp34-cp34m'),
             PythonArtifact('macos', 'x64', '3.4'),
             PythonArtifact('windows', 'x86', '3.4'),
             PythonArtifact('windows', 'x64', '3.4'),
             RubyArtifact('linux', 'x86'),
             RubyArtifact('linux', 'x64'),
             RubyArtifact('macos', 'x64'),
             PHPArtifact('linux', 'x64'),
             PHPArtifact('macos', 'x64')])
| |
# Copyright (c) 2014, Henry Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from botocore import xform_name
from kamboo.core import KambooConnection
from kamboo.exceptions import KambooException, TooManyRecordsException
from kamboo.utils import compare_list_of_dict, clean_null_items
from kamboo.utils import wait_to_complete
log = logging.getLogger(__name__)
class ImageCollection(KambooConnection):
    """
    Represents a collection of EC2 Images
    """

    def __init__(self, service_name="ec2", region_name=None,
                 account_id=None, credentials=None):
        """Open an EC2 connection scoped to the given region/account."""
        super(ImageCollection, self).__init__(service_name,
                                              region_name,
                                              account_id,
                                              credentials)

    def copy_resource(self, source_region, source_id,
                      name=None, description=None):
        """Copy image *source_id* from *source_region*; return an Image.

        Raises KambooException when the API response carries no ImageId.
        """
        params = {"source_region": source_region,
                  "source_image_id": source_id,
                  "name": name,
                  "description": description}
        # clean_null_items drops the optional keys left as None.
        r_data = self.conn.copy_image(**clean_null_items(params))
        if "ImageId" not in r_data:
            raise KambooException(
                "Fail to copy the image '%s:%s'" % (source_region,
                                                    source_id))
        return Image(r_data["ImageId"], collection=self)

    def wait_to_copy_resource(self, source_region, source_id,
                              name=None, description=None):
        """Copy an image and block until it reaches the 'available' state."""
        image = self.copy_resource(source_region=source_region,
                                   source_id=source_id,
                                   name=name,
                                   description=description)
        return wait_to_complete(resource=image, expected_status="available")

    def get_resource_attribute(self, image_id):
        """
        Fetch the attribute of the specified EC2 Image
        """
        r_data = self.conn.describe_images(image_ids=[image_id])
        if "Images" not in r_data:
            raise KambooException("No such image attribute found")
        if len(r_data["Images"]) > 1:
            # Fixed message: the condition fires for more than ONE image.
            raise TooManyRecordsException("More than one image found")
        attr_dict = r_data["Images"][0]
        try:
            attr_dict.update(
                {"Permission": self.get_resource_permission(image_id)})
        except Exception:
            # Permission lookup is best-effort; log instead of silently
            # swallowing the failure (previously `pass`).
            log.debug("Unable to fetch permission of image '%s'",
                      image_id, exc_info=True)
        name = ''.join([self.__class__.__name__, "Attribute"])
        keys = [xform_name(key) for key in attr_dict.keys()]
        return namedtuple(name, keys)(*attr_dict.values())

    def get_resource_permission(self, image_id):
        """
        Fetch the permission of the specified EC2 Image
        """
        r_data = self.conn.describe_image_attribute(
            image_id=image_id,
            attribute="launchPermission")
        if "LaunchPermissions" not in r_data:
            raise KambooException("No such image permission found")
        return r_data["LaunchPermissions"]

    def set_resource_permission(self, id, old, new):
        """
        Modify the permission of the specified EC2 Image

        Only the difference between *old* and *new* is sent to the API.
        """
        permission_diff = compare_list_of_dict(old, new)
        params = clean_null_items(permission_diff)
        if params:
            self.conn.modify_image_attribute(image_id=id,
                                             launch_permission=params)

    def get_resource_tags(self, image_id):
        """
        Fetch the tags of the specified EC2 Image
        """
        r_data = self.conn.describe_tags(resources=[image_id])
        if "Tags" not in r_data:
            raise KambooException("No such image tags found")
        return r_data["Tags"]

    def set_resource_tags(self, image_id, tags=None):
        """
        Modify the tags of the specified EC2 Image
        """
        r_data = self.conn.create_tags(resources=[image_id], tags=tags)
        if "return" in r_data:
            if r_data["return"] == "true":
                return
        raise KambooException("Fail to add tags to the specified image")

    def delete_resource(self, image_id):
        """
        Delete (deregister) the specified EC2 Image
        """
        # BUG FIX: this previously called self.collection.conn -- the
        # collection has no 'collection' attribute -- and passed the builtin
        # ``id`` function instead of the ``image_id`` argument.
        r_data = self.conn.deregister_image(image_id=image_id)
        if "return" in r_data:
            if r_data["return"] == "true":
                return
        raise KambooException("Fail to delete the specified image")
class Image(object):
    """
    Represents an EC2 Image
    """

    def __init__(self, id, attribute=None, collection=None):
        self.id = id
        self.collection = collection
        # Populate instance attributes from the (possibly fetched) attribute
        # record immediately.
        self.refresh_resource_attribute(id, attribute)

    def __repr__(self):
        return 'Image:%s' % self.id

    @property
    def desc(self):
        """Image description (cached locally, pushed to EC2 on assignment)."""
        return self._desc

    @desc.setter
    def desc(self, value):
        # BUG FIX: previously the API response dict was cached as _desc.
        # Cache the assigned value instead, consistent with the tags and
        # permission setters below.
        self.collection.conn.modify_image_attribute(
            image_id=self.id, description=value)
        self._desc = value

    @property
    def status(self):
        # Always re-fetch so callers polling status see fresh state.
        self.refresh_resource_attribute()
        return self.state

    @property
    def tags(self):
        return self._tags

    @tags.setter
    def tags(self, value):
        self.collection.set_resource_tags(self.id, tags=value)
        self._tags = value

    @property
    def permission(self):
        return self._permission

    @permission.setter
    def permission(self, value):
        # The collection computes and applies the diff between old and new.
        self.collection.set_resource_permission(
            self.id, self._permission, value)
        self._permission = value

    def refresh_resource_attribute(self, id=None, attribute=None):
        """Re-read image attributes and mirror them onto this instance."""
        if id is None:
            id = self.id
        if not attribute:
            attribute = self.collection.get_resource_attribute(id)
        # Expose every API attribute directly as an instance attribute.
        self.__dict__.update(attribute._asdict())
        self.id = self.image_id
        self._tags = getattr(attribute, "tags", [])
        self._desc = getattr(attribute, "description", "")
        self._permission = getattr(attribute, "permission", [])

    def add_permission(self, account_id):
        """Grant launch permission to *account_id* (appends a UserId entry)."""
        new_permission = []
        new_permission.extend(self.permission)
        new_permission.append({"UserId": account_id})
        self.permission = new_permission

    def delete(self, id=None):
        """Deregister this image (or the image given by *id*)."""
        if id is None:
            id = self.id
        self.collection.delete_resource(id)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import fnmatch
import json
import os
import re
import subprocess
import sys
# TODO(dcheng): It's kind of horrible that this is copy and pasted from
# presubmit_canned_checks.py, but it's far easier than any of the alternatives.
def _ReportErrorFileAndLine(filename, line_num, dummy_line):
"""Default error formatter for _FindNewViolationsOfRule."""
return '%s:%s' % (filename, line_num)
class MockCannedChecks(object):
  def _FindNewViolationsOfRule(self, callable_rule, input_api,
                               source_file_filter=None,
                               error_formatter=_ReportErrorFileAndLine):
    """Find all newly introduced violations of a per-line rule (a callable).

    Arguments:
      callable_rule: a callable taking a file extension and line of input and
        returning True if the rule is satisfied and False if there was a
        problem.
      input_api: object to enumerate the affected files.
      source_file_filter: a filter to be passed to the input api.
      error_formatter: a callable taking (filename, line_number, line) and
        returning a formatted error string.

    Returns:
      A list of the newly-introduced violations reported by the rule.
    """
    violations = []
    affected = input_api.AffectedFiles(include_deletes=False,
                                       file_filter=source_file_filter)
    for changed_file in affected:
      extension = str(changed_file.LocalPath()).rsplit('.', 1)[-1]
      # Fast path: scan the whole file first. Asking the SCM for the changed
      # region is expensive (notably on Win32), and most files are clean, so
      # the per-diff scan below is only reached when a violation exists.
      file_has_violation = any(
          not callable_rule(extension, line)
          for line in changed_file.NewContents())
      if not file_has_violation:
        continue
      for line_num, line in changed_file.ChangedContents():
        if not callable_rule(extension, line):
          violations.append(
              error_formatter(changed_file.LocalPath(), line_num, line))
    return violations
class MockInputApi(object):
  """Mock class for the InputApi class.

  This class can be used for unittests for presubmit by initializing the files
  attribute as the list of changed files.
  """

  DEFAULT_BLACK_LIST = ()

  def __init__(self):
    self.canned_checks = MockCannedChecks()
    self.fnmatch = fnmatch
    self.json = json
    self.re = re
    self.os_path = os.path
    # Fixed: `self.platform` was assigned twice with the same value.
    self.platform = sys.platform
    self.python_executable = sys.executable
    self.subprocess = subprocess
    self.sys = sys
    self.files = []
    self.is_committing = False
    self.change = MockChange([])
    self.presubmit_local_path = os.path.dirname(__file__)

  def CreateMockFileInPath(self, f_list):
    # Pretend that exactly the files in f_list exist on disk.
    self.os_path.exists = lambda x: x in f_list

  def AffectedFiles(self, file_filter=None, include_deletes=False):
    """Yields the mock files, honoring the filter and the deletes flag."""
    # Loop variable renamed from `file` to avoid shadowing the builtin.
    for f in self.files:
      if file_filter and not file_filter(f):
        continue
      if not include_deletes and f.Action() == 'D':
        continue
      yield f

  def AffectedSourceFiles(self, file_filter=None):
    return self.AffectedFiles(file_filter=file_filter)

  def FilterSourceFile(self, file, white_list=(), black_list=()):
    """Returns True iff the path matches the white list (or the white list is
    empty) and does not match the black list."""
    local_path = file.LocalPath()
    found_in_white_list = not white_list
    if white_list:
      if type(white_list) is str:
        raise TypeError('white_list should be an iterable of strings')
      for pattern in white_list:
        compiled_pattern = re.compile(pattern)
        if compiled_pattern.search(local_path):
          found_in_white_list = True
          break
    if black_list:
      if type(black_list) is str:
        raise TypeError('black_list should be an iterable of strings')
      for pattern in black_list:
        compiled_pattern = re.compile(pattern)
        if compiled_pattern.search(local_path):
          return False
    return found_in_white_list

  def LocalPaths(self):
    return [f.LocalPath() for f in self.files]

  def PresubmitLocalPath(self):
    return self.presubmit_local_path

  def ReadFile(self, filename, mode='rU'):
    if hasattr(filename, 'AbsoluteLocalPath'):
      filename = filename.AbsoluteLocalPath()
    for file_ in self.files:
      if file_.LocalPath() == filename:
        return '\n'.join(file_.NewContents())
    # Otherwise, file is not in our mock API.
    # Fixed: `raise IOError, msg` is Python-2-only syntax; the call form
    # below is valid in both Python 2 and 3.
    raise IOError("No such file or directory: '%s'" % filename)
class MockOutputApi(object):
  """Mock class for the OutputApi class.

  An instance of this class can be passed to presubmit unittests for outputing
  various types of results.
  """

  class PresubmitResult(object):
    """Base result: a message plus optional items and long text."""

    def __init__(self, message, items=None, long_text=''):
      self.message = message
      self.items = items
      self.long_text = long_text

    def __repr__(self):
      return self.message

  class PresubmitError(PresubmitResult):
    def __init__(self, message, items=None, long_text=''):
      super(MockOutputApi.PresubmitError, self).__init__(
          message, items, long_text)
      self.type = 'error'

  class PresubmitPromptWarning(PresubmitResult):
    def __init__(self, message, items=None, long_text=''):
      super(MockOutputApi.PresubmitPromptWarning, self).__init__(
          message, items, long_text)
      self.type = 'warning'

  class PresubmitNotifyResult(PresubmitResult):
    def __init__(self, message, items=None, long_text=''):
      super(MockOutputApi.PresubmitNotifyResult, self).__init__(
          message, items, long_text)
      self.type = 'notify'

  class PresubmitPromptOrNotify(PresubmitResult):
    def __init__(self, message, items=None, long_text=''):
      super(MockOutputApi.PresubmitPromptOrNotify, self).__init__(
          message, items, long_text)
      self.type = 'promptOrNotify'

  def __init__(self):
    self.more_cc = []

  def AppendCC(self, more_cc):
    self.more_cc.extend(more_cc)
class MockFile(object):
  """Mock class for the File class.

  This class can be used to form the mock list of changed files in
  MockInputApi for presubmit unittests.
  """

  def __init__(self, local_path, new_contents, old_contents=None, action='A'):
    self._local_path = local_path
    self._new_contents = new_contents
    # Pretend every line of the new contents changed, numbered from 1.
    self._changed_contents = list(enumerate(new_contents, 1))
    self._action = action
    self._old_contents = old_contents
    # Synthesize an SCM diff that adds every line of the file.
    diff_header = "--- /dev/null\n+++ %s\n@@ -0,0 +1,%d @@\n" % (
        local_path, len(new_contents))
    diff_body = ''.join("+%s\n" % line for line in new_contents)
    self._scm_diff = diff_header + diff_body

  def Action(self):
    return self._action

  def ChangedContents(self):
    return self._changed_contents

  def NewContents(self):
    return self._new_contents

  def LocalPath(self):
    return self._local_path

  def AbsoluteLocalPath(self):
    return self._local_path

  def GenerateScmDiff(self):
    return self._scm_diff

  def OldContents(self):
    return self._old_contents

  def rfind(self, p):
    """os.path.basename is called on MockFile so we need an rfind method."""
    return self._local_path.rfind(p)

  def __getitem__(self, i):
    """os.path.basename is called on MockFile so we need a get method."""
    return self._local_path[i]

  def __len__(self):
    """os.path.basename is called on MockFile so we need a len method."""
    return len(self._local_path)

  def replace(self, altsep, sep):
    """os.path.basename is called on MockFile so we need a replace method."""
    return self._local_path.replace(altsep, sep)
class MockAffectedFile(MockFile):
  # Same as MockFile; keeps an explicit AbsoluteLocalPath override that
  # returns the mock local path unchanged, mirroring the real AffectedFile
  # interface.
  def AbsoluteLocalPath(self):
    return self._local_path
class MockChange(object):
  """Mock class for Change class.

  This class can be used in presubmit unittests to mock the query of the
  current change.
  """

  def __init__(self, changed_files):
    self._files = changed_files
    # Maps footer key -> list of values; empty by default.
    self.footers = defaultdict(list)

  def LocalPaths(self):
    return self._files

  def AffectedFiles(self, include_dirs=False, include_deletes=True,
                    file_filter=None):
    # The flags and filter are intentionally ignored by this mock.
    return self._files

  def GitFootersFromDescription(self):
    return self.footers
| |
# Base classes for ASN.1 types
import sys
from pyasn1.type import constraint, tagmap, tag
from pyasn1 import error
# Marker base class shared by all ASN.1 type/value objects.
class Asn1Item: pass
class Asn1ItemBase(Asn1Item):
    """Common behaviour of all ASN.1 types: tagging and subtype constraints."""
    # Set of tags for this ASN.1 type
    tagSet = tag.TagSet()
    # A list of constraint.Constraint instances for checking values
    subtypeSpec = constraint.ConstraintsIntersection()
    # Used for ambiguous ASN.1 types identification
    typeId = None

    def __init__(self, tagSet=None, subtypeSpec=None):
        # Fall back to the class-level defaults when not overridden
        # per-instance.
        if tagSet is None:
            self._tagSet = self.tagSet
        else:
            self._tagSet = tagSet
        if subtypeSpec is None:
            self._subtypeSpec = self.subtypeSpec
        else:
            self._subtypeSpec = subtypeSpec

    def _verifySubtypeSpec(self, value, idx=None):
        """Apply the constraint set to *value*; re-raise tagged with the
        concrete type's name."""
        try:
            self._subtypeSpec(value, idx)
        except error.PyAsn1Error:
            # Re-raise the same exception class, embedding the original
            # exception and this type's name in the message.
            c, i, t = sys.exc_info()
            raise c('%s at %s' % (i, self.__class__.__name__))

    def getSubtypeSpec(self): return self._subtypeSpec

    def getTagSet(self): return self._tagSet

    def getEffectiveTagSet(self): return self._tagSet  # used by untagged types

    def getTagMap(self): return tagmap.TagMap({self._tagSet: self})

    def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
        # NOTE: `and` binds tighter than `or` here -- identity with `other`
        # short-circuits the tag/constraint comparison entirely.
        return self is other or \
               (not matchTags or \
                self._tagSet == other.getTagSet()) and \
               (not matchConstraints or \
                self._subtypeSpec==other.getSubtypeSpec())

    def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
        """Returns true if argument is a ASN1 subtype of ourselves"""
        return (not matchTags or \
                self._tagSet.isSuperTagSetOf(other.getTagSet())) and \
               (not matchConstraints or \
                (self._subtypeSpec.isSuperTypeOf(other.getSubtypeSpec())))
class NoValue:
    """Sentinel standing in for an ASN.1 value that has not been set.

    Any attempt to use it as a real value raises PyAsn1Error.
    """

    def __repr__(self): return '%s()' % self.__class__.__name__

    def __getattr__(self, attr):
        raise error.PyAsn1Error('No value for %s()' % attr)

    def __getitem__(self, i):
        raise error.PyAsn1Error('No value')

# Shared singleton instance used throughout as the "no value" marker.
noValue = NoValue()
# Base class for "simple" ASN.1 objects. These are immutable.
class AbstractSimpleAsn1Item(Asn1ItemBase):
    """Base class for scalar ("simple") ASN.1 objects.

    Instances are immutable; derived values are produced via clone() and
    subtype().
    """
    # Class-level default used when no value is passed to the initializer.
    defaultValue = noValue

    def __init__(self, value=None, tagSet=None, subtypeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        if value is None or value is noValue:
            value = self.defaultValue
        if value is None or value is noValue:
            # Unset: keep the noValue sentinel. NOTE(review): __hash__ then
            # returns the sentinel itself rather than an int -- confirm
            # callers never hash a value-less instance.
            self.__hashedValue = value = noValue
        else:
            # Normalize, validate against constraints, and cache the hash.
            value = self.prettyIn(value)
            self._verifySubtypeSpec(value)
            self.__hashedValue = hash(value)
        self._value = value
        self._len = None

    def __repr__(self):
        # Show only the pieces that differ from the class-level defaults.
        r = []
        if self._value is not self.defaultValue:
            r.append(self.prettyOut(self._value))
        if self._tagSet is not self.tagSet:
            r.append('tagSet=%r' % (self._tagSet,))
        if self._subtypeSpec is not self.subtypeSpec:
            r.append('subtypeSpec=%r' % (self._subtypeSpec,))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(r))

    def __str__(self): return str(self._value)

    def __eq__(self, other):
        # Identity fast-path, otherwise compare the held value to `other`.
        return self is other and True or self._value == other
    def __ne__(self, other): return self._value != other
    def __lt__(self, other): return self._value < other
    def __le__(self, other): return self._value <= other
    def __gt__(self, other): return self._value > other
    def __ge__(self, other): return self._value >= other

    # Truth value follows the held value (Python 2/3 spelling differs).
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._value)
    else:
        def __bool__(self): return bool(self._value)

    def __hash__(self): return self.__hashedValue

    def hasValue(self):
        # True when a real value (not the noValue sentinel) is held.
        return not isinstance(self._value, NoValue)

    def clone(self, value=None, tagSet=None, subtypeSpec=None):
        """Return a copy, optionally overriding value/tags/constraints."""
        if value is None and tagSet is None and subtypeSpec is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None):
        """Return a derived type: tags are layered, constraints are summed."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        return self.__class__(value, tagSet, subtypeSpec)

    # Value normalization hooks overridden by concrete types.
    def prettyIn(self, value): return value

    def prettyOut(self, value): return str(value)

    def prettyPrint(self, scope=0):
        if self.hasValue():
            return self.prettyOut(self._value)
        else:
            return '<no value>'

    # XXX Compatibility stub
    def prettyPrinter(self, scope=0): return self.prettyPrint(scope)

    def prettyPrintType(self, scope=0):
        return '%s -> %s' % (self.getTagSet(), self.__class__.__name__)
#
# Constructed types:
# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
# * ASN1 types and values are represented by Python class instances
# * Value initialization is made for defaulted components only
# * Primary method of component addressing is by-position. Data model for base
# type is Python sequence. Additional type-specific addressing methods
# may be implemented for particular types.
# * SequenceOf and SetOf types do not implement any additional methods
# * Sequence, Set and Choice types also implement by-identifier addressing
# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
# * Sequence and Set types may include optional and defaulted
# components
# * Constructed types hold a reference to component types used for value
# verification and ordering.
# * Component type is a scalar type for SequenceOf/SetOf types and a list
# of types for Sequence/Set/Choice.
#
class AbstractConstructedAsn1Item(Asn1ItemBase):
    """Base class for constructed ASN.1 types (Sequence/Set/Choice/...-Of)."""
    # Component type(s): a scalar type for SequenceOf/SetOf, a list of types
    # for Sequence/Set/Choice (see module comment above).
    componentType = None
    # Constraints on the number of components.
    sizeSpec = constraint.ConstraintsIntersection()

    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        Asn1ItemBase.__init__(self, tagSet, subtypeSpec)
        # Fall back to the class-level defaults when not overridden.
        if componentType is None:
            self._componentType = self.componentType
        else:
            self._componentType = componentType
        if sizeSpec is None:
            self._sizeSpec = self.sizeSpec
        else:
            self._sizeSpec = sizeSpec
        # Component values by position; the counter tracks how many were
        # explicitly set.
        self._componentValues = []
        self._componentValuesSet = 0

    def __repr__(self):
        # Show only the pieces that differ from the class-level defaults,
        # then append a setComponents(...) call for any held values.
        r = []
        if self._componentType is not self.componentType:
            r.append('componentType=%r' % (self._componentType,))
        if self._tagSet is not self.tagSet:
            r.append('tagSet=%r' % (self._tagSet,))
        if self._subtypeSpec is not self.subtypeSpec:
            r.append('subtypeSpec=%r' % (self._subtypeSpec,))
        r = '%s(%s)' % (self.__class__.__name__, ', '.join(r))
        if self._componentValues:
            r += '.setComponents(%s)' % ', '.join([repr(x) for x in self._componentValues])
        return r

    def __eq__(self, other):
        # Identity fast-path, otherwise compare held component values.
        return self is other and True or self._componentValues == other
    def __ne__(self, other): return self._componentValues != other
    def __lt__(self, other): return self._componentValues < other
    def __le__(self, other): return self._componentValues <= other
    def __gt__(self, other): return self._componentValues > other
    def __ge__(self, other): return self._componentValues >= other

    # Truth value follows the held components (Python 2/3 spelling differs).
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._componentValues)
    else:
        def __bool__(self): return bool(self._componentValues)

    def getComponentTagMap(self):
        # Abstract: concrete types know their component tag layout.
        raise error.PyAsn1Error('Method not implemented')

    def _cloneComponentValues(self, myClone, cloneValueFlag): pass

    def clone(self, tagSet=None, subtypeSpec=None, sizeSpec=None,
              cloneValueFlag=None):
        """Return a copy; component values are copied only when
        cloneValueFlag is set."""
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r

    def subtype(self, implicitTag=None, explicitTag=None, subtypeSpec=None,
                sizeSpec=None, cloneValueFlag=None):
        """Return a derived type: tags layered, constraint/size specs summed."""
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if sizeSpec is None:
            sizeSpec = self._sizeSpec
        else:
            sizeSpec = sizeSpec + self._sizeSpec
        r = self.__class__(self._componentType, tagSet, subtypeSpec, sizeSpec)
        if cloneValueFlag:
            self._cloneComponentValues(r, cloneValueFlag)
        return r

    def _verifyComponent(self, idx, value): pass

    def verifySizeSpec(self): self._sizeSpec(self)

    def getComponentByPosition(self, idx):
        raise error.PyAsn1Error('Method not implemented')

    def setComponentByPosition(self, idx, value, verifyConstraints=True):
        raise error.PyAsn1Error('Method not implemented')

    def setComponents(self, *args, **kwargs):
        """Set components by position (*args) and by name (**kwargs);
        returns self for chaining."""
        for idx in range(len(args)):
            self[idx] = args[idx]
        for k in kwargs:
            self[k] = kwargs[k]
        return self

    def getComponentType(self): return self._componentType

    def setDefaultComponents(self): pass

    # Sequence protocol delegates to the per-type component accessors.
    def __getitem__(self, idx): return self.getComponentByPosition(idx)
    def __setitem__(self, idx, value): self.setComponentByPosition(idx, value)

    def __len__(self): return len(self._componentValues)

    def clear(self):
        # Reset to the freshly-constructed (empty) state.
        self._componentValues = []
        self._componentValuesSet = 0
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from django import http
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.urlresolvers import reverse
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from .tables import ContainersTable, ObjectsTable, wrap_delimiter
from . import forms
CONTAINER_INDEX_URL = reverse('horizon:project:containers:index')
class SwiftTests(test.TestCase):
    """Tests for the Swift object-store panel (containers/objects views).

    Each test stubs the relevant ``api.swift_*`` call with mox, records the
    expected call(s), replays, and then drives the view through the Django
    test client or a table handler.
    """

    def test_index_no_container_selected(self):
        # Index with no container: only the container listing is fetched.
        containers = self.containers.list()
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(IsA(http.HttpRequest), marker=None) \
            .AndReturn((containers, False))
        self.mox.ReplayAll()
        res = self.client.get(CONTAINER_INDEX_URL)
        self.assertTemplateUsed(res, 'project/containers/index.html')
        self.assertIn('table', res.context)
        resp_containers = res.context['table'].data
        self.assertEqual(len(resp_containers), len(containers))

    def test_delete_container(self):
        # Deleting via the table action redirects back to the index.
        container = self.containers.get(name=u"container_two\u6346")
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        api.swift_delete_container(IsA(http.HttpRequest), container.name)
        self.mox.ReplayAll()
        action_string = u"containers__delete__%s" % container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers.list())
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_delete_container_nonempty(self):
        # An API error on delete is handled and still redirects to the index.
        container = self.containers.first()
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        exc = self.exceptions.swift
        exc.silence_logging = True
        api.swift_delete_container(IsA(http.HttpRequest),
                                   container.name).AndRaise(exc)
        self.mox.ReplayAll()
        action_string = u"containers__delete__%s" % container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers.list())
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_create_container_get(self):
        res = self.client.get(reverse('horizon:project:containers:create'))
        self.assertTemplateUsed(res, 'project/containers/create.html')

    def test_create_container_post(self):
        self.mox.StubOutWithMock(api, 'swift_create_container')
        api.swift_create_container(IsA(http.HttpRequest),
                                   self.containers.first().name)
        self.mox.ReplayAll()
        formData = {'name': self.containers.first().name,
                    'method': forms.CreateContainer.__name__}
        res = self.client.post(reverse('horizon:project:containers:create'),
                               formData)
        # On success the view redirects into the newly created container.
        url = reverse('horizon:project:containers:index',
                      args=[wrap_delimiter(self.containers.first().name)])
        self.assertRedirectsNoFollow(res, url)

    def test_index_container_selected(self):
        # Index with a container selected fetches both containers and objects.
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        self.mox.StubOutWithMock(api, 'swift_get_objects')
        containers = (self.containers.list(), False)
        ret = (self.objects.list(), False)
        api.swift_get_containers(IsA(http.HttpRequest),
                                 marker=None).AndReturn(containers)
        api.swift_get_objects(IsA(http.HttpRequest),
                              self.containers.first().name,
                              marker=None,
                              prefix=None).AndReturn(ret)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:containers:index',
                                      args=[wrap_delimiter(self.containers
                                                           .first()
                                                           .name)]))
        self.assertTemplateUsed(res, 'project/containers/index.html')
        # UTF8 encoding here to ensure there aren't problems with Nose output.
        expected = [obj.name.encode('utf8') for obj in self.objects.list()]
        self.assertQuerysetEqual(res.context['objects_table'].data,
                                 expected,
                                 lambda obj: obj.name.encode('utf8'))

    def test_upload(self):
        container = self.containers.first()
        obj = self.objects.first()
        OBJECT_DATA = 'objectData'
        # Real temp file so the POST goes through Django's upload machinery.
        temp_file = tempfile.TemporaryFile()
        temp_file.write(OBJECT_DATA)
        temp_file.flush()
        temp_file.seek(0)
        self.mox.StubOutWithMock(api, 'swift_upload_object')
        api.swift_upload_object(IsA(http.HttpRequest),
                                container.name,
                                obj.name,
                                IsA(InMemoryUploadedFile)).AndReturn(obj)
        self.mox.ReplayAll()
        upload_url = reverse('horizon:project:containers:object_upload',
                             args=[container.name])
        res = self.client.get(upload_url)
        self.assertTemplateUsed(res, 'project/containers/upload.html')
        res = self.client.get(upload_url)
        self.assertContains(res, 'enctype="multipart/form-data"')
        formData = {'method': forms.UploadObject.__name__,
                    'container_name': container.name,
                    'name': obj.name,
                    'object_file': temp_file}
        res = self.client.post(upload_url, formData)
        index_url = reverse('horizon:project:containers:index',
                            args=[wrap_delimiter(container.name)])
        self.assertRedirectsNoFollow(res, index_url)
        # Test invalid filename
        formData['name'] = "contains/a/slash"
        res = self.client.post(upload_url, formData)
        self.assertNoMessages()
        self.assertContains(res, "Slash is not an allowed character.")

    def test_delete(self):
        container = self.containers.first()
        obj = self.objects.first()
        index_url = reverse('horizon:project:containers:index',
                            args=[wrap_delimiter(container.name)])
        self.mox.StubOutWithMock(api, 'swift_delete_object')
        api.swift_delete_object(IsA(http.HttpRequest),
                                container.name,
                                obj.name)
        self.mox.ReplayAll()
        action_string = "objects__delete_object__%s" % obj.name
        form_data = {"action": action_string}
        req = self.factory.post(index_url, form_data)
        kwargs = {"container_name": container.name}
        table = ObjectsTable(req, self.objects.list(), **kwargs)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], index_url)

    def test_download(self):
        container = self.containers.first()
        obj = self.objects.first()
        self.mox.StubOutWithMock(api.swift, 'swift_get_object')
        api.swift.swift_get_object(IsA(http.HttpRequest),
                                   container.name,
                                   obj.name).AndReturn(obj)
        self.mox.ReplayAll()
        download_url = reverse('horizon:project:containers:object_download',
                               args=[container.name, obj.name])
        res = self.client.get(download_url)
        self.assertEqual(res.content, obj.data)
        # Download responses must be served as attachments.
        self.assertTrue(res.has_header('Content-Disposition'))

    def test_copy_index(self):
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        ret = (self.containers.list(), False)
        api.swift_get_containers(IsA(http.HttpRequest)).AndReturn(ret)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:project:containers:object_copy',
                                      args=[self.containers.first().name,
                                            self.objects.first().name]))
        self.assertTemplateUsed(res, 'project/containers/copy.html')

    def test_copy(self):
        # Copy an object between two containers; redirect to the destination.
        container_1 = self.containers.get(name=u"container_one\u6346")
        container_2 = self.containers.get(name=u"container_two\u6346")
        obj = self.objects.first()
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        self.mox.StubOutWithMock(api, 'swift_copy_object')
        ret = (self.containers.list(), False)
        api.swift_get_containers(IsA(http.HttpRequest)).AndReturn(ret)
        api.swift_copy_object(IsA(http.HttpRequest),
                              container_1.name,
                              obj.name,
                              container_2.name,
                              obj.name)
        self.mox.ReplayAll()
        formData = {'method': forms.CopyObject.__name__,
                    'new_container_name': container_2.name,
                    'new_object_name': obj.name,
                    'orig_container_name': container_1.name,
                    'orig_object_name': obj.name}
        copy_url = reverse('horizon:project:containers:object_copy',
                           args=[container_1.name, obj.name])
        res = self.client.post(copy_url, formData)
        index_url = reverse('horizon:project:containers:index',
                            args=[wrap_delimiter(container_2.name)])
        self.assertRedirectsNoFollow(res, index_url)
| |
# -*- coding: utf-8 -*-
"""Module with the Graphviz drawing calls."""
#
# (C) Pywikibot team, 2006-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from collections import Counter
import itertools
import threading
try:
import pydot
except ImportError as e:
pydot = e
import pywikibot
from pywikibot import config2 as config
# deprecated value
pydotfound = not isinstance(pydot, ImportError)
class GraphImpossible(Exception):
    """Drawing a graph is not possible on your system.

    Raised when the optional ``pydot`` dependency failed to import.
    """
class GraphSavingThread(threading.Thread):
    """
    Threaded graph renderer.

    Rendering a graph can take extremely long. We use
    multithreading because of that.

    TODO: Find out if several threads running in parallel
    can slow down the system too much. Consider adding a
    mechanism to kill a thread if it takes too long.
    """

    def __init__(self, graph, originPage):
        """Initializer."""
        threading.Thread.__init__(self)
        self.graph = graph
        self.originPage = originPage

    def run(self):
        """Write graphs to the data directory."""
        # Loop variable renamed from `format` to avoid shadowing the builtin.
        for fmt in config.interwiki_graph_formats:
            filename = config.datafilepath(
                'interwiki-graphs/' + getFilename(self.originPage, fmt))
            if self.graph.write(filename, prog='dot', format=fmt):
                pywikibot.output('Graph saved as %s' % filename)
            else:
                pywikibot.output('Graph could not be saved as %s' % filename)
class Subject(object):
    """Data about a page with translations on multiple wikis."""

    def __init__(self, origin=None):
        """Initializer.

        @param origin: the page on the 'origin' wiki
        @type origin: pywikibot.page.Page
        """
        # Remember the "origin page"
        self._origin = origin
        # Temporary variable to support git blame; do not use
        originPage = origin
        self.found_in = None
        # foundIn is a dictionary where pages are keys and lists of
        # pages are values. It stores where we found each page.
        # As we haven't yet found a page that links to the origin page, we
        # start with an empty list for it.
        if originPage:
            self.foundIn = {self.originPage: []}
        else:
            self.foundIn = {}

    @property
    def origin(self):
        """Page on the origin wiki."""
        return self._origin

    @origin.setter
    def origin(self, value):
        self._origin = value

    @property
    def originPage(self):
        """Deprecated property for the origin page.

        DEPRECATED. Use origin.
        """
        # TODO: deprecate this property
        return self.origin

    @originPage.setter
    def originPage(self, value):
        self.origin = value

    @property
    def foundIn(self):
        """Mapping of pages to others pages interwiki linked to it.

        DEPRECATED. Use found_in.
        """
        # TODO: deprecate this property
        return self.found_in

    @foundIn.setter
    def foundIn(self, value):
        """Temporary property setter to support code migration."""
        self.found_in = value
class GraphDrawer(object):
"""Graphviz (dot) code creator."""
def __init__(self, subject):
    """Initializer.

    @param subject: page data to graph
    @type subject: pywikibot.interwiki_graph.Subject

    @raises GraphImpossible: pydot is not installed
    """
    # The module-level `pydot` holds the ImportError instance when the
    # optional dependency failed to import; fail early in that case.
    if isinstance(pydot, ImportError):
        raise GraphImpossible('pydot is not installed: %s.' % pydot)
    # Graph object is created later by the drawing entry point (outside
    # this view); start without one.
    self.graph = None
    self.subject = subject
def getLabel(self, page):
    """Return the quoted ``site:title`` node label for *page*."""
    site_code = page.site.code
    title = page.title()
    return '"%s:%s"' % (site_code, title)
def _octagon_site_set(self):
"""Build a list of sites with more than one valid page."""
page_list = self.subject.found_in.keys()
# Only track sites of normal pages
each_site = [page.site for page in page_list
if page.exists() and not page.isRedirectPage()]
return {x[0] for x in itertools.takewhile(
lambda x: x[1] > 1,
Counter(each_site).most_common())}
def addNode(self, page):
"""Add a node for page."""
node = pydot.Node(self.getLabel(page), shape='rectangle')
node.set_URL('"http://%s%s"'
% (page.site.hostname(),
page.site.get_address(page.title(as_url=True))))
node.set_style('filled')
node.set_fillcolor('white')
node.set_fontsize('11')
if not page.exists():
node.set_fillcolor('red')
elif page.isRedirectPage():
node.set_fillcolor('blue')
elif page.isDisambig():
node.set_fillcolor('orange')
if page.namespace() != self.subject.originPage.namespace():
node.set_color('green')
node.set_style('filled,bold')
if page.site in self.octagon_sites:
# mark conflict by octagonal node
node.set_shape('octagon')
self.graph.add_node(node)
def addDirectedEdge(self, page, refPage):
"""Add a directed edge from refPage to page."""
# if page was given as a hint, referrers would be [None]
if refPage is not None:
sourceLabel = self.getLabel(refPage)
targetLabel = self.getLabel(page)
edge = pydot.Edge(sourceLabel, targetLabel)
oppositeEdge = self.graph.get_edge(targetLabel, sourceLabel)
if oppositeEdge:
if isinstance(oppositeEdge, list):
# bugfix for pydot >= 1.0.3
oppositeEdge = oppositeEdge[0]
oppositeEdge.set_dir('both')
# workaround for sf.net bug 401: prevent duplicate edges
# (it is unclear why duplicate edges occur)
# https://sourceforge.net/p/pywikipediabot/bugs/401/
elif self.graph.get_edge(sourceLabel, targetLabel):
pywikibot.output(
'BUG: Tried to create duplicate edge from %s to %s'
% (refPage.title(as_link=True), page.title(as_link=True)))
# duplicate edges would be bad because then get_edge() would
# give a list of edges, not a single edge when we handle the
# opposite edge.
else:
# add edge
if refPage.site == page.site:
edge.set_color('blue')
elif not page.exists():
# mark dead links
edge.set_color('red')
elif refPage.isDisambig() != page.isDisambig():
# mark links between disambiguation and non-disambiguation
# pages
edge.set_color('orange')
if refPage.namespace() != page.namespace():
edge.set_color('green')
self.graph.add_edge(edge)
def saveGraphFile(self):
"""Write graphs to the data directory."""
thread = GraphSavingThread(self.graph, self.subject.originPage)
thread.start()
def createGraph(self):
"""
Create graph of the interwiki links.
For more info see U{https://meta.wikimedia.org/wiki/Interwiki_graphs}
"""
pywikibot.output('Preparing graph for %s'
% self.subject.originPage.title())
# create empty graph
self.graph = pydot.Dot()
# self.graph.set('concentrate', 'true')
self.octagon_sites = self._octagon_site_set()
for page in self.subject.foundIn.keys():
# a node for each found page
self.addNode(page)
# mark start node by pointing there from a black dot.
firstLabel = self.getLabel(self.subject.originPage)
self.graph.add_node(pydot.Node('start', shape='point'))
self.graph.add_edge(pydot.Edge('start', firstLabel))
for page, referrers in self.subject.foundIn.items():
for refPage in referrers:
self.addDirectedEdge(page, refPage)
self.saveGraphFile()
def getFilename(page, extension=None):
    """
    Create a filename that is unique for the page.

    @param page: page used to create the new filename
    @type page: pywikibot.page.Page
    @param extension: file extension
    @type extension: str
    @return: filename of <family>-<lang>-<page>.<ext>
    @rtype: str
    """
    parts = [page.site.family.name,
             page.site.code,
             page.title(as_filename=True)]
    filename = '-'.join(parts)
    if extension:
        filename = '{}.{}'.format(filename, extension)
    return filename
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import random
from op_test import OpTest
def seqconv(x,
            lod,
            filter,
            context_length,
            context_start,
            padding_trainable=False,
            padding_data=None):
    """Numpy reference implementation of the sequence_conv op.

    Builds the im2col-style context matrix ``col`` (one block of M columns
    per context position) for each sequence in the batch, optionally using
    trainable padding rows at sequence boundaries, then projects it with
    ``filter``.
    """
    T, M = x.shape
    col = np.zeros((T, context_length * M)).astype('float32')
    # Convert length-based lod into cumulative sequence offsets.
    offset = [0]
    for seq_len in lod[0]:
        offset.append(offset[-1] + seq_len)
    begin_pad = max(0, -context_start)
    # Iterate (start, end) pairs of consecutive offsets, one per sequence.
    for seq_begin, seq_end in zip(offset[:-1], offset[1:]):
        for j in range(context_length):
            in_begin = seq_begin + context_start + j
            in_end = seq_end + context_start + j
            out_begin = seq_begin
            out_end = seq_end
            if in_begin < seq_begin:
                # Context window sticks out before the sequence start.
                pad_size = min(seq_begin - in_begin, seq_end - seq_begin)
                if padding_trainable:
                    sub_w = padding_data[j:j + pad_size, :]
                    col[seq_begin:seq_begin + pad_size,
                        j * M:(j + 1) * M] = sub_w
                out_begin = seq_begin + pad_size
                in_begin = seq_begin
            if in_end > seq_end:
                # Context window sticks out past the sequence end.
                pad_size = min(in_end - seq_end, seq_end - seq_begin)
                if padding_trainable:
                    sub_w = padding_data[begin_pad + context_start + j -
                                         pad_size:begin_pad + context_start +
                                         j, :]
                    col[seq_end - pad_size:seq_end,
                        j * M:(j + 1) * M] = sub_w
                in_end = seq_end
                out_end = seq_end - pad_size
            if in_end <= in_begin:
                continue
            col[out_begin:out_end, j * M:(j + 1) * M] += x[in_begin:in_end, :]
    return np.dot(col, filter)
class TestSeqProject(OpTest):
    """Check the sequence_conv op against the numpy reference ``seqconv``."""

    def setUp(self):
        self.init_test_case()
        self.op_type = 'sequence_conv'

        # Degenerate configuration: a 1-wide window starting at 0 needs no
        # padding, so trainable padding makes no sense.
        # NOTE(review): this early return leaves self.inputs/self.outputs
        # unset; the default init_test_case never hits it -- confirm the
        # framework tolerates a partially initialised test otherwise.
        if self.context_length == 1 \
                and self.context_start == 0 \
                and self.padding_trainable:
            print("If context_start is 0 " \
                  "and context_length is 1," \
                  " padding_trainable should be false.")
            return

        # one level, batch size
        x = np.random.uniform(0.1, 1, [self.input_size[0],
                                       self.input_size[1]]).astype('float32')
        w = np.random.uniform(0.1, 1, [
            self.context_length * self.input_size[1], self.output_represention
        ]).astype('float32')

        # Rows of padding needed before/after each sequence for this window.
        begin_pad = np.max([0, -self.context_start])
        end_pad = np.max([0, self.context_start + self.context_length - 1])
        total_pad = begin_pad + end_pad
        padding_data = np.random.uniform(
            0.1, 1, [total_pad, self.input_size[1]]).astype('float32')
        self.pad_data = padding_data
        self.inputs = {
            'X': (x, self.lod),
            'Filter': w,
        }
        # Input name lists reused by the gradient checks below.
        self.inputs_val = ['X', 'Filter']
        self.inputs_val_no_x = ['Filter']
        self.inputs_val_no_f = ['X']

        # PaddingData only becomes an op input when padding is required.
        if total_pad != 0:
            self.inputs['PaddingData'] = padding_data
            self.inputs_val = ['X', 'PaddingData', 'Filter']
            self.inputs_val_no_x = ['PaddingData', 'Filter']
            self.inputs_val_no_f = ['PaddingData', 'X']

        self.attrs = {
            'contextStart': self.context_start,
            'contextLength': self.context_length,
            'paddingTrainable': self.padding_trainable,
            'contextStride': self.context_stride
        }
        # Expected output computed by the numpy reference implementation.
        out = seqconv(x, self.lod, w, self.context_length, self.context_start,
                      self.padding_trainable, self.pad_data)
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        # Gradient w.r.t. all inputs (only meaningful with trainable padding).
        if self.padding_trainable:
            self.check_grad(
                set(self.inputs_val), 'Out', max_relative_error=0.05)

    def test_check_grad_input(self):
        # Gradient w.r.t. X only.
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.05,
            no_grad_set=set(self.inputs_val_no_x))

    def test_check_grad_padding_data(self):
        # Gradient w.r.t. PaddingData only.
        if self.padding_trainable:
            self.check_grad(
                ['PaddingData'],
                'Out',
                max_relative_error=0.05,
                no_grad_set=set(['X', 'Filter']))

    def test_check_grad_Filter(self):
        # Gradient w.r.t. Filter only.
        self.check_grad(
            ['Filter'],
            'Out',
            max_relative_error=0.05,
            no_grad_set=set(self.inputs_val_no_f))

    def test_check_grad_input_filter(self):
        if self.padding_trainable:
            self.check_grad(
                ['X', 'Filter'],
                'Out',
                max_relative_error=0.05,
                no_grad_set=set(['PaddingData']))

    def test_check_grad_padding_input(self):
        if self.padding_trainable:
            self.check_grad(
                self.inputs_val_no_f,
                'Out',
                max_relative_error=0.05,
                no_grad_set=set(['Filter']))

    def test_check_grad_padding_filter(self):
        if self.padding_trainable:
            self.check_grad(
                self.inputs_val_no_x,
                'Out',
                max_relative_error=0.05,
                no_grad_set=set(['X']))

    def init_test_case(self):
        # Base configuration: no padding, window of 1 starting at 0.
        self.input_row = 11
        self.context_start = 0
        self.context_length = 1
        self.padding_trainable = False
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        offset_lod = [[0, 4, 5, 8, self.input_row]]
        self.lod = [[]]
        # convert from offset-based lod to length-based lod
        for i in range(len(offset_lod[0]) - 1):
            self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i])
        self.output_represention = 8  # output feature size
class TestSeqProjectCase1(TestSeqProject):
    """Trainable padding with a negative context start."""

    def init_test_case(self):
        self.input_row = 11
        self.context_start = -1
        self.context_length = 3
        self.padding_trainable = True
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        offsets = [0, 4, 5, 8, self.input_row]
        # Sequence lengths are the differences of consecutive offsets.
        self.lod = [[nxt - cur for cur, nxt in zip(offsets, offsets[1:])]]
        self.output_represention = 8  # output feature size
class TestSeqProjectCase2Len0(TestSeqProject):
    """Trainable padding where some sequences have zero length."""

    def init_test_case(self):
        self.input_row = 11
        self.context_start = -1
        self.context_length = 3
        self.padding_trainable = True
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        # Repeated offsets produce empty (length 0) sequences.
        offsets = [0, 0, 4, 5, 5, 8, self.input_row, self.input_row]
        self.lod = [[nxt - cur for cur, nxt in zip(offsets, offsets[1:])]]
        self.output_represention = 8  # output feature size
class TestSeqProjectCase3(TestSeqProject):
    """Random sequence boundaries over a longer input."""

    def init_test_case(self):
        self.input_row = 25
        self.context_start = 2
        self.context_length = 3
        self.padding_trainable = True
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        # Pick 8 random interior cut points to form the sequence offsets.
        interior = list(range(1, self.input_size[0]))
        offsets = [0] + np.sort(random.sample(interior, 8)).tolist() + \
            [self.input_size[0]]
        self.lod = [[nxt - cur for cur, nxt in zip(offsets, offsets[1:])]]
        self.output_represention = 8  # output feature size
class TestSeqConvApi(unittest.TestCase):
    """Smoke test for the fluid.layers.sequence_conv Python API."""

    def test_api(self):
        import paddle.fluid as fluid

        # Build a tiny program: lod-level-1 input fed through sequence_conv.
        data_in = fluid.layers.data('x', shape=[32], lod_level=1)
        conv_out = fluid.layers.sequence_conv(
            input=data_in, num_filters=2, filter_size=3, padding_start=None)

        place = fluid.CPUPlace()
        lod_tensor = fluid.create_lod_tensor(
            np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place)
        executor = fluid.Executor(place)
        executor.run(fluid.default_startup_program())
        # Just verify the program executes without error.
        executor.run(feed={'x': lod_tensor}, fetch_list=[conv_out],
                     return_numpy=False)
if __name__ == '__main__':
    # Run all sequence_conv tests when this module is executed directly.
    unittest.main()
| |
import sys
import unittest
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
from eventlet import proc
warnings.simplefilter('default', DeprecationWarning)
from eventlet import coros
from eventlet import event as _event
from eventlet import Timeout, sleep, getcurrent, with_timeout
from tests import LimitedTestCase, skipped, silence_warnings
DELAY = 0.01
class ExpectedError(Exception):
    """Exception raised on purpose by the tests in this module."""
class TestLink_Signal(LimitedTestCase):
    """Delivery of values and exceptions from a proc.Source to linked queues."""

    @silence_warnings
    def test_send(self):
        s = proc.Source()
        q1, q2, q3 = coros.queue(), coros.queue(), coros.queue()
        s.link_value(q1)
        # Waiting on an unfired Source times out.
        self.assertRaises(Timeout, s.wait, 0)
        assert s.wait(0, None) is None
        assert s.wait(0.001, None) is None
        self.assertRaises(Timeout, s.wait, 0.001)
        s.send(1)
        # Listeners are notified on the next hub iteration, not synchronously.
        assert not q1.ready()
        assert s.wait()==1
        sleep(0)
        assert q1.ready()
        # Listeners linked after the value was sent still get notified.
        s.link_exception(q2)
        s.link(q3)
        assert not q2.ready()
        sleep(0)
        assert q3.ready()
        assert s.wait()==1

    @silence_warnings
    def test_send_exception(self):
        s = proc.Source()
        q1, q2, q3 = coros.queue(), coros.queue(), coros.queue()
        s.link_exception(q1)
        s.send_exception(OSError('hello'))
        sleep(0)
        assert q1.ready()
        # Late links also fire on the next hub iteration.
        s.link_value(q2)
        s.link(q3)
        assert not q2.ready()
        sleep(0)
        assert q3.ready()
        # The stored exception is re-raised by every wait().
        self.assertRaises(OSError, q1.wait)
        self.assertRaises(OSError, q3.wait)
        self.assertRaises(OSError, s.wait)
class TestProc(LimitedTestCase):
    """Basic linking behaviour of proc.Proc."""

    def test_proc(self):
        # Linking a listener proc to a finished proc must raise
        # LinkedCompleted in the listener.
        p = proc.spawn(lambda: 100)
        receiver = proc.spawn(sleep, 1)
        p.link(receiver)
        self.assertRaises(proc.LinkedCompleted, receiver.wait)
        receiver2 = proc.spawn(sleep, 1)
        p.link(receiver2)
        self.assertRaises(proc.LinkedCompleted, receiver2.wait)

    def test_event(self):
        # An Event linked to a proc receives the proc's return value,
        # even when linked after the proc already finished.
        p = proc.spawn(lambda: 100)
        event = _event.Event()
        p.link(event)
        self.assertEqual(event.wait(), 100)

        # was xrange(); range() iterates identically and is Python 3 safe
        for _ in range(3):
            event2 = _event.Event()
            p.link(event2)
            self.assertEqual(event2.wait(), 100)

    def test_current(self):
        # link() with no listener raises in the current greenlet once the
        # linked proc completes.
        p = proc.spawn(lambda: 100)
        p.link()
        self.assertRaises(proc.LinkedCompleted, sleep, 0.1)
class TestCase(LimitedTestCase):
    """Shared helpers for the link/link_value/link_exception test classes.

    Subclasses set ``link_method`` to the Proc method under test and store
    the watched proc in ``self.p``.
    """

    def link(self, p, listener=None):
        # Dispatch to p.link / p.link_value / p.link_exception.
        getattr(p, self.link_method)(listener)

    def tearDown(self):
        LimitedTestCase.tearDown(self)
        self.p.unlink()

    def set_links(self, p, first_time, kill_exc_type):
        # Attach one listener of every supported kind to p and return them
        # so callers can assert on each notification path.
        event = _event.Event()
        self.link(p, event)

        proc_flag = []

        def receiver():
            sleep(DELAY)
            proc_flag.append('finished')
        receiver = proc.spawn(receiver)
        self.link(p, receiver)

        queue = coros.queue(1)
        self.link(p, queue)

        # Linking with no listener targets the current greenlet; once p is
        # already dead (not first_time) it raises immediately.
        try:
            self.link(p)
        except kill_exc_type:
            if first_time:
                raise
        else:
            assert first_time, 'not raising here only first time'

        callback_flag = ['initial']
        self.link(p, lambda *args: callback_flag.remove('initial'))

        for _ in range(10):
            self.link(p, _event.Event())
            self.link(p, coros.queue(1))
        return event, receiver, proc_flag, queue, callback_flag

    def set_links_timeout(self, link):
        # stuff that won't be touched
        event = _event.Event()
        link(event)

        proc_finished_flag = []

        def myproc():
            sleep(10)
            proc_finished_flag.append('finished')
            return 555
        myproc = proc.spawn(myproc)
        link(myproc)

        queue = coros.queue(0)
        link(queue)
        return event, myproc, proc_finished_flag, queue

    def check_timed_out(self, event, myproc, proc_finished_flag, queue):
        # None of the listeners registered via set_links_timeout should
        # have been notified.
        X = object()
        assert with_timeout(DELAY, event.wait, timeout_value=X) is X
        assert with_timeout(DELAY, queue.wait, timeout_value=X) is X
        assert with_timeout(DELAY, proc.waitall, [myproc], timeout_value=X) is X
        assert proc_finished_flag == [], proc_finished_flag
class TestReturn_link(TestCase):
    """Listener notification when the linked proc returns normally."""

    link_method = 'link'

    def test_return(self):
        def return25():
            return 25
        p = self.p = proc.spawn(return25)
        self._test_return(p, True, 25, proc.LinkedCompleted, lambda: sleep(0))
        # repeating the same with dead process
        # was xrange(); range() iterates identically and is Python 3 safe
        for _ in range(3):
            self._test_return(p, False, 25, proc.LinkedCompleted,
                              lambda: sleep(0))

    def _test_return(self, p, first_time, result, kill_exc_type, action):
        # Attach one listener of every kind, then let the proc finish.
        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)

        # stuff that will time out because there's no unhandled exception:
        xxxxx = self.set_links_timeout(p.link_exception)

        try:
            sleep(DELAY * 2)
        except kill_exc_type:
            assert first_time, 'raising here only first time'
        else:
            assert not first_time, 'Should not raise LinkedKilled here after first time'

        # The proc is dead now; value listeners got the result, greenlet
        # listeners got kill_exc_type.
        assert not p, p
        self.assertEqual(event.wait(), result)
        self.assertEqual(queue.wait(), result)
        self.assertRaises(kill_exc_type, receiver.wait)
        self.assertRaises(kill_exc_type, proc.waitall, [receiver])

        sleep(DELAY)
        assert not proc_flag, proc_flag
        assert not callback_flag, callback_flag

        self.check_timed_out(*xxxxx)
class TestReturn_link_value(TestReturn_link):
    """Re-run the TestReturn_link scenarios through link_value()."""

    link_method = 'link_value'
    sync = False
class TestRaise_link(TestCase):
    """Listener notification when the linked proc raises or is killed."""

    link_method = 'link'

    def _test_raise(self, p, first_time, kill_exc_type):
        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)
        xxxxx = self.set_links_timeout(p.link_value)

        try:
            sleep(DELAY)
        except kill_exc_type:
            assert first_time, 'raising here only first time'
        else:
            assert not first_time, 'Should not raise LinkedKilled here after first time'

        # Value/exception listeners re-raise the original error; greenlet
        # listeners get kill_exc_type.
        assert not p, p
        self.assertRaises(ExpectedError, event.wait)
        self.assertRaises(ExpectedError, queue.wait)
        self.assertRaises(kill_exc_type, receiver.wait)
        self.assertRaises(kill_exc_type, proc.waitall, [receiver])

        sleep(DELAY)
        assert not proc_flag, proc_flag
        assert not callback_flag, callback_flag

        self.check_timed_out(*xxxxx)

    @silence_warnings
    def test_raise(self):
        p = self.p = proc.spawn(
            lambda: getcurrent().throw(ExpectedError('test_raise')))
        self._test_raise(p, True, proc.LinkedFailed)
        # repeating the same with dead process
        # was xrange(); range() iterates identically and is Python 3 safe
        for _ in range(3):
            self._test_raise(p, False, proc.LinkedFailed)

    def _test_kill(self, p, first_time, kill_exc_type):
        event, receiver, proc_flag, queue, callback_flag = self.set_links(p, first_time, kill_exc_type)
        xxxxx = self.set_links_timeout(p.link_value)

        p.kill()
        try:
            sleep(DELAY)
        except kill_exc_type:
            assert first_time, 'raising here only first time'
        else:
            assert not first_time, 'Should not raise LinkedKilled here after first time'

        # A killed proc delivers ProcExit to value listeners.
        assert not p, p
        self.assertRaises(proc.ProcExit, event.wait)
        self.assertRaises(proc.ProcExit, queue.wait)
        self.assertRaises(kill_exc_type, proc.waitall, [receiver])
        self.assertRaises(kill_exc_type, receiver.wait)

        sleep(DELAY)
        assert not proc_flag, proc_flag
        assert not callback_flag, callback_flag

        self.check_timed_out(*xxxxx)

    @silence_warnings
    def test_kill(self):
        p = self.p = proc.spawn(sleep, DELAY)
        self._test_kill(p, True, proc.LinkedKilled)
        # repeating the same with dead process
        # was xrange(); range() iterates identically and is Python 3 safe
        for _ in range(3):
            self._test_kill(p, False, proc.LinkedKilled)
class TestRaise_link_exception(TestRaise_link):
    """Re-run the TestRaise_link scenarios through link_exception()."""

    link_method = 'link_exception'
class TestStuff(LimitedTestCase):
    """Miscellaneous waitall() and multi-listener behaviours."""

    def test_wait_noerrors(self):
        x = proc.spawn(lambda: 1)
        y = proc.spawn(lambda: 2)
        z = proc.spawn(lambda: 3)
        self.assertEqual(proc.waitall([x, y, z]), [1, 2, 3])
        # link an event to a finished proc: it fires with the result
        e = _event.Event()
        x.link(e)
        self.assertEqual(e.wait(), 1)
        x.unlink(e)
        e = _event.Event()
        x.link(e)
        self.assertEqual(e.wait(), 1)
        self.assertEqual([proc.waitall([X]) for X in [x, y, z]], [[1], [2], [3]])

    # this test is timing-sensitive
    @skipped
    def test_wait_error(self):
        def x():
            sleep(DELAY)
            return 1
        x = proc.spawn(x)
        z = proc.spawn(lambda: 3)
        y = proc.spawn(lambda: getcurrent().throw(ExpectedError('test_wait_error')))
        y.link(x)
        x.link(y)
        y.link(z)
        z.link(y)
        self.assertRaises(ExpectedError, proc.waitall, [x, y, z])
        self.assertRaises(proc.LinkedFailed, proc.waitall, [x])
        self.assertEqual(proc.waitall([z]), [3])
        self.assertRaises(ExpectedError, proc.waitall, [y])

    def test_wait_all_exception_order(self):
        # if there're several exceptions raised, the earliest one must be
        # raised by wait
        def first():
            sleep(0.1)
            raise ExpectedError('first')
        a = proc.spawn(first)
        b = proc.spawn(lambda: getcurrent().throw(ExpectedError('second')))
        try:
            proc.waitall([a, b])
        except ExpectedError as ex:
            # fixed: "except ExpectedError, ex" is Python-2-only syntax;
            # "as" works on Python 2.6+ and Python 3
            assert 'second' in str(ex), repr(str(ex))
        sleep(0.2)  # sleep to ensure that the other timer is raised

    def test_multiple_listeners_error(self):
        # if there was an error while calling a callback
        # it should not prevent the other listeners from being called
        # also, all of the errors should be logged, check the output
        # manually that they are
        p = proc.spawn(lambda: 5)
        results = []

        def listener1(*args):
            results.append(10)
            raise ExpectedError('listener1')

        def listener2(*args):
            results.append(20)
            raise ExpectedError('listener2')

        def listener3(*args):
            raise ExpectedError('listener3')
        p.link(listener1)
        p.link(listener2)
        p.link(listener3)
        sleep(DELAY * 10)
        assert results in [[10, 20], [20, 10]], results

        p = proc.spawn(lambda: getcurrent().throw(
            ExpectedError('test_multiple_listeners_error')))
        results = []
        p.link(listener1)
        p.link(listener2)
        p.link(listener3)
        sleep(DELAY * 10)
        assert results in [[10, 20], [20, 10]], results

    def _test_multiple_listeners_error_unlink(self, p):
        # notification must not happen after unlink even
        # though notification process has been already started
        results = []

        def listener1(*args):
            p.unlink(listener2)
            results.append(5)
            raise ExpectedError('listener1')

        def listener2(*args):
            p.unlink(listener1)
            results.append(5)
            raise ExpectedError('listener2')

        def listener3(*args):
            raise ExpectedError('listener3')
        p.link(listener1)
        p.link(listener2)
        p.link(listener3)
        sleep(DELAY * 10)
        # whichever listener ran first unlinked the other, so exactly one
        # of them appended
        assert results == [5], results

    def test_multiple_listeners_error_unlink_Proc(self):
        p = proc.spawn(lambda: 5)
        self._test_multiple_listeners_error_unlink(p)

    def test_multiple_listeners_error_unlink_Source(self):
        p = proc.Source()
        proc.spawn(p.send, 6)
        self._test_multiple_listeners_error_unlink(p)

    def test_killing_unlinked(self):
        e = _event.Event()

        def func():
            try:
                raise ExpectedError('test_killing_unlinked')
            except:  # noqa: E722 -- deliberately forward *any* failure
                e.send_exception(*sys.exc_info())
        p = proc.spawn_link(func)
        try:
            try:
                e.wait()
            except ExpectedError:
                pass
        finally:
            p.unlink()  # this disables LinkedCompleted that otherwise would be raised by the next line
        sleep(DELAY)
if __name__=='__main__':
    # Run all proc tests when this module is executed directly.
    unittest.main()
| |
import asyncio
import logging
import typing
import binascii
from lbry.error import InvalidBlobHashError, InvalidDataError
from lbry.blob_exchange.serialization import BlobResponse, BlobRequest
from lbry.utils import cache_concurrent
if typing.TYPE_CHECKING:
from lbry.blob.blob_file import AbstractBlob
from lbry.blob.writer import HashBlobWriter
from lbry.connection_manager import ConnectionManager
log = logging.getLogger(__name__)
class BlobExchangeClientProtocol(asyncio.Protocol):
    """Client protocol for downloading a single blob from a peer.

    One instance handles one request/response exchange at a time; the
    underlying transport may be reused for further blobs by swapping the
    protocol onto it.
    """

    def __init__(self, loop: asyncio.BaseEventLoop, peer_timeout: typing.Optional[float] = 10,
                 connection_manager: typing.Optional['ConnectionManager'] = None):
        self.loop = loop
        self.peer_port: typing.Optional[int] = None
        self.peer_address: typing.Optional[str] = None
        self.transport: typing.Optional[asyncio.Transport] = None
        self.peer_timeout = peer_timeout
        self.connection_manager = connection_manager
        self.writer: typing.Optional['HashBlobWriter'] = None
        self.blob: typing.Optional['AbstractBlob'] = None

        self._blob_bytes_received = 0
        self._response_fut: typing.Optional[asyncio.Future] = None
        self.buf = b''

        # this is here to handle the race when the downloader is closed
        # right as response_fut gets a result.
        # FIX: the explicit loop= argument was removed -- it is deprecated
        # since Python 3.8 and raises TypeError on 3.10+; the Event binds
        # to the running loop on first use.
        self.closed = asyncio.Event()

    def data_received(self, data: bytes):
        """Parse the peer's JSON response, then stream blob payload bytes."""
        if self.connection_manager:
            if not self.peer_address:
                addr_info = self.transport.get_extra_info('peername')
                self.peer_address, self.peer_port = addr_info
            self.connection_manager.received_data(f"{self.peer_address}:{self.peer_port}", len(data))
        if not self.transport or self.transport.is_closing():
            log.warning("transport closing, but got more bytes from %s:%i\n%s", self.peer_address, self.peer_port,
                        binascii.hexlify(data))
            if self._response_fut and not self._response_fut.done():
                self._response_fut.cancel()
            return
        if not self._response_fut:
            log.warning("Protocol received data before expected, probable race on keep alive. Closing transport.")
            return self.close()
        if self._blob_bytes_received and not self.writer.closed():
            # the response header was already consumed; everything further
            # is raw blob payload
            return self._write(data)

        response = BlobResponse.deserialize(self.buf + data)
        if not response.responses and not self._response_fut.done():
            # incomplete response header; buffer and wait for more bytes
            self.buf += data
            return
        else:
            self.buf = b''

        if response.responses and self.blob:
            blob_response = response.get_blob_response()
            if blob_response and not blob_response.error and blob_response.blob_hash == self.blob.blob_hash:
                # set the expected length for the incoming blob if we didn't know it
                self.blob.set_length(blob_response.length)
            elif blob_response and not blob_response.error and self.blob.blob_hash != blob_response.blob_hash:
                # the server started sending a blob we didn't request
                log.warning("mismatch with self.blob %s", self.blob.blob_hash)
                return
        if response.responses:
            log.debug("got response from %s:%i <- %s", self.peer_address, self.peer_port, response.to_dict())
            # fire the Future with the response to our request
            self._response_fut.set_result(response)
        if response.blob_data and self.writer and not self.writer.closed():
            # write blob bytes if we're writing a blob and have blob bytes to write
            self._write(response.blob_data)

    def _write(self, data: bytes):
        """Write payload bytes to the blob writer, clamping any excess."""
        remaining = self.blob.get_length() - self._blob_bytes_received
        if len(data) > remaining:
            # FIX: removed the dead "else: data = data" branch
            data = data[:remaining]
            log.warning("got more than asked from %s:%d, probable sendfile bug", self.peer_address, self.peer_port)
        self._blob_bytes_received += len(data)
        try:
            self.writer.write(data)
        except IOError as err:
            log.error("error downloading blob from %s:%i: %s", self.peer_address, self.peer_port, err)
            if self._response_fut and not self._response_fut.done():
                self._response_fut.set_exception(err)
        except asyncio.TimeoutError as err:  # TODO: is this needed?
            log.error("%s downloading blob from %s:%i", str(err), self.peer_address, self.peer_port)
            if self._response_fut and not self._response_fut.done():
                self._response_fut.set_exception(err)

    async def _download_blob(self) -> typing.Tuple[int, typing.Optional[asyncio.Transport]]:
        """Send the blob request and await the complete blob.

        :return: (number of blob bytes received, transport to reuse for the
            next request, or None when the connection was closed)
        """
        request = BlobRequest.make_request_for_blob_hash(self.blob.blob_hash)
        blob_hash = self.blob.blob_hash
        if not self.peer_address:
            addr_info = self.transport.get_extra_info('peername')
            self.peer_address, self.peer_port = addr_info
        try:
            msg = request.serialize()
            log.debug("send request to %s:%i -> %s", self.peer_address, self.peer_port, msg.decode())
            self.transport.write(msg)
            if self.connection_manager:
                self.connection_manager.sent_data(f"{self.peer_address}:{self.peer_port}", len(msg))
            # FIX: loop= kwarg removed from wait_for (error on Python 3.10+)
            response: BlobResponse = await asyncio.wait_for(self._response_fut, self.peer_timeout)
            availability_response = response.get_availability_response()
            price_response = response.get_price_response()
            blob_response = response.get_blob_response()
            if self.closed.is_set():
                msg = f"cancelled blob request for {blob_hash} immediately after we got a response"
                log.warning(msg)
                raise asyncio.CancelledError(msg)
            if (not blob_response or blob_response.error) and\
                    (not availability_response or not availability_response.available_blobs):
                log.warning("%s not in availability response from %s:%i", self.blob.blob_hash, self.peer_address,
                            self.peer_port)
                log.warning(response.to_dict())
                return self._blob_bytes_received, self.close()
            elif availability_response.available_blobs and \
                    availability_response.available_blobs != [self.blob.blob_hash]:
                log.warning("blob availability response doesn't match our request from %s:%i",
                            self.peer_address, self.peer_port)
                return self._blob_bytes_received, self.close()
            if not price_response or price_response.blob_data_payment_rate != 'RATE_ACCEPTED':
                log.warning("data rate rejected by %s:%i", self.peer_address, self.peer_port)
                return self._blob_bytes_received, self.close()
            if not blob_response or blob_response.error:
                log.warning("blob cant be downloaded from %s:%i", self.peer_address, self.peer_port)
                return self._blob_bytes_received, self.close()
            if not blob_response.error and blob_response.blob_hash != self.blob.blob_hash:
                log.warning("incoming blob hash mismatch from %s:%i", self.peer_address, self.peer_port)
                return self._blob_bytes_received, self.close()
            if self.blob.length is not None and self.blob.length != blob_response.length:
                log.warning("incoming blob unexpected length from %s:%i", self.peer_address, self.peer_port)
                return self._blob_bytes_received, self.close()
            msg = f"downloading {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}," \
                  f" timeout in {self.peer_timeout}"
            log.debug(msg)
            msg = f"downloaded {self.blob.blob_hash[:8]} from {self.peer_address}:{self.peer_port}"
            await asyncio.wait_for(self.writer.finished, self.peer_timeout)
            log.info(msg)
            # await self.blob.finished_writing.wait() not necessary, but a dangerous change. TODO: is it needed?
            return self._blob_bytes_received, self.transport
        except asyncio.TimeoutError:
            return self._blob_bytes_received, self.close()
        except (InvalidBlobHashError, InvalidDataError):
            log.warning("invalid blob from %s:%i", self.peer_address, self.peer_port)
            return self._blob_bytes_received, self.close()

    def close(self):
        """Tear down all request state and close the transport (idempotent)."""
        self.closed.set()
        if self._response_fut and not self._response_fut.done():
            self._response_fut.cancel()
        if self.writer and not self.writer.closed():
            self.writer.close_handle()
        self._response_fut = None
        self.writer = None
        self.blob = None
        if self.transport:
            self.transport.close()
        self.transport = None
        self.buf = b''

    async def download_blob(self, blob: 'AbstractBlob') -> typing.Tuple[int, typing.Optional[asyncio.Transport]]:
        """Download *blob* from the connected peer.

        :return: (number of blob bytes received, transport to reuse or None)
        """
        self.closed.clear()
        blob_hash = blob.blob_hash
        if blob.get_is_verified() or not blob.is_writeable():
            # nothing to do: we already have it, or another writer owns it
            return 0, self.transport
        try:
            self._blob_bytes_received = 0
            self.blob, self.writer = blob, blob.get_blob_writer(self.peer_address, self.peer_port)
            self._response_fut = asyncio.Future(loop=self.loop)
            return await self._download_blob()
        except OSError:
            # i'm not sure how to fix this race condition - jack
            log.warning("race happened downloading %s from %s:%i", blob_hash, self.peer_address, self.peer_port)
            raise
        except asyncio.TimeoutError:
            if self._response_fut and not self._response_fut.done():
                self._response_fut.cancel()
            self.close()
            return self._blob_bytes_received, None
        except asyncio.CancelledError:
            self.close()
            raise
        finally:
            if self.writer and not self.writer.closed():
                self.writer.close_handle()
            self.writer = None

    def connection_made(self, transport: asyncio.Transport):
        addr = transport.get_extra_info('peername')
        self.peer_address, self.peer_port = addr[0], addr[1]
        self.transport = transport
        if self.connection_manager:
            self.connection_manager.connection_made(f"{self.peer_address}:{self.peer_port}")
        log.debug("connection made to %s:%i", self.peer_address, self.peer_port)

    def connection_lost(self, reason):
        if self.connection_manager:
            self.connection_manager.outgoing_connection_lost(f"{self.peer_address}:{self.peer_port}")
        log.debug("connection lost to %s:%i (reason: %s, %s)", self.peer_address, self.peer_port, str(reason),
                  str(type(reason)))
        self.close()
@cache_concurrent
async def request_blob(loop: asyncio.BaseEventLoop, blob: 'AbstractBlob', address: str, tcp_port: int,
                       peer_connect_timeout: float, blob_download_timeout: float,
                       connected_transport: asyncio.Transport = None, connection_id: int = 0,
                       connection_manager: typing.Optional['ConnectionManager'] = None)\
        -> typing.Tuple[int, typing.Optional[asyncio.Transport]]:
    """Request *blob* from the peer at address:tcp_port.

    :param connected_transport: an already-open transport to reuse, if any
    :return: (number of blob bytes received, transport to reuse for the
        next request, or None when the connection should not be kept)
    """
    protocol = BlobExchangeClientProtocol(
        loop, blob_download_timeout, connection_manager
    )
    if connected_transport and not connected_transport.is_closing():
        # reuse the still-open connection from a previous request
        connected_transport.set_protocol(protocol)
        protocol.transport = connected_transport
        log.debug("reusing connection for %s:%d", address, tcp_port)
    else:
        connected_transport = None
    try:
        if not connected_transport:
            # FIX: loop= kwarg removed from wait_for -- deprecated since
            # Python 3.8 and a TypeError on 3.10+
            await asyncio.wait_for(loop.create_connection(lambda: protocol, address, tcp_port),
                                   peer_connect_timeout)
        if blob.get_is_verified() or not blob.is_writeable():
            # file exists but not verified means someone is writing right now, give it time, come back later
            return 0, connected_transport
        return await protocol.download_blob(blob)
    except (asyncio.TimeoutError, ConnectionRefusedError, ConnectionAbortedError, OSError):
        return 0, None
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import functools

import py4j
from py4j.protocol import Py4JJavaError
class JavaException(Exception):
    """
    Base class for exceptions converted from the Java side.

    Carries the original Java error message together with the Java stack
    trace, so the Python traceback still shows where the failure happened
    inside the JVM.
    """

    def __init__(self, msg, stack_trace):
        # Java error message (without the leading class name).
        self.msg = msg
        # Pre-formatted Java stack trace, frames joined with "\n\t at ".
        self.stack_trace = stack_trace

    def __str__(self):
        return "%s\n\t at %s" % (self.msg, self.stack_trace)
class TableException(JavaException):
    """
    Raised for all general errors that occur during table handling.
    """
class CatalogException(JavaException):
    """
    Raised for catalog-related errors.
    """
class DatabaseAlreadyExistException(JavaException):
    """
    Raised when trying to create a database that already exists.
    """
class DatabaseNotEmptyException(JavaException):
    """
    Raised when trying to drop a database that is not empty.
    """
class DatabaseNotExistException(JavaException):
    """
    Raised when trying to operate on a database that does not exist.
    """
class FunctionAlreadyExistException(JavaException):
    """
    Raised when trying to create a function that already exists.
    """
class FunctionNotExistException(JavaException):
    """
    Raised when trying to operate on a function that does not exist.
    """
class PartitionAlreadyExistsException(JavaException):
    """
    Raised when trying to create a partition that already exists.
    """
class PartitionNotExistException(JavaException):
    """
    Raised when operating on a partition that does not exist. Possible causes
    include a non-existent table, a non-partitioned table, or an invalid
    partition spec.
    """
class PartitionSpecInvalidException(JavaException):
    """
    Raised when a PartitionSpec is invalid for the partition key list of a
    partitioned table: for example when the spec has more entries than there
    are partition keys, or when a spec of size 'n' does not match the first
    'n' keys of the partition key list.
    """
class TableAlreadyExistException(JavaException):
    """
    Raised when trying to create a table (or view) that already exists.
    """
class TableNotExistException(JavaException):
    """
    Raised when trying to operate on a table (or view) that does not exist.
    """
class TableNotPartitionedException(JavaException):
    """
    Raised when trying to perform a partition operation on a non-partitioned
    table.
    """
# Fully qualified Java exception class name -> Python exception class used to
# re-raise it on the Python side. All catalog exceptions live in the same Java
# package and share their simple name with the Python class, so those entries
# are generated from the class list instead of being spelled out one by one.
exception_mapping = {
    "org.apache.flink.table.api.TableException": TableException,
}
exception_mapping.update(
    ("org.apache.flink.table.catalog.exceptions." + _cls.__name__, _cls)
    for _cls in (
        CatalogException,
        DatabaseAlreadyExistException,
        DatabaseNotEmptyException,
        DatabaseNotExistException,
        FunctionAlreadyExistException,
        FunctionNotExistException,
        PartitionAlreadyExistsException,
        PartitionNotExistException,
        PartitionSpecInvalidException,
        TableAlreadyExistException,
        TableNotExistException,
        TableNotPartitionedException,
    )
)
def capture_java_exception(f):
    """
    Decorator that translates Py4JJavaError raised by ``f``.

    When the wrapped call raises a :class:`Py4JJavaError` whose Java class is
    listed in ``exception_mapping``, the corresponding Python exception is
    raised instead, carrying the Java error message and stack trace. Java
    errors that are not in the mapping are re-raised unchanged.
    """
    @functools.wraps(f)
    def deco(*a, **kw):
        try:
            return f(*a, **kw)
        except Py4JJavaError as e:
            from pyflink.java_gateway import get_gateway
            # Remember the Java exception on the JVM side for diagnostics.
            get_gateway().jvm.org.apache.flink.client.python.PythonEnvUtils\
                .setPythonException(e.java_exception)
            s = e.java_exception.toString()
            stack_trace = '\n\t at '.join(map(lambda x: x.toString(),
                                              e.java_exception.getStackTrace()))
            for prefix, python_exception in exception_mapping.items():
                if s.startswith(prefix):
                    # toString() is "<class>: <message>". Use [-1] so that an
                    # exception without a message (no ": " separator) falls
                    # back to the whole string instead of raising IndexError.
                    raise python_exception(s.split(': ', 1)[-1], stack_trace)
            # Not a known table/catalog exception: propagate unchanged.
            raise
    return deco
def install_exception_handler():
    """
    Hook an exception handler into Py4j, which could capture some exceptions
    in Java.

    Py4J parses every object returned from a Java API call through
    ``get_return_value``; when the JVM raised, that call throws
    ``py4j.protocol.Py4JJavaError``. This function replaces the
    ``get_return_value`` used by ``py4j.java_gateway`` with a wrapped version
    that converts known Java exceptions into Python ones carrying the same
    error message.

    It's idempotent, could be called multiple times.
    """
    # Always wrap the pristine function from py4j.protocol, never an already
    # patched one, so repeated calls do not stack decorators.
    patched = capture_java_exception(py4j.protocol.get_return_value)
    # Patch only the reference used in py4j.java_gateway (Java API calls).
    py4j.java_gateway.get_return_value = patched
def install_py4j_hooks():
    """
    Hook the classes such as JavaPackage, etc of Py4j to improve the exception
    message.

    Py4J returns a ``JavaPackage`` object when a class lookup fails; calling
    such an object therefore means the Java class is missing from the
    classpath. The hook turns that confusing call into a descriptive
    ``TypeError``.
    """
    def wrapped_call(self, *args, **kwargs):
        # Fix of the original message's grammar: "Could not found" -> "Could not find".
        raise TypeError(
            "Could not find the Java class '%s'. The Java dependencies could be specified via "
            "command line argument '--jarfile' or the config option 'pipeline.jars'" % self._fqn)
    setattr(py4j.java_gateway.JavaPackage, '__call__', wrapped_call)
def convert_py4j_exception(e: Py4JJavaError) -> JavaException:
    """
    Convert a Py4J exception to a :class:`JavaException`.

    The result carries the message of the root Java exception and a stack
    trace that also includes every link of the ``Caused by`` chain.
    """
    def extract_java_stack_trace(java_stack_trace):
        # Join Java frames in the same "\n\t at " style JavaException.__str__ uses.
        return '\n\t at '.join(map(lambda x: x.toString(), java_stack_trace))

    s = e.java_exception.toString()
    cause = e.java_exception.getCause()
    stack_trace = extract_java_stack_trace(e.java_exception.getStackTrace())
    while cause is not None:
        stack_trace += '\nCaused by: %s: %s' % (cause.getClass().getName(), cause.getMessage())
        stack_trace += "\n\t at " + extract_java_stack_trace(cause.getStackTrace())
        cause = cause.getCause()
    # toString() is "<class>: <message>". Use [-1] so that an exception with
    # no message (hence no ": " separator) falls back to the whole string
    # instead of raising IndexError.
    return JavaException(s.split(': ', 1)[-1], stack_trace)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.