text
stringlengths
4
1.02M
meta
dict
from __future__ import division import theano import theano.tensor as T import numpy from scipy.spatial.distance import cdist def paraphrase_ranking(vectors, group): """Rank sentences by projection and return evaluation metrics.""" return ranking(vectors, vectors, group, ns=[4], exclude_self=True) def ranking(candidates, vectors, correct, ns=[1,5,10], exclude_self=False): """Rank `candidates` in order of similarity for each vector and return evaluation metrics. `correct[i][j]` indicates whether for vector i the candidate j is correct. """ #distances = cdist(vectors, candidates, metric='cosine') distances = Cdist(batch_size=2**13)(vectors, candidates) result = {'ranks' : [] , 'precision' : {}, 'recall' : {}, 'overlap' : {} } for n in ns: result['precision'][n] = [] result['recall'][n] = [] result['overlap'][n] = [] for j, row in enumerate(distances): ranked = numpy.argsort(row) if exclude_self: ranked = ranked[ranked!=j] id_correct = numpy.where(correct[j][ranked])[0] rank1 = id_correct[0] + 1 topn = {} for n in ns: id_topn = ranked[:n] overlap = len(set(id_topn).intersection(set(ranked[id_correct]))) result['precision'][n].append(overlap/n) result['recall' ][n].append(overlap/len(id_correct)) result['overlap' ][n].append(overlap) result['ranks'].append(rank1) return result class Cdist(): """Return cosine distances between two sets of vectors.""" def __init__(self, batch_size=None): self.batch_size = batch_size self.U = T.matrix('U') self.V = T.matrix('V') self.U_norm = self.U / self.U.norm(2, axis=1).reshape((self.U.shape[0], 1)) self.V_norm = self.V / self.V.norm(2, axis=1).reshape((self.V.shape[0], 1)) self.W = T.dot(self.U_norm, self.V_norm.T) self.cosine = theano.function([self.U, self.V], self.W) def __call__(self, A, B): if self.batch_size is None: chunks = [A] else: chunks = numpy.split(A, [i for i in range(self.batch_size, A.shape[0], self.batch_size) ]) cosines = numpy.vstack([self.cosine(chunk, B) for chunk in chunks]) return 1 - cosines
{ "content_hash": "459d29e8e899c824dccc22529c198d51", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 95, "avg_line_length": 40.60344827586207, "alnum_prop": 0.5876857749469214, "repo_name": "gchrupala/visually-grounded-speech", "id": "404a9613c09639d79008f99c5a27e28433084370", "size": "2413", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "imaginet/evaluate.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "88977" }, { "name": "R", "bytes": "8394" } ], "symlink_target": "" }
""" Defines PASCAL_VOC datatset handling. """ import numpy as np import os import xml.dom.minidom as minidom import tarfile from PIL import Image from neon.data.datasets import Dataset from neon.util.persist import save_obj from neon.util.persist import load_obj # background class is always indexed at 0 PASCAL_VOC_CLASSES = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor') PASCAL_VOC_NUM_CLASSES = 20 + 1 # 20 object classes and 1 background FRCN_IMG_PER_BATCH = 4 FRCN_ROI_PER_IMAGE = 64 # how many percentage should sample from the foreground obj FRCN_FG_FRAC = 0.25 FRCN_IOU_THRE = 0.5 # IoU threshold to be considered FRCN_FG_IOU_THRE = 0.5 # IoU threshold to be considered as foreground obj # IoU low threshold to be considered as background obj FRCN_BG_IOU_THRE_LOW = 0.1 FRCN_BG_IOU_THRE_HIGH = 0.5 FRCN_MIN_SCALE = 600 # 600 # the max image scales on the min dim FRCN_MAX_SCALE = 1000 # 1000 # the max image scales on the max dim # From Caffe: # Pixel mean values (BGR order) as a (1, 1, 3) array # These are the values originally used for training VGG16 # __C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]]) FRCN_PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]]) # the loaded image will be (H, W, C) need to make it (C, H, W) FRCN_IMG_DIM_SWAP = (2, 0, 1) FRCN_EPS = 1e-14 dataset_meta = { 'test-2007': dict(size=460032000, file='VOCtest_06-Nov-2007.tar', url='http://host.robots.ox.ac.uk/pascal/VOC/voc2007', subdir='VOCdevkit/VOC2007'), 'trainval-2007': dict(size=451020800, file='VOCtrainval_06-Nov-2007.tar', url='http://host.robots.ox.ac.uk/pascal/VOC/voc2007', subdir='VOCdevkit/VOC2007'), 'trainval-2012': dict(size=2000000000, file='VOCtrainval_11-May-2012.tar', url='http://host.robots.ox.ac.uk/pascal/VOC/voc2012', subdir='VOCdevkit/VOC2012'), 'selective-search': dict(size=628395563, 
file='selective_search_data_pkl.tar.gz', url='https://s3-us-west-1.amazonaws.com/nervana-pascal-voc-data', subdir='selective_search_data_pkl') } class PASCALVOC(Dataset): """ Construct a PASCAL VOC dataset object. For now, it will also load precomputed selective search results as ROIs. The structure of VOC is: $VOC_ROOT: path/VOCdevkit/VOC2007 $VOC_ROOT/ImageSet/Main/train.txt (or test.txt etc.): image index file $VOC_ROOT/Annotations/*.xml: classes and bb for each image Notes: 1. ground truth bounding rp are 1-based pixel coordinates, need to make it 0-based for input data 2. bounding box coordinate: (x_min, y_min, x_max, y_max) Args: image_set (str) : 'trainval' or 'test' year (String) : e.g. '2007' path (String) : Path to data file add_flipped (Bool) : whether to enhance the dataset with flipped images overlap_thre (Float): the IOU threshold of bbox to be used for training output_type (Int, optional): the type of data iterator will yield, to provide data for FRCN or its variants 0 (normal FRCN model) -- X: (image, rois) Y: (labels, (bb targets,bb mask)) 1 (label stream with ROI) -- X: (image, rois) Y: (labels) 2 (label stream no ROI) -- X: image Y: labels n_mb (Int, optional): how many minibatch to iterate through, can use value smaller than nbatches for debugging img_per_batch (Int, optional): how many images processed per batch rois_per_img (Int, optional): how many rois to pool from each image rois_random_sample (Bool, optional): randomly sample the ROIs. 
Default to be true """ def __init__(self, image_set, year, path='.', add_flipped=False, overlap_thre=None, output_type=0, n_mb=None, img_per_batch=None, rois_per_img=None, rois_random_sample=True, shuffle=False): self.isRoiDB = True self.batch_index = 0 self.year = year self.image_set = image_set self.add_flipped = add_flipped self.overlap_thre = overlap_thre if overlap_thre else FRCN_IOU_THRE self.output_type = output_type # how many ROIs per image self.rois_per_image = rois_per_img if rois_per_img else FRCN_ROI_PER_IMAGE self.img_per_batch = img_per_batch if img_per_batch else FRCN_IMG_PER_BATCH self.fg_rois_per_image = FRCN_FG_FRAC * self.rois_per_image self.bg_rois_per_image = self.rois_per_image - self.fg_rois_per_image self.rois_per_batch = self.rois_per_image * self.img_per_batch self.rois_random_sample = rois_random_sample self.shuffle = shuffle self.cache_file_name = 'voc_{}_{}_flip_{}_ovlp_{}.pkl'.format(self.year, self.image_set, self.add_flipped, self.overlap_thre) print 'prepare PASCAL VOC {} from year {}: add flipped image {} and overlap threshold {}'\ .format(self.image_set, self.year, self.add_flipped, self.overlap_thre) # PASCAL class to index self.num_classes = PASCAL_VOC_NUM_CLASSES self._class_to_index = dict( zip(PASCAL_VOC_CLASSES, xrange(self.num_classes))) # load the voc dataset self.voc_root = self.load_voc(image_set, year, path) self.cache_file = os.path.join(self.voc_root, self.cache_file_name) # load the precomputed ss results from voc data, it includes both 2007 and 2012 data self.ss_path = self.load_voc('ss', None, path) # VOC paths and infos self.image_index_file = os.path.join(self.voc_root, 'ImageSets', 'Main', self.image_set + '.txt') self.image_path = os.path.join(self.voc_root, 'JPEGImages') self._image_file_ext = '.jpg' self.annotation_path = os.path.join(self.voc_root, 'Annotations') self._annotation_file_ext = '.xml' self._annotation_obj_tag = 'object' self._annotation_class_tag = 'name' self._annotation_xmin_tag = 'xmin' 
self._annotation_xmax_tag = 'xmax' self._annotation_ymin_tag = 'ymin' self._annotation_ymax_tag = 'ymax' self._selective_search_ext = '.pkl' self.selective_search_file = os.path.join( self.ss_path, '_'.join(['voc', year, self.image_set, 'selectivesearch.pkl'])) self._bb_xmin_idx = 0 self._bb_ymin_idx = 1 self._bb_xmax_idx = 2 self._bb_ymax_idx = 3 # self.rois_per_batch is 128 (2*64) ROIs # But the image path batch size is self.img_per_batch # need to control the batch size here print "Backend batchsize is changed to be image_per_batch from PASCAL_VOC dataset" self.be.bsz = self.img_per_batch # backend tensor to push the data self.image_shape = (3, FRCN_MAX_SCALE, FRCN_MAX_SCALE) self.img_np = np.zeros( (3, FRCN_MAX_SCALE, FRCN_MAX_SCALE, self.be.bsz), dtype=np.float32) self.dev_X_img = self.be.iobuf(self.image_shape, dtype=np.float32) self.dev_X_img_chw = self.dev_X_img.reshape( 3, FRCN_MAX_SCALE, FRCN_MAX_SCALE, self.be.bsz) # for rois, features are 4 + 1 (idx within the batch) self.dev_X_rois = self.be.zeros((self.rois_per_batch, 5)) self.dev_y_labels_flat = self.be.zeros( (1, self.rois_per_batch), dtype=np.int32) self.dev_y_labels = self.be.zeros( (self.num_classes, self.rois_per_batch), dtype=np.int32) self.dev_y_bbtargets = self.be.zeros( (self.num_classes * 4, self.rois_per_batch)) self.dev_y_bbmask = self.be.zeros( (self.num_classes * 4, self.rois_per_batch)) # the shape will indicate the shape for 1st path (ImageNet model), and # 2nd path (ROIs) self.shape = [self.image_shape, self.num_classes * 4] # Need to do the following: # 1. load the image index list # 2. for each image, load the ground truth from pascal annotation # 3. load the selective search ROIs (this step needs gt ROIs) # 4.1. merge the ROIs # 4.2. may have to add the flipped images for training # 4.3. add the fields for max overlap and max overlapped classes # 4.4. add the bounding box targets for regression # 5. 
during minibatch feeding: # - rescale images # - rescale ROIs # - random select foreground ROIs (bigger ones) # - random select background ROIS (smaller ones) # - clamp bg ROI labels (to be 0) # - convert ROIs into the regression target (ROIs, 4*21) # 1. assert os.path.exists(self.image_index_file), \ 'Image index file does not exist: {}'.format(self.image_index_file) with open(self.image_index_file) as f: self.image_index = [x.strip() for x in f.readlines()] # self.image_index = image_index * 2 if self.add_flipped else image_index self.num_images = len(self.image_index) self.num_image_entries = self.num_images * \ 2 if self.add_flipped else self.num_images self.ndata = self.num_image_entries * self.rois_per_image self.nbatches = self.num_image_entries/self.img_per_batch if n_mb is not None: self.nbatches = n_mb if os.path.exists(self.cache_file): self.roi_db = load_obj(self.cache_file) print 'ROI dataset loaded from file {}'.format(self.cache_file) else: # 2. self.roi_gt = self.load_pascal_roi_groundtruth() # 3. self.roi_ss = self.load_pascal_roi_selectivesearch() # 4. 
self.roi_db = self.combine_gt_ss_roi() save_obj(self.roi_db, self.cache_file) print 'wrote ROI dataset to {}'.format(self.cache_file) def load_voc(self, dataset, year=None, path="."): """ dataset: 'trainval', 'test', or 'ss' year: 2007 or 2012 if not 'ss', otherwise None For selective search data Fetch the pre-computed selective search data which are converted from the MAT files available from http://www.cs.berkeley.edu/~rbg/fast-rcnn-data/selective_search_data.tgz """ dataset = 'selective-search' if year is None else '-'.join([dataset, year]) voc = dataset_meta[dataset] workdir, filepath, datadir = self._valid_path_append(path, '', voc['file'], voc['subdir']) if not os.path.exists(filepath): self.fetch_dataset(voc['url'], voc['file'], filepath, voc['size']) with tarfile.open(filepath) as f: f.extractall(workdir) return datadir def __iter__(self): """ Generator that can be used to iterate over this dataset. Each minibatch is constructed from self.img_per_batch images, and FRCN_BATCH_SIZE ROIs 1. At the begining of the epoch, shuffle the dataset instances 2. 
For each minibatch, sample the ROIs from each image Yields: tuples, tuples, first tuple contains image that goes into an ImageNet model and ROI data second tuple contains class labels for each ROIs and bounding box regression targets """ self.batch_index = 0 # permute the dataset each epoch if self.shuffle is False: shuf_idx = range(self.num_image_entries) else: shuf_idx = self.be.rng.permutation(self.num_image_entries) for self.batch_index in xrange(self.nbatches): start = self.batch_index * self.img_per_batch end = (self.batch_index + 1) * self.img_per_batch db_inds = shuf_idx[start:end] mb_db = [self.roi_db[i] for i in db_inds] rois_mb = np.zeros((self.rois_per_batch, 5), dtype=np.float32) labels_blob = np.zeros((self.rois_per_batch), dtype=np.int32) bbox_targets_blob = np.zeros((self.rois_per_batch, 4 * self.num_classes), dtype=np.float32) bbox_loss_blob = np.zeros( bbox_targets_blob.shape, dtype=np.float32) self.img_np[:] = 0 for im_i, db in enumerate(mb_db): # load and process the image using PIL im = Image.open(db['img_file']) # This is RGB order im_shape = np.array(im.size, np.int32) im_size_min = np.min(im_shape) im_size_max = np.max(im_shape) im_scale = float(FRCN_MIN_SCALE) / float(im_size_min) # Prevent the biggest axis from being more than FRCN_MAX_SCALE if np.round(im_scale * im_size_max) > FRCN_MAX_SCALE: im_scale = float(FRCN_MAX_SCALE) / float(im_size_max) im_shape = (im_shape * im_scale).astype(int) im = im.resize(im_shape, Image.LINEAR) # load it to numpy and flip the channel RGB to BGR im = np.array(im)[:, :, ::-1] if db['flipped']: im = im[:, ::-1, :] # Mean subtract and scale an image im = im.astype(np.float32, copy=False) im -= FRCN_PIXEL_MEANS # Sample fore-ground and back-ground ROIs from the proposals and labels labels, overlaps, im_rois, bbox_targets, bbox_loss \ = _sample_fg_bg_rois(db, self.fg_rois_per_image, self.rois_per_image, self.num_classes, self.rois_random_sample) # Add to RoIs blob rois = im_rois * im_scale num_rois_this_image 
= rois.shape[0] slice_i = slice(im_i * self.rois_per_image, im_i * self.rois_per_image + num_rois_this_image) batch_ind = im_i * np.ones((num_rois_this_image, 1)) # add the corresponding image ind (within this batch) to the ROI data rois_this_image = np.hstack((batch_ind, rois)) rois_mb[slice_i] = rois_this_image # Add to labels, bbox targets, and bbox loss blobs labels_blob[slice_i] = labels.ravel() bbox_targets_blob[slice_i] = bbox_targets bbox_loss_blob[slice_i] = bbox_loss # write it to backend tensor self.img_np[:, :im_shape[1], :im_shape[0], im_i] = im.transpose(FRCN_IMG_DIM_SWAP) self.dev_X_img_chw.set(self.img_np) self.dev_X_rois[:] = rois_mb self.dev_y_labels_flat[:] = labels_blob.reshape(1, -1) self.dev_y_labels[:] = self.be.onehot( self.dev_y_labels_flat, axis=0) self.dev_y_bbtargets[:] = bbox_targets_blob.T.astype( np.float, order='C') self.dev_y_bbmask[:] = bbox_loss_blob.T.astype(np.int32, order='C') if self.output_type == 0: X = (self.dev_X_img, self.dev_X_rois) Y = (self.dev_y_labels, (self.dev_y_bbtargets, self.dev_y_bbmask)) elif self.output_type == 1: X = (self.dev_X_img, self.dev_X_rois) Y = self.dev_y_labels elif self.output_type == 2: X = self.dev_X_img Y = self.dev_y_labels else: raise ValueError( 'Do not support output_type to be {}'.format(self.output_type)) yield X, Y def reset(self): """ For resetting the starting index of this dataset back to zero. """ self.batch_index = 0 def load_pascal_roi_groundtruth(self): """ load the voc database ground truth ROIs """ return [self.load_pascal_annotation(img) for img in self.image_index] def load_pascal_annotation(self, image_index): """ For a particular image, load ground truth annotations of object classes and their bounding rp from the pascal voc dataset files are in the VOC directory/Annotations. 
Each xml file corresponds to a particular image index """ annotation_file = os.path.join(self.annotation_path, image_index + self._annotation_file_ext) with open(annotation_file) as f: annotation_data = minidom.parseString(f.read()) # how many objects in it objs = annotation_data.getElementsByTagName(self._annotation_obj_tag) num_objs = len(objs) # initialize ground truth classes and bb gt_bb = np.zeros((num_objs, 4), dtype=np.uint16) gt_classes = np.zeros((num_objs, 1), dtype=np.int32) gt_overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32) gt_max_overlap = np.zeros((num_objs, 1)) gt_max_class = np.zeros((num_objs, 1)) # load all the info for idx, obj in enumerate(objs): x1 = float(load_data_from_xml_tag(obj, self._annotation_xmin_tag)) - 1 y1 = float(load_data_from_xml_tag(obj, self._annotation_ymin_tag)) - 1 x2 = float(load_data_from_xml_tag(obj, self._annotation_xmax_tag)) - 1 y2 = float(load_data_from_xml_tag(obj, self._annotation_ymax_tag)) - 1 cls = self._class_to_index[ str(load_data_from_xml_tag(obj, self._annotation_class_tag)). 
lower().strip()] gt_bb[idx] = [x1, y1, x2, y2] gt_classes[idx] = cls gt_overlaps[idx, cls] = 1.0 gt_max_overlap[idx] = 1.0 gt_max_class[idx] = cls gt_bb_target = np.zeros((num_objs, 5)) gt_bb_target[:, 0] = gt_max_class.ravel() return {'gt_bb': gt_bb, 'gt_classes': gt_classes, 'gt_overlaps': gt_overlaps, 'img_id': image_index, 'max_overlap_area': gt_max_overlap, 'max_overlap_class': gt_max_class, 'bb_targets': gt_bb_target, } def load_pascal_roi_selectivesearch(self): """ Load the pre-computed selective search data on PASCAL VOC in pickle file The pickle file contains images and rp: images: image indices for the dataset (Img, 1) name in string is in images[i][0][0] rp: all the proposed ROIs for each image (Img, 1) in bb[i], there are (B, 4) for B proposed ROIs The coordinates are ordered as: [y1, x1, y2, x2] While ground truth coordinates are: [x1, y1, x2, y2] So it needs re-ordering """ assert self.roi_gt is not None, 'Ground truth ROIs need to be loaded first' assert os.path.exists(self.selective_search_file), \ 'selected search data does not exist' ss_data = load_obj(self.selective_search_file) ss_bb = ss_data['boxes'].ravel() ss_img_idx = ss_data['images'].ravel() ss_num_img = ss_bb.shape[0] assert ss_num_img == self.num_images, \ 'Number of images in SS data must match number of image in the dataset' roi_ss = [] # load the bb from SS and compare with gt for i in xrange(ss_num_img): # make sure the image index match assert self.image_index[i] == ss_img_idx[i][0] bb = (ss_bb[i][:, (1, 0, 3, 2)] - 1) num_boxes = bb.shape[0] overlaps = np.zeros( (num_boxes, self.num_classes), dtype=np.float32) gt_bb = self.roi_gt[i]['gt_bb'] gt_classes = self.roi_gt[i]['gt_classes'].ravel() gt_overlap, gt_dim = calculate_bb_overlap(bb.astype(np.float), gt_bb.astype(np.float)) max_overlap_area = gt_overlap.max(axis=gt_dim) max_overlap_arg = gt_overlap.argmax(axis=gt_dim) # only put the non-zero overlaps into the table I = np.where(max_overlap_area > 0)[0] overlaps[I, 
gt_classes[max_overlap_arg[I]]] = max_overlap_area[I] max_overlap_class = overlaps.argmax(axis=gt_dim) max_overlaps = overlaps.max(axis=gt_dim) # prepare the bounding box targets ss_bb_targets = np.zeros((num_boxes, 5), np.float32) # only the ones with large enough overlap with gt are used use_idx = np.where(max_overlaps >= self.overlap_thre)[0] bb_targets = self._compute_bb_targets(gt_bb[max_overlap_arg[use_idx]], bb[use_idx], max_overlap_class[use_idx]) ss_bb_targets[use_idx] = bb_targets roi_ss.append({ 'ss_bb': bb, 'gt_classes': np.zeros((num_boxes, 1), dtype=np.int32), 'gt_overlaps': overlaps, 'max_overlap_area': max_overlap_area.reshape(-1, 1), 'max_overlap_class': max_overlap_class.reshape(-1, 1), 'bb_targets': ss_bb_targets, }) return roi_ss def combine_gt_ss_roi(self): assert len(self.roi_gt) == len(self.roi_ss) == self.num_images, \ 'ROIs from GT and SS do not match the dataset images' # Compute values needed for means and stds class_counts = np.zeros((self.num_classes, 1), ) + FRCN_EPS sums = np.zeros((self.num_classes, 4)) squared_sums = np.zeros((self.num_classes, 4)) roi_gt_ss = [None] * self.num_image_entries for i in xrange(self.num_images): roi_gt_ss[i] = {} roi_gt_ss[i]['bb'] = np.vstack((self.roi_gt[i]['gt_bb'], self.roi_ss[i]['ss_bb'])) roi_gt_ss[i]['gt_classes'] = np.vstack((self.roi_gt[i]['gt_classes'], self.roi_ss[i]['gt_classes'])) roi_gt_ss[i]['gt_overlaps'] = np.vstack([self.roi_gt[i]['gt_overlaps'], self.roi_ss[i]['gt_overlaps']]) roi_gt_ss[i]['max_overlap_area'] = np.vstack([self.roi_gt[i]['max_overlap_area'], self.roi_ss[i]['max_overlap_area']]) roi_gt_ss[i]['max_overlap_class'] = np.vstack([self.roi_gt[i]['max_overlap_class'], self.roi_ss[i]['max_overlap_class']]) roi_gt_ss[i]['img_id'] = self.roi_gt[i]['img_id'] roi_gt_ss[i]['flipped'] = False image_file = os.path.join(self.image_path, self.roi_gt[i]['img_id'] + self._image_file_ext) roi_gt_ss[i]['img_file'] = image_file # add bounding box targets for training bb_targets = 
np.vstack([self.roi_gt[i]['bb_targets'], self.roi_ss[i]['bb_targets']]) roi_gt_ss[i]['bb_targets'] = bb_targets for cls in xrange(1, self.num_classes): cls_inds = np.where(bb_targets[:, 0] == cls)[0] if cls_inds.size > 0: class_counts[cls] += cls_inds.size sums[cls, :] += bb_targets[cls_inds, 1:].sum(axis=0) squared_sums[cls, :] += (bb_targets[cls_inds, 1:] ** 2).sum(axis=0) if self.add_flipped: width = Image.open(image_file).size[0] fliped_bb = roi_gt_ss[i]['bb'].copy() fliped_bb[:, self._bb_xmin_idx] = width - \ roi_gt_ss[i]['bb'][:, self._bb_xmax_idx] - 1 fliped_bb[:, self._bb_xmax_idx] = width - \ roi_gt_ss[i]['bb'][:, self._bb_xmin_idx] - 1 bb_targets_flipped = bb_targets bb_targets_flipped[:, 1] *= -1 roi_gt_ss[i + self.num_images] = { 'bb': fliped_bb, 'gt_classes': roi_gt_ss[i]['gt_classes'], 'gt_overlaps': roi_gt_ss[i]['gt_overlaps'], 'max_overlap_area': roi_gt_ss[i]['max_overlap_area'], 'max_overlap_class': roi_gt_ss[i]['max_overlap_class'], 'img_id': roi_gt_ss[i]['img_id'], 'flipped': True, 'img_file': image_file, 'bb_targets': bb_targets_flipped } for cls in xrange(1, self.num_classes): cls_inds = np.where(bb_targets[:, 0] == cls)[0] if cls_inds.size > 0: class_counts[cls] += cls_inds.size sums[cls, :] += bb_targets_flipped[cls_inds, 1:].sum(axis=0) squared_sums[ cls, :] += (bb_targets_flipped[cls_inds, 1:] ** 2).sum(axis=0) means = sums / class_counts stds = np.sqrt(squared_sums / class_counts - means ** 2) # Normalize targets for i in xrange(self.num_images): targets = roi_gt_ss[i]['bb_targets'] for cls in xrange(1, self.num_classes): cls_inds = np.where(targets[:, 0] == cls)[0] roi_gt_ss[i]['bb_targets'][cls_inds, 1:] -= means[cls, :] roi_gt_ss[i]['bb_targets'][cls_inds, 1:] /= stds[cls, :] return roi_gt_ss def _compute_bb_targets(self, gt_bb, rp_bb, labels): rp_widths = rp_bb[:, 2] - rp_bb[:, 0] + FRCN_EPS rp_heights = rp_bb[:, 3] - rp_bb[:, 1] + FRCN_EPS rp_ctr_x = rp_bb[:, 0] + 0.5 * rp_widths rp_ctr_y = rp_bb[:, 1] + 0.5 * rp_heights gt_widths = 
gt_bb[:, 2] - gt_bb[:, 0] + FRCN_EPS gt_heights = gt_bb[:, 3] - gt_bb[:, 1] + FRCN_EPS gt_ctr_x = gt_bb[:, 0] + 0.5 * gt_widths gt_ctr_y = gt_bb[:, 1] + 0.5 * gt_heights targets_dx = (gt_ctr_x - rp_ctr_x) / rp_widths targets_dy = (gt_ctr_y - rp_ctr_y) / rp_heights targets_dw = np.log(gt_widths / rp_widths) targets_dh = np.log(gt_heights / rp_heights) targets = np.concatenate((labels[:, np.newaxis], targets_dx[:, np.newaxis], targets_dy[:, np.newaxis], targets_dw[:, np.newaxis], targets_dh[:, np.newaxis], ), axis=1) return targets def calculate_bb_overlap(rp, gt): """ calculate the overlaps between 2 list of bounding rp Arguments: rp (list): an array of region proposals, shape (R, 4) gt (list): an array of ground truth ROIs, shape (G, 4) Outputs: overlaps: a matrix of overlaps between 2 list, shape (R, G) """ gt_dim = 1 R = rp.shape[0] G = gt.shape[0] overlaps = np.zeros((R, G), dtype=np.float32) for g in range(G): gt_box_area = float( (gt[g, 2] - gt[g, 0] + 1) * (gt[g, 3] - gt[g, 1] + 1) ) for r in range(R): iw = float( min(rp[r, 2], gt[g, 2]) - max(rp[r, 0], gt[g, 0]) + 1 ) if iw > 0: ih = float( min(rp[r, 3], gt[g, 3]) - max(rp[r, 1], gt[g, 1]) + 1 ) if ih > 0: ua = float( (rp[r, 2] - rp[r, 0] + 1) * (rp[r, 3] - rp[r, 1] + 1) + gt_box_area - iw * ih ) overlaps[r, g] = iw * ih / ua return overlaps, gt_dim def _sample_fg_bg_rois(roidb, fg_rois_per_image, rois_per_image, num_classes, randomness): """Generate a random sample of RoIs comprising foreground and background examples. 
""" # label = class RoI has max overlap with labels = roidb['max_overlap_class'] overlaps = roidb['max_overlap_area'] rois = roidb['bb'] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(overlaps >= FRCN_FG_IOU_THRE)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = int(np.minimum(fg_rois_per_image, fg_inds.size)) # Sample foreground regions without replacement if fg_inds.size > 0: if randomness is True: fg_inds = np.random.choice(fg_inds, size=fg_rois_per_this_image, replace=False) else: fg_inds = fg_inds[range(fg_rois_per_this_image)] # Select background RoIs as those within [FRCN_BG_IOU_THRE_LOW, FRCN_BG_IOU_THRE_HIGH) bg_inds = np.where((overlaps < FRCN_BG_IOU_THRE_HIGH) & (overlaps >= FRCN_BG_IOU_THRE_LOW))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = int(np.minimum(rois_per_image - fg_rois_per_this_image, bg_inds.size)) # Sample foreground regions without replacement if bg_inds.size > 0: if randomness is True: bg_inds = np.random.choice(bg_inds, size=bg_rois_per_this_image, replace=False) else: bg_inds = bg_inds[range(bg_rois_per_this_image)] # The indices that we're selecting (both fg and bg) keep_inds = np.append(fg_inds, bg_inds) # Select sampled values from various arrays: labels = labels[keep_inds] # Clamp labels for the background RoIs to 0 labels[fg_rois_per_this_image:] = 0 overlaps = overlaps[keep_inds] rois = rois[keep_inds] bbox_targets, bbox_loss_weights = \ _get_bbox_regression_labels(roidb['bb_targets'][keep_inds, :], num_classes) return labels, overlaps, rois, bbox_targets, bbox_loss_weights def _get_bbox_regression_labels(bbox_target_data, num_classes): """Bounding-box regression targets are stored in a compact form in the roidb. This function expands those targets into the 4-of-4*K representation used by the network (i.e. 
only one class has non-zero targets). The loss weights are similarly expanded. Returns: bbox_target_data (ndarray): N x 4K blob of regression targets bbox_loss_weights (ndarray): N x 4K blob of loss weights """ clss = bbox_target_data[:, 0] bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32) inds = np.where(clss > 0)[0] for ind in inds: cls = clss[ind] start = 4 * cls end = start + 4 bbox_targets[ind, start:end] = bbox_target_data[ind, 1:] bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.] return bbox_targets, bbox_loss_weights def load_data_from_xml_tag(element, tag): return element.getElementsByTagName(tag)[0].childNodes[0].data
{ "content_hash": "79b4f7dfbe18be25a866b6ef435ad945", "timestamp": "", "source": "github", "line_count": 745, "max_line_length": 98, "avg_line_length": 43.251006711409396, "alnum_prop": 0.5388554403823475, "repo_name": "DougFirErickson/neon", "id": "150ad8e7a32a58bb070d801833b235a147d67ee6", "size": "32963", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "neon/data/pascal_voc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "6534" }, { "name": "C++", "bytes": "67530" }, { "name": "CSS", "bytes": "696700" }, { "name": "Cuda", "bytes": "14937" }, { "name": "Makefile", "bytes": "10977" }, { "name": "Python", "bytes": "1436442" } ], "symlink_target": "" }
import functools
import types


def Cache(obj):
  """Decorator for caching read-only properties.

  Example usage (always returns the same Foo instance):
    @Cache
    def CreateFoo():
      return Foo()

  If CreateFoo() accepts parameters, a separate cached value is maintained
  for each unique parameter combination.
  """
  cache = obj.__cache = {}

  @functools.wraps(obj)
  def Cacher(*args, **kwargs):
    # NOTE(review): keys are the str() of the arguments — assumes arguments
    # have stable, distinct string forms; confirm callers never rely on
    # distinguishing equal-printing arguments.
    key = str(args) + str(kwargs)
    if key not in cache:
      cache[key] = obj(*args, **kwargs)
    return cache[key]
  return Cacher


def Disabled(*args):
  """Decorator for disabling tests/benchmarks.

  May be used without args to unconditionally disable:
    @Disabled  # Unconditionally disabled.

  If args are given, the test will be disabled if ANY of the args match the
  browser type, OS name or OS version:
    @Disabled('canary')        # Disabled for canary browsers
    @Disabled('win')           # Disabled on Windows.
    @Disabled('win', 'linux')  # Disabled on both Windows and Linux.
    @Disabled('mavericks')     # Disabled on Mac Mavericks (10.9) only.
  """
  def _Disabled(func):
    # Non-functions (e.g. classes) are tagged in place; plain functions get
    # a wrapper so the attribute lives on the wrapper that callers see.
    if not isinstance(func, types.FunctionType):
      func._disabled_strings = disabled_strings
      return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      func(*args, **kwargs)
    wrapper._disabled_strings = disabled_strings
    return wrapper

  if len(args) == 1 and callable(args[0]):
    # Bare @Disabled (no arguments): disable unconditionally.
    disabled_strings = []
    return _Disabled(args[0])
  disabled_strings = list(args)
  for disabled_string in disabled_strings:
    # TODO(tonyg): Validate that these strings are recognized.
    assert isinstance(disabled_string, str), '@Disabled accepts a list of strs'
  return _Disabled


def Enabled(*args):
  """Decorator for enabling tests/benchmarks.

  The test will be enabled if ANY of the args match the browser type, OS name
  or OS version:
    @Enabled('canary')        # Enabled only for canary browsers
    @Enabled('win')           # Enabled only on Windows.
    @Enabled('win', 'linux')  # Enabled only on Windows or Linux.
    @Enabled('mavericks')     # Enabled only on Mac Mavericks (10.9).
  """
  def _Enabled(func):
    if not isinstance(func, types.FunctionType):
      func._enabled_strings = enabled_strings
      return func

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      func(*args, **kwargs)
    wrapper._enabled_strings = enabled_strings
    return wrapper

  # Unlike @Disabled, a bare @Enabled is meaningless, so args are required.
  # (Typo fixed: the original message read "argumentas".)
  assert args and not callable(args[0]), '@Enabled requires arguments'
  enabled_strings = list(args)
  for enabled_string in enabled_strings:
    # TODO(tonyg): Validate that these strings are recognized.
    assert isinstance(enabled_string, str), '@Enabled accepts a list of strs'
  return _Enabled


# pylint: disable=W0212
def IsEnabled(test, browser_type, platform):
  """Returns True iff |test| is enabled given the |browser_type| and |platform|.

  Use to respect the @Enabled / @Disabled decorators.

  Args:
    test: A function or class that may contain _disabled_strings and/or
        _enabled_strings attributes.
    browser_type: A string representing the --browser string.
    platform: A platform.Platform instance for the target of |browser_type|.
  """
  platform_attributes = [a.lower() for a in [
      browser_type,
      platform.GetOSName(),
      platform.GetOSVersionName(),
  ]]
  if hasattr(test, '_disabled_strings'):
    disabled_strings = test._disabled_strings
    if not disabled_strings:
      return False  # No arguments to @Disabled means always disable.
    for disabled_string in disabled_strings:
      if disabled_string in platform_attributes:
        print (
            'Skipping %s because it is disabled for %s. '
            'You are running %s.' % (test.__name__,
                                     ' and '.join(disabled_strings),
                                     ' '.join(platform_attributes)))
        return False
  if hasattr(test, '_enabled_strings'):
    enabled_strings = test._enabled_strings
    if not enabled_strings:
      return True  # No arguments to @Enabled means always enable.
    for enabled_string in enabled_strings:
      if enabled_string in platform_attributes:
        return True
    # Bug fix: the skip message belongs on the no-match path. The original
    # printed "Skipping ... only enabled for ..." on a *match* and then
    # returned True (i.e. announced a skip and ran the test anyway).
    print (
        'Skipping %s because it is only enabled for %s. '
        'You are running %s.' % (test.__name__,
                                 ' or '.join(enabled_strings),
                                 ' '.join(platform_attributes)))
    return False
  return True
{ "content_hash": "7d40cc521d9cf23653a6b32f5f1199b7", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 80, "avg_line_length": 34.45038167938932, "alnum_prop": 0.6472413029027254, "repo_name": "ChromiumWebApps/chromium", "id": "9b3c38b3a3a13e49547a0e4df91fa7fa82bb87b9", "size": "4676", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/telemetry/telemetry/decorators.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "52960" }, { "name": "Awk", "bytes": "8660" }, { "name": "C", "bytes": "42286199" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "198616766" }, { "name": "CSS", "bytes": "937333" }, { "name": "DOT", "bytes": "2984" }, { "name": "Java", "bytes": "5695686" }, { "name": "JavaScript", "bytes": "21967126" }, { "name": "M", "bytes": "2190" }, { "name": "Matlab", "bytes": "2262" }, { "name": "Objective-C", "bytes": "7602057" }, { "name": "PHP", "bytes": "97817" }, { "name": "Perl", "bytes": "1210885" }, { "name": "Python", "bytes": "10774996" }, { "name": "R", "bytes": "262" }, { "name": "Shell", "bytes": "1316721" }, { "name": "Tcl", "bytes": "277091" }, { "name": "TypeScript", "bytes": "1560024" }, { "name": "XSLT", "bytes": "13493" }, { "name": "nesC", "bytes": "15243" } ], "symlink_target": "" }
import unittest import gflags import time import sys from ct.crypto import cert from ct.crypto import error from ct.crypto.asn1 import oid from ct.crypto.asn1 import x509_extension as x509_ext from ct.crypto.asn1 import x509_name FLAGS = gflags.FLAGS gflags.DEFINE_string("testdata_dir", "ct/crypto/testdata", "Location of test certs") class CertificateTest(unittest.TestCase): _PEM_FILE = "google_cert.pem" # Contains 3 certificates # C=US/ST=California/L=Mountain View/O=Google Inc/CN=www.google.com # C=US/O=Google Inc/CN=Google Internet Authority # C=US/O=Equifax/OU=Equifax Secure Certificate Authority _PEM_CHAIN_FILE = "google_chain.pem" _DER_FILE = "google_cert.der" # An X509v1 certificate _V1_PEM_FILE = "v1_cert.pem" # A old but common (0.5% of all certs as of 2013-10-01) SSL # cert that uses a different or older DER format for Boolean # values. _PEM_MATRIXSSL = "matrixssl_sample.pem" # Self-signed cert by marchnetworks.com for embedded systems # and uses start date in form of "0001010000Z" (no seconds) _PEM_MARCHNETWORKS = "marchnetworks_com.pem" # Self-signed cert by subrigo.net for embedded systems # and uses a start date in the form of 121214093107+0000 _PEM_SUBRIGONET = "subrigo_net.pem" # Self-signed cert by promise.com (as of 2013-10-16) that # is in use by embedded systems. 
# # * has a start date in the format of 120703092726-1200 # * uses a 512-key RSA key _PEM_PROMISECOM = "promise_com.pem" # This self-signed cert was used to test proper (or # improper) handling of UTF-8 characters in CN # See CVE 2009-2408 for more details # # Mozilla bug480509 # https://bugzilla.mozilla.org/show_bug.cgi?id=480509 # Mozilla bug484111 # https://bugzilla.mozilla.org/show_bug.cgi?id=484111 # RedHat bug510251 # https://bugzilla.redhat.com/show_bug.cgi?id=510251 _PEM_CN_UTF8 = "cn_utf8.pem" # A self-signed cert with null characters in various names # Misparsing was involved in CVE 2009-2408 (above) and # CVE-2013-4248 _PEM_NULL_CHARS = "null_chars.pem" # A certificate with a negative serial number, and, for more fun, # an extra leading ff-octet therein. _PEM_NEGATIVE_SERIAL = "negative_serial.pem" # A certificate with an ECDSA key and signature. _PEM_ECDSA = "ecdsa_cert.pem" # A certificate with multiple EKU extensions. _PEM_MULTIPLE_EKU = "multiple_eku.pem" # A certificate with multiple "interesting" SANs. _PEM_MULTIPLE_AN = "multiple_an.pem" # A certificate with multiple CN attributes. _PEM_MULTIPLE_CN = "multiple_cn.pem" # A certificate with authority cert issuer and authority cert serial. _PEM_AKID = "authority_keyid.pem" # A certificate chain with an EV policy. _PEM_EV_CHAIN = "ev_chain.pem" # EV OID for VeriSign Class 3 Public Primary Certification Authority _EV_POLICY_OID = oid.ObjectIdentifier(value="2.16.840.1.113733.1.7.23.6") _PEM_MULTIPLE_POLICIES = "multiple_policies.pem" # A certificate with a UserNotice containing a VisibleString. _PEM_USER_NOTICE = "user_notice.pem" # A certificate with an invalid (8-byte) IP address in a SAN. _PEM_INVALID_IP = "invalid_ip.pem" # A certificate with both kinds of AIA information. _PEM_AIA = "aia.pem" # A certificate with ASN1 indefinite length encoding. 
_PEM_INDEFINITE_LENGTH = "asn1_indefinite_length_encoding.pem" # A certificate with 99991231235959Z expiration date _PEM_NOT_WELL_DEFINED_EXPIRATION = "expiration_not_well_defined.pem" # A certificate with street address, postal code etc. provided _PEM_WITH_ADDRESS = "cert_with_address.pem" @property def pem_file(self): return FLAGS.testdata_dir + "/" + self._PEM_FILE def get_file(self, filename): return FLAGS.testdata_dir + "/" + filename def cert_from_pem_file(self, filename, strict=True): return cert.Certificate.from_pem_file( self.get_file(filename), strict_der=strict) def test_from_pem_file(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(isinstance(c, cert.Certificate)) def test_certs_from_pem_file(self): certs = list(cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))) self.assertEqual(3, len(certs)) self.assertTrue(all(map(lambda x: isinstance(x, cert.Certificate), certs))) self.assertTrue("google.com" in certs[0].print_subject_name()) self.assertTrue("Google Inc" in certs[1].print_subject_name()) self.assertTrue("Equifax" in certs[2].print_subject_name()) def test_from_pem(self): with open(self.get_file(self._PEM_FILE)) as f: c = cert.Certificate.from_pem(f.read()) self.assertTrue(isinstance(c, cert.Certificate)) def test_to_pem(self): with open(self.get_file(self._PEM_FILE)) as f: c = cert.Certificate.from_pem(f.read()) # PEM files can and do contain arbitrary additional information, # so we can't assert equality with the original contents. # Instead, simply check that we can read the newly constructed PEM. 
new_pem = c.to_pem() c2 = cert.Certificate.from_pem(new_pem) self.assertTrue(c2.is_identical_to(c)) def test_all_from_pem(self): with open(self.get_file(self._PEM_CHAIN_FILE)) as f: certs = list(cert.certs_from_pem(f.read())) self.assertEqual(3, len(certs)) self.assertTrue(all(map(lambda x: isinstance(x, cert.Certificate), certs))) self.assertTrue("google.com" in certs[0].print_subject_name()) self.assertTrue("Google Inc" in certs[1].print_subject_name()) self.assertTrue("Equifax" in certs[2].print_subject_name()) def test_from_der_file(self): c = cert.Certificate.from_der_file(self.get_file(self._DER_FILE)) self.assertTrue(isinstance(c, cert.Certificate)) def test_from_der(self): with open(self.get_file(self._DER_FILE), "rb") as f: cert_der = f.read() c = cert.Certificate.from_der(cert_der) self.assertTrue(isinstance(c, cert.Certificate)) self.assertEqual(c.to_der(), cert_der) def test_invalid_encoding_raises(self): self.assertRaises(error.EncodingError, cert.Certificate.from_der, "bogus_der_string") self.assertRaises(error.EncodingError, cert.Certificate.from_pem, "bogus_pem_string") def test_to_der(self): with open(self.get_file(self._DER_FILE), "rb") as f: der_string = f.read() c = cert.Certificate(der_string) self.assertEqual(der_string, c.to_der()) def test_identical_to_self(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(c.is_identical_to(c)) self.assertEqual(c, c) def test_identical(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._PEM_FILE) self.assertTrue(c.is_identical_to(c2)) self.assertTrue(c2.is_identical_to(c)) self.assertEqual(c2, c) def test_not_identical(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._V1_PEM_FILE) self.assertFalse(c2.is_identical_to(c)) self.assertNotEqual(c2, c) self.assertNotEqual(c2, "foo") def test_hash(self): c = self.cert_from_pem_file(self._PEM_FILE) c2 = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(hash(c), hash(c)) 
self.assertEqual(hash(c), hash(c2)) def test_parse_matrixssl(self): """Test parsing of old MatrixSSL.org sample certificate As of 2013-10-01, about 0.5% of all SSL sites use an old sample certificate from MatrixSSL.org. It appears it's used mostly for various home routers. Unfortunately it uses a non-DER encoding for boolean value: the DER encoding of True is 0xFF but this cert uses a BER encoding of 0x01. This causes pure DER parsers to break. This test makes sure we can parse this cert without exceptions or errors. """ self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_MATRIXSSL) c = self.cert_from_pem_file(self._PEM_MATRIXSSL, strict=False) issuer = c.print_issuer_name() self.assertTrue("MatrixSSL Sample Server" in issuer) def test_parse_marchnetworks(self): """Test parsing certificates issued by marchnetworks.com.""" c = self.cert_from_pem_file(self._PEM_MARCHNETWORKS) issuer = c.print_issuer_name() self.assertTrue("March Networks" in issuer) # 0001010000Z expected = [2000, 1, 1, 0, 0, 0, 5, 1, 0] self.assertEqual(list(c.not_before()), expected) # 3001010000Z expected = [2030, 1, 1, 0, 0, 0, 1, 1, 0] self.assertEqual(list(c.not_after()), expected) def test_parse_subrigonet(self): """Test parsing certificates issued by subrigo.net The certificates issued by subrigo.net (non-root) use an start date with time zone. 
Not Before: Dec 14 09:31:07 2012 Not After : Dec 13 09:31:07 2022 GMT """ c = self.cert_from_pem_file(self._PEM_SUBRIGONET) issuer = c.print_issuer_name() self.assertTrue("subrigo.net" in issuer) # timezone format -- 121214093107+0000 expected = [2012, 12, 14, 9, 31, 7, 4, 349, 0] self.assertEqual(list(c.not_before()), expected) # standard format -- 221213093107Z expected = [2022, 12, 13, 9, 31, 7, 1, 347, 0] self.assertEqual(list(c.not_after()), expected) def test_utf8_names(self): c = self.cert_from_pem_file(self._PEM_CN_UTF8) nameutf8 = "ñeco ñýáěšžěšžřěčíě+ščýáíéřáíÚ" unicodename = u"ñeco ñýáěšžěšžřěčíě+ščýáíéřáíÚ" # Compare UTF-8 strings directly. self.assertEqual(c.print_subject_name(), "CN=" + nameutf8) self.assertEqual(c.print_issuer_name(), "CN=" + nameutf8) cns = c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual(cns[0], nameutf8) # Name comparison is unicode-based so decode and compare unicode names. # TODO(ekasper): implement proper stringprep-based name comparison # and use these test cases there. self.assertEqual(cns[0].value.decode("utf8"), unicodename) def test_null_chars_in_names(self): """Test handling null chars in subject and subject alternative names.""" c = self.cert_from_pem_file(self._PEM_NULL_CHARS) cns = c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual("null.python.org\000example.org", cns[0]) alt_names = c.subject_alternative_names() self.assertEqual(len(alt_names), 5) self.assertEqual(alt_names[0].component_key(), x509_name.DNS_NAME) self.assertEqual(alt_names[0].component_value(), "altnull.python.org\000example.com") self.assertEqual(alt_names[1].component_key(), x509_name.RFC822_NAME) self.assertEqual(alt_names[1].component_value(), "null@python.org\000user@example.org") self.assertEqual(alt_names[2].component_key(),x509_name.URI_NAME) self.assertEqual(alt_names[2].component_value(), "http://null.python.org\000http://example.org") # the following does not contain nulls. 
self.assertEqual(alt_names[3].component_key(), x509_name.IP_ADDRESS_NAME) self.assertEqual(alt_names[3].component_value().as_octets(), (192, 0, 2, 1)) self.assertEqual(alt_names[4].component_key(), x509_name.IP_ADDRESS_NAME) self.assertEqual(alt_names[4].component_value().as_octets(), (32, 1, 13, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)) def test_parse_promisecom(self): """Test parsing certificates issued by promise.com The certificates issued by promise.com (non-root) use an start date with time zone (and are 512-bit) Not Before: Jun 29 15:32:48 2011 Not After : Jun 26 15:32:48 2021 GMT """ c = self.cert_from_pem_file(self._PEM_PROMISECOM) issuer = c.print_issuer_name() self.assertTrue("Promise Technology Inc." in issuer) # 110629153248-1200 expected = [2011,6,29,15,32,48,2,180,0] self.assertEqual(list(c.not_before()), expected) # 210626153248Z expected = [2021,6,26,15,32,48,5,177,0] self.assertEqual(list(c.not_after()), expected) def test_parse_ecdsa_cert(self): c = self.cert_from_pem_file(self._PEM_ECDSA) self.assertTrue("kmonos.jp" in c.print_subject_name()) self.assertEquals(oid.ECDSA_WITH_SHA256, c.signature()["algorithm"]) self.assertEquals(oid.ECDSA_WITH_SHA256, c.signature_algorithm()["algorithm"]) def test_print_subject_name(self): c = self.cert_from_pem_file(self._PEM_FILE) subject = c.print_subject_name() # C=US, ST=California, L=Mountain View, O=Google Inc, CN=*.google.com self.assertTrue("US" in subject) self.assertTrue("California" in subject) self.assertTrue("Mountain View" in subject) self.assertTrue("Google Inc" in subject) self.assertTrue("*.google.com" in subject) def test_print_issuer_name(self): c = self.cert_from_pem_file(self._PEM_FILE) issuer = c.print_issuer_name() # Issuer: C=US, O=Google Inc, CN=Google Internet Authority self.assertTrue("US" in issuer) self.assertTrue("Google Inc" in issuer) self.assertTrue("Google Internet Authority" in issuer) def test_subject_common_names(self): c = self.cert_from_pem_file(self._PEM_FILE) cns = 
c.subject_common_names() self.assertEqual(1, len(cns)) self.assertEqual("*.google.com", cns[0]) def test_multiple_subject_common_names(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_CN) cns = c.subject_common_names() self.assertItemsEqual(cns, ["www.rd.io", "rdio.com", "rd.io", "api.rdio.com", "api.rd.io", "www.rdio.com"]) def test_subject_dns_names(self): c = self.cert_from_pem_file(self._PEM_FILE) dns_names = c.subject_dns_names() self.assertEqual(44, len(dns_names)) self.assertTrue("*.youtube.com" in dns_names) def test_subject_ip_addresses(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_AN) ips = c.subject_ip_addresses() self.assertEqual(1, len(ips)) self.assertEqual((129, 48, 105, 104), ips[0].as_octets()) def test_invalid_ip_addresses(self): with self.assertRaises(error.ASN1Error) as fail: self.cert_from_pem_file(self._PEM_INVALID_IP) self.assertIn("00000000ffffff00", str(fail.exception)) c = self.cert_from_pem_file(self._PEM_INVALID_IP, strict=False) ips = c.subject_ip_addresses() self.assertEqual(1, len(ips)) self.assertEqual((0, 0, 0, 0, 255, 255, 255, 0), ips[0].as_octets()) def test_subject_alternative_names(self): cert = self.cert_from_pem_file(self._PEM_MULTIPLE_AN) sans = cert.subject_alternative_names() self.assertEqual(4, len(sans)) self.assertEqual(x509_name.DNS_NAME, sans[0].component_key()) self.assertEqual("spires.wpafb.af.mil", sans[0].component_value()) self.assertEqual(x509_name.DIRECTORY_NAME, sans[1].component_key()) self.assertTrue(isinstance(sans[1].component_value(), x509_name.Name), sans[1].component_value()) self.assertEqual(x509_name.IP_ADDRESS_NAME, sans[2].component_key()) self.assertEqual((129, 48, 105, 104), sans[2].component_value().as_octets()) self.assertEqual(x509_name.URI_NAME, sans[3].component_key()) self.assertEqual("spires.wpafb.af.mil", sans[3].component_value()) def test_no_alternative_names(self): c = cert.Certificate.from_pem_file(self.get_file(self._V1_PEM_FILE)) self.assertEqual(0, 
len(c.subject_alternative_names())) self.assertEqual(0, len(c.subject_dns_names())) self.assertEqual(0, len(c.subject_ip_addresses())) def test_validity(self): certs = list(cert.certs_from_pem_file( self.get_file(self._PEM_CHAIN_FILE))) self.assertEqual(3, len(certs)) # notBefore: Sat Aug 22 16:41:51 1998 GMT # notAfter: Wed Aug 22 16:41:51 2018 GMT c = certs[2] # These two will start failing in 2018. self.assertTrue(c.is_temporally_valid_now()) self.assertFalse(c.is_expired()) self.assertFalse(c.is_not_yet_valid()) # Aug 22 16:41:51 2018 self.assertTrue(c.is_temporally_valid_at(time.gmtime(1534956111))) # Aug 22 16:41:52 2018 self.assertFalse(c.is_temporally_valid_at(time.gmtime(1534956112))) # Aug 22 16:41:50 1998 self.assertFalse(c.is_temporally_valid_at(time.gmtime(903804110))) # Aug 22 16:41:51 1998 self.assertTrue(c.is_temporally_valid_at(time.gmtime(903804111))) def test_basic_constraints(self): certs = list(cert.certs_from_pem_file( self.get_file(self._PEM_CHAIN_FILE))) self.assertFalse(certs[0].basic_constraint_ca()) self.assertTrue(certs[1].basic_constraint_ca()) self.assertIsNone(certs[0].basic_constraint_path_length()) self.assertEqual(0, certs[1].basic_constraint_path_length()) def test_version(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(3, c.version()) def test_issuer_common_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.issuer_common_name() self.assertIn("Google Internet Authority", icn[0].value) self.assertEqual(len(icn), 1) def test_issuer_country_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.issuer_country_name() self.assertIn("US", icn) self.assertEqual(len(icn), 1) def test_subject_organization_name(self): c = self.cert_from_pem_file(self._PEM_FILE) icn = c.subject_organization_name() self.assertIn("Google Inc", icn) self.assertEqual(len(icn), 1) def test_subject_street_address(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) address = c.subject_street_address() 
self.assertIn("CQ Mail Centre", address) self.assertIn("Building 19", address) def test_subject_locality_name(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) locality_name = c.subject_locality_name() self.assertIn("Rockhampton", locality_name) def test_subject_state_or_province(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) state_or_province = c.subject_state_or_province_name() self.assertIn("Queensland", state_or_province) def test_subject_postal_code(self): c = self.cert_from_pem_file(self._PEM_WITH_ADDRESS) postal_code = c.subject_postal_code() self.assertIn("4702", postal_code) def test_serial_number(self): c = self.cert_from_pem_file(self._PEM_FILE) self.assertEqual(454887626504608315115709, c.serial_number()) def test_negative_serial_number(self): # Fails because of the leading ff-octet. self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_NEGATIVE_SERIAL) c = self.cert_from_pem_file(self._PEM_NEGATIVE_SERIAL, strict=False) self.assertEqual(-218943125988803304701934765446014018, c.serial_number()) def test_v1_cert(self): c = self.cert_from_pem_file(self._V1_PEM_FILE) self.assertEqual(1, c.version()) self.assertIsNone(c.basic_constraint_ca()) def test_fingerprint(self): c = cert.Certificate.from_der_file(self.get_file(self._DER_FILE)) self.assertEqual(c.fingerprint().encode("hex"), "570fe2e3bfee986ed4a158aed8770f2e21614659") self.assertEqual(c.fingerprint("sha1").encode("hex"), "570fe2e3bfee986ed4a158aed8770f2e21614659") self.assertEqual(c.fingerprint("sha256").encode("hex"), "6d4106b4544e9e5e7a0924ee86a577ffefaadae8b8dad73413a7" "d874747a81d1") def test_key_usage(self): c = cert.Certificate.from_pem_file(self.get_file(self._PEM_FILE)) self.assertTrue(c.key_usage(x509_ext.KeyUsage.DIGITAL_SIGNATURE)) certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] # This leaf cert does not have a KeyUsage extension. 
self.assertEqual([], certs[0].key_usages()) self.assertIsNone(certs[0].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) # The second cert has keyCertSign and cRLSign. self.assertIsNotNone(certs[1].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) self.assertFalse(certs[1].key_usage( x509_ext.KeyUsage.DIGITAL_SIGNATURE)) self.assertTrue(certs[1].key_usage(x509_ext.KeyUsage.KEY_CERT_SIGN)) self.assertTrue(certs[1].key_usage(x509_ext.KeyUsage.CRL_SIGN)) self.assertItemsEqual([x509_ext.KeyUsage.KEY_CERT_SIGN, x509_ext.KeyUsage.CRL_SIGN], certs[1].key_usages()) def test_extended_key_usage(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] self.assertTrue(certs[0].extended_key_usage(oid.ID_KP_SERVER_AUTH)) self.assertIsNotNone( certs[0].extended_key_usage(oid.ID_KP_CODE_SIGNING)) self.assertFalse(certs[0].extended_key_usage(oid.ID_KP_CODE_SIGNING)) self.assertItemsEqual([oid.ID_KP_SERVER_AUTH, oid.ID_KP_CLIENT_AUTH], certs[0].extended_key_usages()) # EKU is normally only found in leaf certs. 
self.assertIsNone(certs[1].extended_key_usage(oid.ID_KP_SERVER_AUTH)) self.assertEqual([], certs[1].extended_key_usages()) def test_multiple_extensions(self): self.assertRaises(error.ASN1Error, cert.Certificate.from_pem_file, self.get_file(self._PEM_MULTIPLE_EKU)) c = cert.Certificate.from_pem_file(self.get_file(self._PEM_MULTIPLE_EKU), strict_der=False) self.assertTrue("www.m-budget-mobile-abo.ch" in c.subject_common_names()) self.assertRaises(cert.CertificateError, c.extended_key_usages) def test_key_identifiers(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_CHAIN_FILE))] self.assertEqual("\x12\x4a\x06\x24\x28\xc4\x18\xa5\x63\x0b\x41\x6e\x95" "\xbf\x72\xb5\x3e\x1b\x8e\x8f", certs[0].subject_key_identifier()) self.assertEqual("\xbf\xc0\x30\xeb\xf5\x43\x11\x3e\x67\xba\x9e\x91\xfb" "\xfc\x6a\xda\xe3\x6b\x12\x24", certs[0].authority_key_identifier()) self.assertIsNone(certs[0].authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_ISSUER)) self.assertIsNone(certs[0].authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_SERIAL_NUMBER)) self.assertEqual(certs[0].authority_key_identifier(), certs[1].subject_key_identifier()) c = self.cert_from_pem_file(self._PEM_AKID) cert_issuers = c.authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_ISSUER) self.assertEqual(1, len(cert_issuers)) # A DirectoryName. 
cert_issuer = cert_issuers[0] self.assertEqual(x509_name.DIRECTORY_NAME, cert_issuer.component_key()) self.assertEqual(["KISA RootCA 1"], cert_issuer.component_value().attributes( oid.ID_AT_COMMON_NAME)) self.assertEqual(10119, c.authority_key_identifier( identifier_type=x509_ext.AUTHORITY_CERT_SERIAL_NUMBER)) def test_policies(self): certs = [c for c in cert.certs_from_pem_file(self.get_file( self._PEM_EV_CHAIN))] ev_cert = certs[0] policies = ev_cert.policies() self.assertEqual(1, len(policies)) self.assertTrue(ev_cert.has_policy(self._EV_POLICY_OID)) self.assertFalse(ev_cert.has_policy(oid.ANY_POLICY)) policy = ev_cert.policy(self._EV_POLICY_OID) qualifiers = policy[x509_ext.POLICY_QUALIFIERS] self.assertEqual(1, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_CPS, qualifier[x509_ext.POLICY_QUALIFIER_ID]) # CPS location is an Any(IA5String). self.assertEqual("https://www.verisign.com/cps", qualifier[x509_ext.QUALIFIER].decoded_value) any_cert = certs[1] policies = any_cert.policies() self.assertEqual(1, len(policies)) self.assertFalse(any_cert.has_policy(self._EV_POLICY_OID)) self.assertTrue(any_cert.has_policy(oid.ANY_POLICY)) policy = ev_cert.policy(self._EV_POLICY_OID) qualifiers = policy[x509_ext.POLICY_QUALIFIERS] self.assertEqual(1, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_CPS, qualifier[x509_ext.POLICY_QUALIFIER_ID]) # CPS location is an IA5String. 
self.assertEqual("https://www.verisign.com/cps", qualifier[x509_ext.QUALIFIER].decoded_value) no_policy_cert = certs[2] self.assertEqual(0, len(no_policy_cert.policies())) self.assertFalse(no_policy_cert.has_policy(self._EV_POLICY_OID)) self.assertFalse(no_policy_cert.has_policy(oid.ANY_POLICY)) def test_multiple_policies(self): c = self.cert_from_pem_file(self._PEM_MULTIPLE_POLICIES) policies = c.policies() self.assertEqual(2, len(policies)) self.assertTrue(c.has_policy(oid.ObjectIdentifier( value="1.3.6.1.4.1.6449.1.2.2.7"))) self.assertTrue(c.has_policy(oid.ObjectIdentifier( value="2.23.140.1.2.1"))) self.assertFalse(c.has_policy(oid.ANY_POLICY)) def test_user_notice(self): c = self.cert_from_pem_file(self._PEM_USER_NOTICE) policies = c.policies() self.assertEqual(1, len(policies)) qualifiers = policies[0][x509_ext.POLICY_QUALIFIERS] self.assertEqual(2, len(qualifiers)) qualifier = qualifiers[0] self.assertEqual(oid.ID_QT_UNOTICE, qualifier[x509_ext.POLICY_QUALIFIER_ID]) qualifier = qualifier[x509_ext.QUALIFIER].decoded_value self.assertIsNone(qualifier[x509_ext.NOTICE_REF]) expected_text = ("For more details, please visit our website " "https://www.cybertrust.ne.jp .") explicit_text = qualifier[x509_ext.EXPLICIT_TEXT].component_value() self.assertEqual(expected_text, explicit_text) def test_crl_distribution_points(self): c = self.cert_from_pem_file(self._PEM_FILE) crls = c.crl_distribution_points() self.assertEqual(1, len(crls)) crl = crls[0] # Optional components, not present. self.assertIsNone(crl[x509_ext.REASONS]) self.assertIsNone(crl[x509_ext.CRL_ISSUER]) # This is the prevalent form of CRL distribution points. dist_points = crl[x509_ext.DISTRIBUTION_POINT] self.assertEqual(x509_ext.FULL_NAME, dist_points.component_key()) self.assertEqual(1, len(dist_points.component_value())) # A GeneralName URI. 
dist_point = dist_points.component_value()[0] self.assertEqual("http://www.gstatic.com/GoogleInternetAuthority/" "GoogleInternetAuthority.crl", dist_point[x509_name.URI_NAME]) def test_aia(self): c = self.cert_from_pem_file(self._PEM_AIA) ca_issuers = c.ca_issuers() self.assertEqual(1, len(ca_issuers)) # A GeneralName URI. self.assertEqual("http://pki.google.com/GIAG2.crt", ca_issuers[0][x509_name.URI_NAME]) ocsp = c.ocsp_responders() self.assertEqual(1, len(ocsp)) self.assertEqual("http://clients1.google.com/ocsp", ocsp[0][x509_name.URI_NAME]) # Cert has CA issuers but no OCSP responders. c = self.cert_from_pem_file(self._PEM_FILE) self.assertItemsEqual([], c.ocsp_responders()) def test_is_self_signed_root(self): c = self.cert_from_pem_file(self._PEM_SUBRIGONET) self.assertTrue(c.is_self_signed()) def test_is_self_signed_leaf(self): c = self.cert_from_pem_file(self._PEM_AIA) self.assertFalse(c.is_self_signed()) def test_get_extensions(self): c = self.cert_from_pem_file(self._PEM_AIA) extensions = c.get_extensions() extensions_oids = [extension['extnID'] for extension in extensions] self.assertItemsEqual((oid.ID_CE_EXT_KEY_USAGE, oid.ID_CE_SUBJECT_ALT_NAME, oid.ID_PE_AUTHORITY_INFO_ACCESS, oid.ID_CE_SUBJECT_KEY_IDENTIFIER, oid.ID_CE_BASIC_CONSTRAINTS, oid.ID_CE_AUTHORITY_KEY_IDENTIFIER, oid.ID_CE_CERTIFICATE_POLICIES, oid.ID_CE_CRL_DISTRIBUTION_POINTS), extensions_oids) def test_indefinite_encoding(self): self.assertRaises(error.ASN1Error, self.cert_from_pem_file, self._PEM_INDEFINITE_LENGTH) c = self.cert_from_pem_file(self._PEM_INDEFINITE_LENGTH, strict=False) issuer = c.print_issuer_name() self.assertTrue("VeriSign Class 1 CA" in issuer) def test_expiration_not_well_defined(self): c = self.cert_from_pem_file(self._PEM_NOT_WELL_DEFINED_EXPIRATION) self.assertFalse(c.is_not_after_well_defined()) # Make sure that certificate with regular expiration date return true c = self.cert_from_pem_file(self._PEM_AIA) self.assertTrue(c.is_not_after_well_defined()) if __name__ 
== "__main__": sys.argv = FLAGS(sys.argv) unittest.main()
{ "content_hash": "4b3b9b2bf48104a6609998c4b1539e92", "timestamp": "", "source": "github", "line_count": 746, "max_line_length": 81, "avg_line_length": 41.74396782841823, "alnum_prop": 0.6222664654314248, "repo_name": "wreese/megacfs", "id": "9e3564a0bd371a360aa55f0203373da669ef90ac", "size": "31229", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "vendor/github.com/google/certificate-transparency/python/ct/crypto/cert_test.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "310844" }, { "name": "Makefile", "bytes": "2405" }, { "name": "Protocol Buffer", "bytes": "14343" }, { "name": "Shell", "bytes": "9541" }, { "name": "Vim script", "bytes": "1044" } ], "symlink_target": "" }
'''
Model tests
~~~~~~~~~~~

:copyright: (c) 2012 by Marc Schlaich
:license: MIT, see LICENSE for more details.
'''
import unittest

from mock import Mock

from guimvc import Model, Observer, List, Dict


class TestModel(Model):
    # Two plain observable attributes; observers are notified on assignment.
    attr1 = 0
    attr2 = 0


class WildcardModel(TestModel):
    # __observe__ uses fnmatch-style patterns ('?' = one char, '*' = any run);
    # __exclude__ removes attributes from the observed set even if a pattern
    # in __observe__ would otherwise match them.
    __observe__ = ('attr?', 'test*', 'data1')
    __exclude__ = ('attr2',)

    test_attr = 0
    data1 = 0


class ModelTest(unittest.TestCase):
    """Tests for attribute observation on Model subclasses."""

    def test_get_observable_attributes(self):
        # Iterating a model yields its currently observable attribute names,
        # including attributes added (and removed) after construction.
        model = TestModel()
        self.assertEqual(set(model), set(['attr1', 'attr2']))
        model.attr3 = 0
        self.assertEqual(set(model), set(['attr1', 'attr2', 'attr3']))
        del model.attr3
        self.assertEqual(set(model), set(['attr1', 'attr2']))

    def test_notify(self):
        model = TestModel()
        obs = Observer(model)
        obs.notify = Mock()
        model.attr1 = 1
        obs.notify.assert_called_with('attr1', 1, 0)  # name, new, old
        # Re-assigning the same value still notifies (new == old == 1).
        model.attr1 = 1
        obs.notify.assert_called_with('attr1', 1, 1)

    def test_wildcards(self):
        model = WildcardModel()
        obs = Observer(model)
        obs.notify = Mock()
        # 'attr1' matches the 'attr?' pattern -> notified.
        model.attr1 = 1
        obs.notify.assert_called_with('attr1', 1, 0)
        obs.notify.reset_mock()
        # 'attr2' matches 'attr?' but is excluded via __exclude__.
        model.attr2 = 2
        self.assertFalse(obs.notify.called)
        # 'test_attr' matches 'test*'; 'data1' is listed literally.
        model.test_attr = 1
        obs.notify.assert_called_with('test_attr', 1, 0)
        model.data1 = 1
        obs.notify.assert_called_with('data1', 1, 0)


class ContainerTest(unittest.TestCase):
    """Tests for the observable List and Dict container attributes."""

    def test_list(self):
        class ListModel(Model):
            data = List([1, 2, 3])

        model = ListModel()
        obs = Observer(model)
        obs.notify = Mock()
        # Mutating operations must notify the observer...
        model.data.append(4)
        assert obs.notify.called
        obs.notify.reset_mock()
        del model.data[0]
        assert obs.notify.called
        obs.notify.reset_mock()
        # ...while plain reads must not.
        assert model.data[0] == 2  # no modification
        assert not obs.notify.called
        obs.notify.reset_mock()
        model.data[0] = 1
        assert model.data[0] == 1
        assert obs.notify.called
        obs.notify.reset_mock()
        model.data.pop()  # remove last item
        assert model.data[-1] == 3
        assert obs.notify.called

    def test_dict(self):
        class DictModel(Model):
            data = Dict({1: 'blub', 'test': True})

        model = DictModel()
        obs = Observer(model)
        obs.notify = Mock()
        # Mutating operations must notify the observer...
        model.data[0] = 'new'
        assert model.data[0] == 'new'
        assert obs.notify.called
        obs.notify.reset_mock()
        del model.data[0]
        assert obs.notify.called
        obs.notify.reset_mock()
        # ...while membership tests must not.
        assert 0 not in model.data  # no modification
        assert not obs.notify.called
        obs.notify.reset_mock()
        model.data.popitem()
        assert obs.notify.called
{ "content_hash": "4e3839ae04da7e8dd0eabb94a7b410a6", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 70, "avg_line_length": 23.264, "alnum_prop": 0.5660247592847317, "repo_name": "schlamar/guimvc", "id": "5d456233d36a9d43d686663bdf27e813c61a71f8", "size": "2932", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_model.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "19631" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import division from past.utils import old_div from proteus import Domain from proteus import Norms from proteus import Profiling from proteus import Context from proteus.mprans import CLSVOF import numpy as np import math try: from .parameters import * except: from parameters import * #----- TEST CASE ----- # # 1: CIRCLE # 2: ZALESAK DISK # ----- PARAMETERS FOR CLSVOF ----- # doSpinUpStep=False lambdaFact=1.0 computeMetrics=0 eps_tolerance_clsvof=True # ----- REFINEMENT ----- # triangleFlag=1 unstructured=False refinement=2 # ----- NUMERICAL PARAMETERS ----- # cfl=0.33 useMetrics=0 # ----- number of space dimensions ----- # nd=2 T=1.0 nDTout=1 # ----- General parameters ----- # parallel = False linearSmoother = None # ----- Finite element sapce ----- # pDegree_clsvof=1 useBernstein=False useHex=False # ----- quadrature order ----- # clsvof_quad_order = 2*pDegree_clsvof+1 # parallel partitioning info # from proteus import MeshTools partitioningType = MeshTools.MeshParallelPartitioningTypes.node # create mesh # nn=nnx=nny=(2**refinement)*10+1 nnz=1 L=[1.0,1.0] # definition of he he=old_div(1.0,(nnx-1.0)) clsvof_nl_atol_res = 1.0e-10#max(1.0e-10, 0.01 * he ** 2) unstructured=unstructured #True for tetgen, false for tet or hex from rectangular grid box=Domain.RectangularDomain(L) box.writePoly("box") if unstructured: domain=Domain.PlanarStraightLineGraphDomain(fileprefix="box") domain.boundaryTags = box.boundaryTags bt = domain.boundaryTags triangleOptions="pAq30Dena%8.8f" % (0.5*he**2,) else: domain = box domain.MeshOptions.nn = domain.MeshOptions.nnx = domain.MeshOptions.nny = nn domain.MeshOptions.nnz = nnz soname="clsvof_level_"+repr(refinement) class MyCoefficients(CLSVOF.Coefficients): def attachModels(self,modelList): self.model = modelList[0] self.q_v = np.zeros(self.model.q[('grad(u)',0)].shape,'d') self.ebqe_v = np.zeros(self.model.ebqe[('grad(u)',0)].shape,'d') self.q_v_old = 
np.zeros(self.model.q[('grad(u)',0)].shape,'d') self.q_v_tStar = np.zeros(self.model.q[('grad(u)',0)].shape,'d')
{ "content_hash": "3181d66c8be1d96f9797fd1b6738b328", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 86, "avg_line_length": 25.267441860465116, "alnum_prop": 0.6948918545789231, "repo_name": "erdc/proteus", "id": "138289d098b1f677d384b55597056fa776cc1f56", "size": "2173", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "proteus/tests/CLSVOF/disc_ICs/clsvof.py", "mode": "33188", "license": "mit", "language": [ { "name": "Assembly", "bytes": "2790" }, { "name": "Asymptote", "bytes": "1569" }, { "name": "C", "bytes": "2827957" }, { "name": "C++", "bytes": "7262408" }, { "name": "Cython", "bytes": "154607" }, { "name": "Dockerfile", "bytes": "2738" }, { "name": "Fortran", "bytes": "51671" }, { "name": "Jupyter Notebook", "bytes": "33357" }, { "name": "Makefile", "bytes": "19043" }, { "name": "Python", "bytes": "12534530" }, { "name": "Roff", "bytes": "322" }, { "name": "Shell", "bytes": "14084" } ], "symlink_target": "" }
""" Bottle is a fast and simple micro-framework for small web applications. It offers request dispatching (Routes) with url parameter support, templates, a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and template engines - all in a single file and with no dependencies other than the Python Standard Library. Homepage and documentation: http://bottlepy.org/ Copyright (c) 2011, Marcel Hellkamp. License: MIT (see LICENSE for details) """ from __future__ import with_statement __author__ = 'Marcel Hellkamp' __version__ = '0.11.dev' __license__ = 'MIT' # The gevent server adapter needs to patch some modules before they are imported # This is why we parse the commandline parameters here but handle them later if __name__ == '__main__': from optparse import OptionParser _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app") _opt = _cmd_parser.add_option _opt("--version", action="store_true", help="show version number.") _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.") _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.") _opt("-p", "--plugin", action="append", help="install additional plugin/s.") _opt("--debug", action="store_true", help="start server in debug mode.") _opt("--reload", action="store_true", help="auto-reload on file changes.") _cmd_options, _cmd_args = _cmd_parser.parse_args() if _cmd_options.server and _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ os, re, subprocess, sys, tempfile, threading, time, urllib, warnings from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc try: from json import dumps as json_dumps, loads as json_lds except ImportError: # pragma: no cover try: from simplejson import dumps as json_dumps, loads as json_lds except ImportError: try: from 
django.utils.simplejson import dumps as json_dumps, loads as json_lds except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps # We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. # It ain't pretty but it works... Sorry for the mess. py = sys.version_info py3k = py >= (3,0,0) py25 = py < (2,6,0) # Workaround for the missing "as" keyword in py3k. def _e(): return sys.exc_info()[1] # Workaround for the "print is a keyword/function" dilemma. _stdout, _stderr = sys.stdout.write, sys.stderr.write # Lots of stdlib and builtin differences. if py3k: import http.client as httplib import _thread as thread from urllib.parse import urljoin, parse_qsl, SplitResult as UrlSplitResult from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote from http.cookies import SimpleCookie from collections import MutableMapping as DictMixin import pickle from io import BytesIO basestring = str unicode = str json_loads = lambda s: json_lds(touni(s)) callable = lambda x: hasattr(x, '__call__') imap = map else: # 2.x import httplib import thread from urlparse import urljoin, SplitResult as UrlSplitResult from urllib import urlencode, quote as urlquote, unquote as urlunquote from Cookie import SimpleCookie from itertools import imap import cPickle as pickle from StringIO import StringIO as BytesIO if py25: msg = "Python 2.5 support may be dropped in future versions of Bottle." 
warnings.warn(msg, DeprecationWarning) from cgi import parse_qsl from UserDict import DictMixin def next(it): return it.next() bytes = str else: # 2.6, 2.7 from urlparse import parse_qsl from collections import MutableMapping as DictMixin json_loads = json_lds # Some helpers for string/byte handling def tob(s, enc='utf8'): return s.encode(enc) if isinstance(s, unicode) else bytes(s) def touni(s, enc='utf8', err='strict'): return s.decode(enc, err) if isinstance(s, bytes) else unicode(s) tonat = touni if py3k else tob # 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). # 3.1 needs a workaround. NCTextIOWrapper = None if (3,0,0) < py < (3,2,0): from io import TextIOWrapper class NCTextIOWrapper(TextIOWrapper): def close(self): pass # Keep wrapped buffer open. # A bug in functools causes it to break if the wrapper is an instance method def update_wrapper(wrapper, wrapped, *a, **ka): try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass # These helpers are used at module level and need to be defined first. # And yes, I know PEP-8, but sometimes a lower-case classname makes more sense. def depr(message): warnings.warn(message, DeprecationWarning, stacklevel=3) def makelist(data): # This is just to handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] class DictProperty(object): ''' Property that maps to a key in a local dict-like attribute. 
''' def __init__(self, attr, key=None, read_only=False): self.attr, self.key, self.read_only = attr, key, read_only def __call__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter, self.key = func, self.key or func.__name__ return self def __get__(self, obj, cls): if obj is None: return self key, storage = self.key, getattr(obj, self.attr) if key not in storage: storage[key] = self.getter(obj) return storage[key] def __set__(self, obj, value): if self.read_only: raise AttributeError("Read-Only property.") getattr(obj, self.attr)[self.key] = value def __delete__(self, obj): if self.read_only: raise AttributeError("Read-Only property.") del getattr(obj, self.attr)[self.key] class cached_property(object): ''' A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. ''' def __init__(self, func): self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class lazy_attribute(object): ''' A property that caches itself to the class object. ''' def __init__(self, func): functools.update_wrapper(self, func, updated=[]) self.getter = func def __get__(self, obj, cls): value = self.getter(cls) setattr(cls, self.__name__, value) return value ############################################################################### # Exceptions and Events ######################################################## ############################################################################### class BottleException(Exception): """ A base class for exceptions used by bottle. 
""" pass #TODO: This should subclass BaseRequest class HTTPResponse(BottleException): """ Used to break execution and immediately finish the response """ def __init__(self, output='', status=200, header=None): super(BottleException, self).__init__("HTTP Response %d" % status) self.status = int(status) self.output = output self.headers = HeaderDict(header) if header else None def apply(self, response): if self.headers: for key, value in self.headers.allitems(): response.headers[key] = value response.status = self.status class HTTPError(HTTPResponse): """ Used to generate an error page """ def __init__(self, code=500, output='Unknown Error', exception=None, traceback=None, header=None): super(HTTPError, self).__init__(output, code, header) self.exception = exception self.traceback = traceback def __repr__(self): return tonat(template(ERROR_PAGE_TEMPLATE, e=self)) ############################################################################### # Routing ###################################################################### ############################################################################### class RouteError(BottleException): """ This is a base class for all routing related exceptions """ class RouteReset(BottleException): """ If raised by a plugin or request handler, the route is reset and all plugins are re-applied. """ class RouterUnknownModeError(RouteError): pass class RouteSyntaxError(RouteError): """ The route parser found something not supported by this router """ class RouteBuildError(RouteError): """ The route could not been built """ class Router(object): ''' A Router is an ordered collection of route->target pairs. It is used to efficiently match WSGI requests against a number of routes and return the first target that satisfies the request. The target may be anything, usually a string, ID or callable object. A route consists of a path-rule and a HTTP method. The path-rule is either a static path (e.g. 
`/contact`) or a dynamic path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax and details on the matching order are described in docs:`routing`. ''' default_pattern = '[^/]+' default_filter = 're' #: Sorry for the mess. It works. Trust me. rule_syntax = re.compile('(\\\\*)'\ '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\ '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\ '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))') def __init__(self, strict=False): self.rules = {} # A {rule: Rule} mapping self.builder = {} # A rule/name->build_info mapping self.static = {} # Cache for static routes: {path: {method: target}} self.dynamic = [] # Cache for dynamic routes. See _compile() #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = {'re': self.re_filter, 'int': self.int_filter, 'float': self.float_filter, 'path': self.path_filter} def re_filter(self, conf): return conf or self.default_pattern, None, None def int_filter(self, conf): return r'-?\d+', int, lambda x: str(int(x)) def float_filter(self, conf): return r'-?[\d.]+', float, lambda x: str(float(x)) def path_filter(self, conf): return r'.*?', None, None def add_filter(self, name, func): ''' Add a filter. The provided function is called with the configuration string as parameter and must return a (regexp, to_python, to_url) tuple. The first element is a string, the last two are callables or None. ''' self.filters[name] = func def parse_rule(self, rule): ''' Parses a rule into a (name, filter, conf) token stream. If mode is None, name contains a static rule part. 
''' offset, prefix = 0, '' for match in self.rule_syntax.finditer(rule): prefix += rule[offset:match.start()] g = match.groups() if len(g[0])%2: # Escaped wildcard prefix += match.group(0)[len(g[0]):] offset = match.end() continue if prefix: yield prefix, None, None name, filtr, conf = g[1:4] if not g[2] is None else g[4:7] if not filtr: filtr = self.default_filter yield name, filtr, conf or None offset, prefix = match.end(), '' if offset <= len(rule) or prefix: yield prefix+rule[offset:], None, None def add(self, rule, method, target, name=None): ''' Add a new route or replace the target for an existing route. ''' if rule in self.rules: self.rules[rule][method] = target if name: self.builder[name] = self.builder[rule] return target = self.rules[rule] = {method: target} # Build pattern and other structures for dynamic routes anons = 0 # Number of anonymous wildcards pattern = '' # Regular expression pattern filters = [] # Lists of wildcard input filters builder = [] # Data structure for the URL builder is_static = True for key, mode, conf in self.parse_rule(rule): if mode: is_static = False mask, in_filter, out_filter = self.filters[mode](conf) if key: pattern += '(?P<%s>%s)' % (key, mask) else: pattern += '(?:%s)' % mask key = 'anon%d' % anons; anons += 1 if in_filter: filters.append((key, in_filter)) builder.append((key, out_filter or str)) elif key: pattern += re.escape(key) builder.append((None, key)) self.builder[rule] = builder if name: self.builder[name] = builder if is_static and not self.strict_order: self.static[self.build(rule)] = target return def fpat_sub(m): return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:' flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern) try: re_match = re.compile('^(%s)$' % pattern).match except re.error: raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) def match(path): """ Return an url-argument dictionary. 
""" url_args = re_match(path).groupdict() for name, wildcard_filter in filters: try: url_args[name] = wildcard_filter(url_args[name]) except ValueError: raise HTTPError(400, 'Path has wrong format.') return url_args try: combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern) self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1]) self.dynamic[-1][1].append((match, target)) except (AssertionError, IndexError): # AssertionError: Too many groups self.dynamic.append((re.compile('(^%s$)' % flat_pattern), [(match, target)])) return match def build(self, _name, *anons, **query): ''' Build an URL by filling the wildcards in a rule. ''' builder = self.builder.get(_name) if not builder: raise RouteBuildError("No route with that name.", _name) try: for i, value in enumerate(anons): query['anon%d'%i] = value url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) return url if not query else url+'?'+urlencode(query) except KeyError: raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): ''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). 
''' path, targets, urlargs = environ['PATH_INFO'] or '/', None, {} if path in self.static: targets = self.static[path] else: for combined, rules in self.dynamic: match = combined.match(path) if not match: continue getargs, targets = rules[match.lastindex - 1] urlargs = getargs(path) if getargs else {} break if not targets: raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO'])) method = environ['REQUEST_METHOD'].upper() if method in targets: return targets[method], urlargs if method == 'HEAD' and 'GET' in targets: return targets['GET'], urlargs if 'ANY' in targets: return targets['ANY'], urlargs allowed = [verb for verb in targets if verb != 'ANY'] if 'GET' in allowed and 'HEAD' not in allowed: allowed.append('HEAD') raise HTTPError(405, "Method not allowed.", header=[('Allow',",".join(allowed))]) class Route(object): ''' This class wraps a route callback along with route specific metadata and configuration and applies Plugins on demand. It is also responsible for turing an URL path rule into a regular expression usable by the Router. ''' def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. self.app = app #: The path-rule string (e.g. ``/wiki/:page``). self.rule = rule #: The HTTP method as a string (e.g. ``GET``). self.method = method #: The original callback with no plugins applied. Useful for introspection. self.callback = callback #: The name of the route (if specified) or ``None``. self.name = name or None #: A list of route-specific plugins (see :meth:`Bottle.route`). self.plugins = plugins or [] #: A list of plugins to not apply to this route (see :meth:`Bottle.route`). self.skiplist = skiplist or [] #: Additional keyword arguments passed to the :meth:`Bottle.route` #: decorator are stored in this dictionary. Used for route-specific #: plugin configuration and meta-data. 
self.config = ConfigDict(config) def __call__(self, *a, **ka): depr("Some APIs changed to return Route() instances instead of"\ " callables. Make sure to use the Route.call method and not to"\ " call Route instances directly.") return self.call(*a, **ka) @cached_property def call(self): ''' The route callback with all plugins applied. This property is created on demand and then cached to speed up subsequent requests.''' return self._make_callback() def reset(self): ''' Forget any cached values. The next time :attr:`call` is accessed, all plugins are re-applied. ''' self.__dict__.pop('call', None) def prepare(self): ''' Do all on-demand work immediately (useful for debugging).''' self.call @property def _context(self): depr('Switch to Plugin API v2 and access the Route object directly.') return dict(rule=self.rule, method=self.method, callback=self.callback, name=self.name, app=self.app, config=self.config, apply=self.plugins, skip=self.skiplist) def all_plugins(self): ''' Yield all Plugins affecting this route. ''' unique = set() for p in reversed(self.app.plugins + self.plugins): if True in self.skiplist: break name = getattr(p, 'name', False) if name and (name in self.skiplist or name in unique): continue if p in self.skiplist or type(p) in self.skiplist: continue if name: unique.add(name) yield p def _make_callback(self): callback = self.callback for plugin in self.all_plugins(): try: if hasattr(plugin, 'apply'): api = getattr(plugin, 'api', 1) context = self if api > 1 else self._context callback = plugin.apply(callback, context) else: callback = plugin(callback) except RouteReset: # Try again with changed configuration. 
return self._make_callback() if not callback is self.callback: update_wrapper(callback, self.callback) return callback def __repr__(self): return '<%s %r %r>' % (self.method, self.rule, self.callback) ############################################################################### # Application Object ########################################################### ############################################################################### class Bottle(object): """ Each Bottle object represents a single, distinct web application and consists of routes, callbacks, plugins and configuration. Instances are callable WSGI applications. """ def __init__(self, catchall=True, autojson=True, config=None): self.routes = [] # List of installed :class:`Route` instances. self.router = Router() # Maps requests to :class:`Route` instances. self.plugins = [] # List of installed plugins. self.error_handler = {} #: If true, most exceptions are catched and returned as :exc:`HTTPError` self.config = ConfigDict(config or {}) self.catchall = catchall #: An instance of :class:`HooksPlugin`. Empty by default. self.hooks = HooksPlugin() self.install(self.hooks) if autojson: self.install(JSONPlugin()) self.install(TemplatePlugin()) def mount(self, prefix, app, **options): ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: root_app.mount('/admin/', admin_app) :param prefix: path prefix or `mount-point`. If it ends in a slash, that slash is mandatory. :param app: an instance of :class:`Bottle` or a WSGI application. All other parameters are passed to the underlying :meth:`route` call. 
''' if isinstance(app, basestring): prefix, app = app, prefix depr('Parameter order of Bottle.mount() changed.') # 0.10 parts = [p for p in prefix.split('/') if p] if not parts: raise ValueError('Empty path prefix.') path_depth = len(parts) options.setdefault('skip', True) options.setdefault('method', 'ANY') @self.route('/%s/:#.*#' % '/'.join(parts), **options) def mountpoint(): try: request.path_shift(path_depth) rs = BaseResponse([], 200) def start_response(status, header): rs.status = status for name, value in header: rs.add_header(name, value) return rs.body.append rs.body = itertools.chain(rs.body, app(request.environ, start_response)) return HTTPResponse(rs.body, rs.status_code, rs.headers) finally: request.path_shift(-path_depth) if not prefix.endswith('/'): self.route('/' + '/'.join(parts), callback=mountpoint, **options) def merge(self, routes): ''' Merge the routes of another :cls:`Bottle` application or a list of :class:`Route` objects into this application. The routes keep their 'owner', meaning that the :data:`Route.app` attribute is not changed. ''' if isinstance(routes, Bottle): routes = routes.routes for route in routes: self.add_route(route) def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being applied to all routes of this application. A plugin may be a simple decorator or an object that implements the :class:`Plugin` API. ''' if hasattr(plugin, 'setup'): plugin.setup(self) if not callable(plugin) and not hasattr(plugin, 'apply'): raise TypeError("Plugins must be callable or implement .apply()") self.plugins.append(plugin) self.reset() return plugin def uninstall(self, plugin): ''' Uninstall plugins. Pass an instance to remove a specific plugin, a type object to remove all plugins that match that type, a string to remove all plugins with a matching ``name`` attribute or ``True`` to remove all plugins. Return the list of removed plugins. 
''' removed, remove = [], plugin for i, plugin in list(enumerate(self.plugins))[::-1]: if remove is True or remove is plugin or remove is type(plugin) \ or getattr(plugin, 'name', True) == remove: removed.append(plugin) del self.plugins[i] if hasattr(plugin, 'close'): plugin.close() if removed: self.reset() return removed def run(self, **kwargs): ''' Calls :func:`run` with the same parameters. ''' run(self, **kwargs) def reset(self, route=None): ''' Reset all routes (force plugins to be re-applied) and clear all caches. If an ID or route object is given, only that specific route is affected. ''' if route is None: routes = self.routes elif isinstance(route, Route): routes = [route] else: routes = [self.routes[route]] for route in routes: route.reset() if DEBUG: for route in routes: route.prepare() self.hooks.trigger('app_reset') def close(self): ''' Close the application and all installed plugins. ''' for plugin in self.plugins: if hasattr(plugin, 'close'): plugin.close() self.stopped = True def match(self, environ): """ Search for a matching route and return a (:class:`Route` , urlargs) tuple. The second value is a dictionary with parameters extracted from the URL. Raise :exc:`HTTPError` (404/405) on a non-match.""" return self.router.match(environ) def get_url(self, routename, **kargs): """ Return a string that matches a named route """ scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/' location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) def add_route(self, route): ''' Add a route object, but do not change the :data:`Route.app` attribute.''' self.routes.append(route) self.router.add(route.rule, route.method, route, name=route.name) if DEBUG: route.prepare() def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. 
Example:: @app.route('/hello/:name') def hello(name): return 'Hello %s' % name The ``:name`` part is a wildcard. See :class:`Router` for syntax details. :param path: Request path or a list of paths to listen to. If no path is specified, it is automatically generated from the signature of the function. :param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of methods to listen to. (default: `GET`) :param callback: An optional shortcut to avoid the decorator syntax. ``route(..., callback=func)`` equals ``route(...)(func)`` :param name: The name for this route. (default: None) :param apply: A decorator or plugin or a list of plugins. These are applied to the route callback in addition to installed plugins. :param skip: A list of plugins, plugin classes or names. Matching plugins are not installed to this route. ``True`` skips all. Any additional keyword arguments are stored as route-specific configuration and passed to plugins (see :meth:`Plugin.apply`). """ if callable(path): path, callback = None, path plugins = makelist(apply) skiplist = makelist(skip) def decorator(callback): # TODO: Documentation and tests if isinstance(callback, basestring): callback = load(callback) for rule in makelist(path) or yieldroutes(callback): for verb in makelist(method): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) self.add_route(route) return callback return decorator(callback) if callback else decorator def get(self, path=None, method='GET', **options): """ Equals :meth:`route`. """ return self.route(path, method, **options) def post(self, path=None, method='POST', **options): """ Equals :meth:`route` with a ``POST`` method parameter. """ return self.route(path, method, **options) def put(self, path=None, method='PUT', **options): """ Equals :meth:`route` with a ``PUT`` method parameter. 
""" return self.route(path, method, **options) def delete(self, path=None, method='DELETE', **options): """ Equals :meth:`route` with a ``DELETE`` method parameter. """ return self.route(path, method, **options) def error(self, code=500): """ Decorator: Register an output handler for a HTTP error code""" def wrapper(handler): self.error_handler[int(code)] = handler return handler return wrapper def hook(self, name): """ Return a decorator that attaches a callback to a hook. """ def wrapper(func): self.hooks.add(name, func) return func return wrapper def handle(self, path, method='GET'): """ (deprecated) Execute the first matching route callback and return the result. :exc:`HTTPResponse` exceptions are catched and returned. If :attr:`Bottle.catchall` is true, other exceptions are catched as well and returned as :exc:`HTTPError` instances (500). """ depr("This method will change semantics in 0.10. Try to avoid it.") if isinstance(path, dict): return self._handle(path) return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()}) def _handle(self, environ): try: route, args = self.router.match(environ) environ['route.handle'] = environ['bottle.route'] = route environ['route.url_args'] = args environ['bottle.app'] = self request.bind(environ) response.bind() return route.call(**args) except HTTPResponse: return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise stacktrace = format_exc(10) environ['wsgi.errors'].write(stacktrace) return HTTPError(500, "Internal Server Error", _e(), stacktrace) def _cast(self, out, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. 
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, iterable of strings and iterable of unicodes """ # Empty output is done here if not out: response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list))\ and isinstance(out[0], (bytes, unicode)): out = out[0][0:0].join(out) # b'abc'[0:0] -> b'' # Encode unicode strings if isinstance(out, unicode): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. if isinstance(out, HTTPError): out.apply(response) out = self.error_handler.get(out.status, repr)(out) if isinstance(out, HTTPResponse): depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9 return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) return self._cast(out.output) # File-like objects. if hasattr(out, 'read'): if 'wsgi.file_wrapper' in request.environ: return request.environ['wsgi.file_wrapper'](out) elif hasattr(out, 'close') or not hasattr(out, '__iter__'): return WSGIFileWrapper(out) # Handle Iterables. We peek into them to detect their inner type. try: out = iter(out) first = next(out) while not first: first = next(out) except StopIteration: return self._cast('') except HTTPResponse: first = _e() except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise first = HTTPError(500, 'Unhandled exception', _e(), format_exc(10)) # These are the inner types allowed in iterator or generator objects. 
if isinstance(first, HTTPResponse): return self._cast(first) if isinstance(first, bytes): return itertools.chain([first], out) if isinstance(first, unicode): return imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s'\ % type(first))) def wsgi(self, environ, start_response): """ The bottle WSGI-interface. """ try: out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304)\ or request.method == 'HEAD': if hasattr(out, 'close'): out.close() out = [] if isinstance(response._status_line, unicode): response._status_line = str(response._status_line) start_response(response._status_line, list(response.iter_headers())) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise except Exception: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ % (html_escape(repr(_e())), html_escape(format_exc(10))) environ['wsgi.errors'].write(err) headers = [('Content-Type', 'text/html; charset=UTF-8')] start_response('500 INTERNAL SERVER ERROR', headers) return [tob(err)] def __call__(self, environ, start_response): ''' Each instance of :class:'Bottle' is a WSGI application. ''' return self.wsgi(environ, start_response) ############################################################################### # HTTP and WSGI Tools ########################################################## ############################################################################### class BaseRequest(DictMixin): """ A wrapper for WSGI environment dictionaries that adds a lot of convenient access methods and properties. Most of them are read-only.""" #: Maximum size of memory buffer for :attr:`body` in bytes. 
MEMFILE_MAX = 102400 #: Maximum number pr GET or POST parameters per request MAX_PARAMS = 100 def __init__(self, environ): """ Wrap a WSGI environ dictionary. """ #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. self.environ = environ environ['bottle.request'] = self @DictProperty('environ', 'bottle.app', read_only=True) def app(self): ''' Bottle application handling this request. ''' raise AttributeError('This request is not connected to an application.') @property def path(self): ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix broken clients and avoid the "empty path" edge case). ''' return '/' + self.environ.get('PATH_INFO','').lstrip('/') @property def method(self): ''' The ``REQUEST_METHOD`` value as an uppercase string. ''' return self.environ.get('REQUEST_METHOD', 'GET').upper() @DictProperty('environ', 'bottle.request.headers', read_only=True) def headers(self): ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to HTTP request headers. ''' return WSGIHeaderDict(self.environ) def get_header(self, name, default=None): ''' Return the value of a request header, or a given default value. ''' return self.headers.get(name, default) @DictProperty('environ', 'bottle.request.cookies', read_only=True) def cookies(self): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')) cookies = list(cookies.values())[:self.MAX_PARAMS] return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): """ Return the content of a cookie. To read a `Signed Cookie`, the `secret` must match the one used to create the cookie (see :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing cookie or wrong signature), return a default value. 
""" value = self.cookies.get(key) if secret and value: dec = cookie_decode(value, secret) # (key, value) tuple or None return dec[1] if dec and dec[0] == key else default return value or default @DictProperty('environ', 'bottle.request.query', read_only=True) def query(self): ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`. ''' pairs = parse_qsl(self.query_string, keep_blank_values=True) get = self.environ['bottle.get'] = FormsDict() for key, value in pairs[:self.MAX_PARAMS]: get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) def forms(self): """ Form values parsed from an `url-encoded` or `multipart/form-data` encoded POST or PUT request body. The result is retuned as a :class:`FormsDict`. All keys and values are strings. File uploads are stored separately in :attr:`files`. """ forms = FormsDict() for name, item in self.POST.allitems(): if not hasattr(item, 'filename'): forms[name] = item return forms @DictProperty('environ', 'bottle.request.params', read_only=True) def params(self): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() for key, value in self.query.allitems(): params[key] = value for key, value in self.forms.allitems(): params[key] = value return params @DictProperty('environ', 'bottle.request.files', read_only=True) def files(self): """ File uploads parsed from an `url-encoded` or `multipart/form-data` encoded POST or PUT request body. The values are instances of :class:`cgi.FieldStorage`. 
The most important attributes are: filename The filename, if specified; otherwise None; this is the client side filename, *not* the file name on which it is stored (that's a temporary file you don't deal with) file The file(-like) object from which you can read the data. value The value as a *string*; for file uploads, this transparently reads the file every time you request the value. Do not do this on big files. """ files = FormsDict() for name, item in self.POST.allitems(): if hasattr(item, 'filename'): files[name] = item return files @DictProperty('environ', 'bottle.request.json', read_only=True) def json(self): ''' If the ``Content-Type`` header is ``application/json``, this property holds the parsed content of the request body. Only requests smaller than :attr:`MEMFILE_MAX` are processed to avoid memory exhaustion. ''' if 'application/json' in self.environ.get('CONTENT_TYPE', '') \ and 0 < self.content_length < self.MEMFILE_MAX: return json_loads(self.body.read(self.MEMFILE_MAX)) return None @DictProperty('environ', 'bottle.request.body', read_only=True) def _body(self): maxread = max(0, self.content_length) stream = self.environ['wsgi.input'] body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b') while maxread > 0: part = stream.read(min(maxread, self.MEMFILE_MAX)) if not part: break body.write(part) maxread -= len(part) self.environ['wsgi.input'] = body body.seek(0) return body @property def body(self): """ The HTTP request body as a seek-able file-like object. Depending on :attr:`MEMFILE_MAX`, this is either a temporary file or a :class:`io.BytesIO` instance. Accessing this property for the first time reads and replaces the ``wsgi.input`` environ variable. Subsequent accesses just do a `seek(0)` on the file object. """ self._body.seek(0) return self._body #: An alias for :attr:`query`. 
GET = query @DictProperty('environ', 'bottle.request.post', read_only=True) def POST(self): """ The values of :attr:`forms` and :attr:`files` combined into a single :class:`FormsDict`. Values are either strings (form values) or instances of :class:`cgi.FieldStorage` (file uploads). """ post = FormsDict() safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] if NCTextIOWrapper: fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n') else: fb = self.body data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True) for item in (data.list or [])[:self.MAX_PARAMS]: post[item.name] = item if item.filename else item.value return post @property def COOKIES(self): ''' Alias for :attr:`cookies` (deprecated). ''' depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).') return self.cookies @property def url(self): """ The full request URI including hostname and scheme. If your app lives behind a reverse proxy or load balancer and you get confusing results, make sure that the ``X-Forwarded-Host`` header is set correctly. """ return self.urlparts.geturl() @DictProperty('environ', 'bottle.request.urlparts', read_only=True) def urlparts(self): ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple. The tuple contains (scheme, host, path, query_string and fragment), but the fragment is always empty because it is not visible to the server. ''' env = self.environ http = env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. 
host = env.get('SERVER_NAME', '127.0.0.1') port = env.get('SERVER_PORT') if port and port != ('80' if http == 'http' else '443'): host += ':' + port path = urlquote(self.fullpath) return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '') @property def fullpath(self): """ Request path including :attr:`script_name` (if present). """ return urljoin(self.script_name, self.path.lstrip('/')) @property def query_string(self): """ The raw :attr:`query` part of the URL (everything in between ``?`` and ``#``) as a string. """ return self.environ.get('QUERY_STRING', '') @property def script_name(self): ''' The initial portion of the URL's `path` that was removed by a higher level (server or routing middleware) before the application was called. This script path is returned with leading and tailing slashes. ''' script_name = self.environ.get('SCRIPT_NAME', '').strip('/') return '/' + script_name + '/' if script_name else '/' def path_shift(self, shift=1): ''' Shift path segments from :attr:`path` to :attr:`script_name` and vice versa. :param shift: The number of path segments to shift. May be negative to change the shift direction. (default: 1) ''' script = self.environ.get('SCRIPT_NAME','/') self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift) @property def content_length(self): ''' The request body length as an integer. The client is responsible to set this header. Otherwise, the real length of the body is unknown and -1 is returned. In this case, :attr:`body` will be empty. ''' return int(self.environ.get('CONTENT_LENGTH') or -1) @property def is_xhr(self): ''' True if the request was triggered by a XMLHttpRequest. This only works with JavaScript libraries that support the `X-Requested-With` header (most of the popular libraries do). ''' requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','') return requested_with.lower() == 'xmlhttprequest' @property def is_ajax(self): ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. 
''' return self.is_xhr @property def auth(self): """ HTTP authentication data as a (user, password) tuple. This implementation currently supports basic (not digest) authentication only. If the authentication happened at a higher level (e.g. in the front web-server or a middleware), the password field is None, but the user field is looked up from the ``REMOTE_USER`` environ variable. On any errors, None is returned. """ basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION','')) if basic: return basic ruser = self.environ.get('REMOTE_USER') if ruser: return (ruser, None) return None @property def remote_route(self): """ A list of all IPs that were involved in this request, starting with the client IP and followed by zero or more proxies. This does only work if all proxies support the ```X-Forwarded-For`` header. Note that this information can be forged by malicious clients. """ proxy = self.environ.get('HTTP_X_FORWARDED_FOR') if proxy: return [ip.strip() for ip in proxy.split(',')] remote = self.environ.get('REMOTE_ADDR') return [remote] if remote else [] @property def remote_addr(self): """ The client IP as a string. Note that this information can be forged by malicious clients. """ route = self.remote_route return route[0] if route else None def copy(self): """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """ return Request(self.environ.copy()) def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) def __len__(self): return len(self.environ) def keys(self): return self.environ.keys() def __setitem__(self, key, value): """ Change an environ value and clear all caches that depend on it. 
""" if self.environ.get('bottle.request.readonly'): raise KeyError('The environ dictionary is read-only.') self.environ[key] = value todelete = () if key == 'wsgi.input': todelete = ('body', 'forms', 'files', 'params', 'post', 'json') elif key == 'QUERY_STRING': todelete = ('query', 'params') elif key.startswith('HTTP_'): todelete = ('headers', 'cookies') for key in todelete: self.environ.pop('bottle.request.'+key, None) def __repr__(self): return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) def _hkey(s): return s.title().replace('_','-') class HeaderProperty(object): def __init__(self, name, reader=None, writer=str, default=''): self.name, self.reader, self.writer, self.default = name, reader, writer, default self.__doc__ = 'Current value of the %r header.' % name.title() def __get__(self, obj, cls): if obj is None: return self value = obj.headers.get(self.name) return self.reader(value) if (value and self.reader) else (value or self.default) def __set__(self, obj, value): if self.writer: value = self.writer(value) obj.headers[self.name] = value def __delete__(self, obj): if self.name in obj.headers: del obj.headers[self.name] class BaseResponse(object): """ Storage class for a response body as well as headers and cookies. This class does support dict-like case-insensitive item-access to headers, but is NOT a dict. Most notably, iterating over a response yields parts of the body and not the headers. 
""" default_status = 200 default_content_type = 'text/html; charset=UTF-8' # Header blacklist for specific response codes # (rfc2616 section 10.2.3 and 10.3.5) bad_headers = { 204: set(('Content-Type',)), 304: set(('Allow', 'Content-Encoding', 'Content-Language', 'Content-Length', 'Content-Range', 'Content-Type', 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, **headers): self._status_line = None self._status_code = None self.body = body self._cookies = None self._headers = {'Content-Type': [self.default_content_type]} self.status = status or self.default_status if headers: for name, value in headers.items(): self[name] = value def copy(self): ''' Returns a copy of self. ''' copy = Response() copy.status = self.status copy._headers = dict((k, v[:]) for (k, v) in self._headers.items()) return copy def __iter__(self): return iter(self.body) def close(self): if hasattr(self.body, 'close'): self.body.close() @property def status_line(self): ''' The HTTP status line as a string (e.g. ``404 Not Found``).''' return self._status_line @property def status_code(self): ''' The HTTP status code as an integer (e.g. 404).''' return self._status_code def _set_status(self, status): if isinstance(status, int): code, status = status, _HTTP_STATUS_LINES.get(status) elif ' ' in status: status = status.strip() code = int(status.split()[0]) else: raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code self._status_line = status or ('%d Unknown' % code) def _get_status(self): return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and :data:`status_code` are updated accordingly. The return value is always a status string. 
''') del _get_status, _set_status @property def headers(self): ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers. ''' self.__dict__['headers'] = hdict = HeaderDict() hdict.dict = self._headers return hdict def __contains__(self, name): return _hkey(name) in self._headers def __delitem__(self, name): del self._headers[_hkey(name)] def __getitem__(self, name): return self._headers[_hkey(name)][-1] def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)] def get_header(self, name, default=None): ''' Return the value of a previously defined header. If there is no header with that name, return a default value. ''' return self._headers.get(_hkey(name), [default])[-1] def set_header(self, name, value, append=False): ''' Create a new response header, replacing any previously defined headers with the same name. ''' if append: self.add_header(name, value) else: self._headers[_hkey(name)] = [str(value)] def add_header(self, name, value): ''' Add an additional response header, not removing duplicates. ''' self._headers.setdefault(_hkey(name), []).append(str(value)) def iter_headers(self): ''' Yield (header, value) tuples, skipping headers that are not allowed with the current response status code. ''' headers = self._headers.items() bad_headers = self.bad_headers.get(self._status_code) if bad_headers: headers = [h for h in headers if h[0] not in bad_headers] for name, values in headers: for value in values: yield name, value if self._cookies: for c in self._cookies.values(): yield 'Set-Cookie', c.OutputString() def wsgiheader(self): depr('The wsgiheader method is deprecated. See headerlist.') #0.10 return self.headerlist @property def headerlist(self): ''' WSGI conform list of (header, value) tuples. 
''' return list(self.iter_headers()) content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) @property def charset(self): """ Return the charset specified in the content-type header (default: utf8). """ if 'charset=' in self.content_type: return self.content_type.split('charset=')[-1].split(';')[0].strip() return 'UTF-8' @property def COOKIES(self): """ A dict-like SimpleCookie instance. This should not be used directly. See :meth:`set_cookie`. """ depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10 if not self._cookies: self._cookies = SimpleCookie() return self._cookies def set_cookie(self, name, value, secret=None, **options): ''' Create a new cookie or replace an old one. If the `secret` parameter is set, create a `Signed Cookie` (described below). :param name: the name of the cookie. :param value: the value of the cookie. :param secret: a signature key required for signed cookies. Additionally, this method accepts all RFC 2109 attributes that are supported by :class:`cookie.Morsel`, including: :param max_age: maximum age in seconds. (default: None) :param expires: a datetime object or UNIX timestamp. (default: None) :param domain: the domain that is allowed to read the cookie. (default: current domain) :param path: limits the cookie to a given path (default: current path) :param secure: limit the cookie to HTTPS connections (default: off). :param httponly: prevents client-side javascript to read this cookie (default: off, requires Python 2.6 or newer). If neither `expires` nor `max_age` is set (default), the cookie will expire at the end of the browser session (as soon as the browser window is closed). Signed cookies may store any pickle-able object and are cryptographically signed to prevent manipulation. Keep in mind that cookies are limited to 4kb in most browsers. 
Warning: Signed cookies are not encrypted (the client can still see the content) and not copy-protected (the client can restore an old cookie). The main intention is to make pickling and unpickling save, not to store secret information at client side. ''' if not self._cookies: self._cookies = SimpleCookie() if secret: value = touni(cookie_encode((name, value), secret)) elif not isinstance(value, basestring): raise TypeError('Secret key missing for non-string Cookie.') if len(value) > 4096: raise ValueError('Cookie value to long.') self._cookies[name] = value for key, value in options.items(): if key == 'max_age': if isinstance(value, timedelta): value = value.seconds + value.days * 24 * 3600 if key == 'expires': if isinstance(value, (datedate, datetime)): value = value.timetuple() elif isinstance(value, (int, float)): value = time.gmtime(value) value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value) self._cookies[name][key.replace('_', '-')] = value def delete_cookie(self, key, **kwargs): ''' Delete a cookie. Be sure to use the same `domain` and `path` settings as used to create the cookie. ''' kwargs['max_age'] = -1 kwargs['expires'] = 0 self.set_cookie(key, '', **kwargs) def __repr__(self): out = '' for name, value in self.headerlist: out += '%s: %s\n' % (name.title(), value.strip()) return out class LocalRequest(BaseRequest, threading.local): ''' A thread-local subclass of :class:`BaseRequest`. ''' def __init__(self): pass bind = BaseRequest.__init__ class LocalResponse(BaseResponse, threading.local): ''' A thread-local subclass of :class:`BaseResponse`. 
''' bind = BaseResponse.__init__ Response = LocalResponse # BC 0.9 Request = LocalRequest # BC 0.9 ############################################################################### # Plugins ###################################################################### ############################################################################### class PluginError(BottleException): pass class JSONPlugin(object): name = 'json' api = 2 def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps def apply(self, callback, context): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): rv = callback(*a, **ka) if isinstance(rv, dict): #Attempt to serialize, raises exception on failure json_response = dumps(rv) #Set content type only if serialization succesful response.content_type = 'application/json' return json_response return rv return wrapper class HooksPlugin(object): name = 'hooks' api = 2 _names = 'before_request', 'after_request', 'app_reset' def __init__(self): self.hooks = dict((name, []) for name in self._names) self.app = None def _empty(self): return not (self.hooks['before_request'] or self.hooks['after_request']) def setup(self, app): self.app = app def add(self, name, func): ''' Attach a callback to a hook. ''' was_empty = self._empty() self.hooks.setdefault(name, []).append(func) if self.app and was_empty and not self._empty(): self.app.reset() def remove(self, name, func): ''' Remove a callback from a hook. ''' was_empty = self._empty() if name in self.hooks and func in self.hooks[name]: self.hooks[name].remove(func) if self.app and not was_empty and self._empty(): self.app.reset() def trigger(self, name, *a, **ka): ''' Trigger a hook and return a list of results. 
''' hooks = self.hooks[name] if ka.pop('reversed', False): hooks = hooks[::-1] return [hook(*a, **ka) for hook in hooks] def apply(self, callback, context): if self._empty(): return callback def wrapper(*a, **ka): self.trigger('before_request') rv = callback(*a, **ka) self.trigger('after_request', reversed=True) return rv return wrapper class TemplatePlugin(object): ''' This plugin applies the :func:`view` decorator to all routes with a `template` config parameter. If the parameter is a tuple, the second element must be a dict with additional options (e.g. `template_engine`) or default variables for the template. ''' name = 'template' api = 2 def apply(self, callback, route): conf = route.config.get('template') if isinstance(conf, (tuple, list)) and len(conf) == 2: return view(conf[0], **conf[1])(callback) elif isinstance(conf, str) and 'template_opts' in route.config: depr('The `template_opts` parameter is deprecated.') #0.9 return view(conf, **route.config['template_opts'])(callback) elif isinstance(conf, str): return view(conf)(callback) else: return callback #: Not a plugin, but part of the plugin API. TODO: Find a better place. class _ImportRedirect(object): def __init__(self, name, impmask): ''' Create a virtual package that redirects imports (see PEP 302). ''' self.name = name self.impmask = impmask self.module = sys.modules.setdefault(name, imp.new_module(name)) self.module.__dict__.update({'__file__': __file__, '__path__': [], '__all__': [], '__loader__': self}) sys.meta_path.append(self) def find_module(self, fullname, path=None): if '.' 
not in fullname: return packname, modname = fullname.rsplit('.', 1) if packname != self.name: return return self def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] packname, modname = fullname.rsplit('.', 1) realname = self.impmask % modname __import__(realname) module = sys.modules[fullname] = sys.modules[realname] setattr(self.module, modname, module) module.__loader__ = self return module ############################################################################### # Common Utilities ############################################################# ############################################################################### class MultiDict(DictMixin): """ This dict stores multiple values per key, but behaves exactly like a normal dict in that it returns only the newest value for any given key. There are special methods available to access the full list of values. """ def __init__(self, *a, **k): self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) def __len__(self): return len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) def keys(self): return self.dict.keys() if py3k: def values(self): return (v[-1] for v in self.dict.values()) def items(self): return ((k, v[-1]) for k, v in self.dict.items()) def allitems(self): return ((k, v) for k, vl in self.dict.items() for v in vl) iterkeys = keys itervalues = values iteritems = items iterallitems = allitems else: def values(self): return [v[-1] for v in self.dict.values()] def items(self): return [(k, v[-1]) for k, v in self.dict.items()] def iterkeys(self): return self.dict.iterkeys() def itervalues(self): return (v[-1] for v in self.dict.itervalues()) def iteritems(self): return ((k, v[-1]) for k, v in self.dict.iteritems()) def iterallitems(self): 
return ((k, v) for k, vl in self.dict.iteritems() for v in vl) def allitems(self): return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): ''' Return the most recent value for a key. :param default: The default value to be returned if the key is not present or the type conversion fails. :param index: An index for the list of available values. :param type: If defined, this callable is used to cast the value into a specific type. Exception are suppressed and result in the default value to be returned. ''' try: val = self.dict[key][index] return type(val) if type else val except Exception: pass return default def append(self, key, value): ''' Add a new value to the list of values for this key. ''' self.dict.setdefault(key, []).append(value) def replace(self, key, value): ''' Replace the list of values with a single value. ''' self.dict[key] = [value] def getall(self, key): ''' Return a (possibly empty) list of values for a key. ''' return self.dict.get(key) or [] #: Aliases for WTForms to mimic other multi-dict APIs (Django) getone = get getlist = getall class FormsDict(MultiDict): ''' This :class:`MultiDict` subclass is used to store request form data. Additionally to the normal dict-like item access methods (which return unmodified data as native strings), this container also supports attribute-like access to its values. Attribues are automatiically de- or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing attributes default to an empty string. ''' #: Encoding used for attribute values. 
input_encoding = 'utf8' def getunicode(self, name, default=None, encoding=None): value, enc = self.get(name, default), encoding or self.input_encoding try: if isinstance(value, bytes): # Python 2 WSGI return value.decode(enc) elif isinstance(value, unicode): # Python 3 WSGI return value.encode('latin1').decode(enc) return value except UnicodeError: return default def __getattr__(self, name, default=unicode()): return self.getunicode(name, default=default) class HeaderDict(MultiDict): """ A case-insensitive version of :class:`MultiDict` that defaults to replace the old value instead of appending it. """ def __init__(self, *a, **ka): self.dict = {} if a or ka: self.update(*a, **ka) def __contains__(self, key): return _hkey(key) in self.dict def __delitem__(self, key): del self.dict[_hkey(key)] def __getitem__(self, key): return self.dict[_hkey(key)][-1] def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)] def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(str(value)) def replace(self, key, value): self.dict[_hkey(key)] = [str(value)] def getall(self, key): return self.dict.get(_hkey(key)) or [] def get(self, key, default=None, index=-1): return MultiDict.get(self, _hkey(key), default, index) def filter(self, names): for name in [_hkey(n) for n in names]: if name in self.dict: del self.dict[name] class WSGIHeaderDict(DictMixin): ''' This dict-like class wraps a WSGI environ dict and provides convenient access to HTTP_* fields. Keys and values are native strings (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI environment contains non-native string values, these are de- or encoded using a lossless 'latin1' character set. The API will remain stable even on changes to the relevant PEPs. Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one that uses non-native strings.) ''' #: List of keys that do not have a 'HTTP_' prefix. 
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH') def __init__(self, environ): self.environ = environ def _ekey(self, key): ''' Translate header field name to CGI/WSGI environ key. ''' key = key.replace('-','_').upper() if key in self.cgikeys: return key return 'HTTP_' + key def raw(self, key, default=None): ''' Return the header value as is (may be bytes or unicode). ''' return self.environ.get(self._ekey(key), default) def __getitem__(self, key): return tonat(self.environ[self._ekey(key)], 'latin1') def __setitem__(self, key, value): raise TypeError("%s is read-only." % self.__class__) def __delitem__(self, key): raise TypeError("%s is read-only." % self.__class__) def __iter__(self): for key in self.environ: if key[:5] == 'HTTP_': yield key[5:].replace('_', '-').title() elif key in self.cgikeys: yield key.replace('_', '-').title() def keys(self): return [x for x in self] def __len__(self): return len(self.keys()) def __contains__(self, key): return self._ekey(key) in self.environ class ConfigDict(dict): ''' A dict-subclass with some extras: You can access keys like attributes. Uppercase attributes create new ConfigDicts and act as name-spaces. Other missing attributes return None. Calling a ConfigDict updates its values and returns itself. >>> cfg = ConfigDict() >>> cfg.Namespace.value = 5 >>> cfg.OtherNamespace(a=1, b=2) >>> cfg {'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}} ''' def __getattr__(self, key): if key not in self and key[0].isupper(): self[key] = ConfigDict() return self.get(key) def __setattr__(self, key, value): if hasattr(dict, key): raise AttributeError('Read-only attribute.') if key in self and self[key] and isinstance(self[key], ConfigDict): raise AttributeError('Non-empty namespace attribute.') self[key] = value def __delattr__(self, key): if key in self: del self[key] def __call__(self, *a, **ka): for key, value in dict(*a, **ka).items(): setattr(self, key, value) return self class AppStack(list): """ A stack-like list. 
Calling it returns the head of the stack. """ def __call__(self): """ Return the current default application. """ return self[-1] def push(self, value=None): """ Add a new :class:`Bottle` instance to the stack """ if not isinstance(value, Bottle): value = Bottle() self.append(value) return value class WSGIFileWrapper(object): def __init__(self, fp, buffer_size=1024*64): self.fp, self.buffer_size = fp, buffer_size for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'): if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr)) def __iter__(self): buff, read = self.buffer_size, self.read while True: part = read(buff) if not part: return yield part ############################################################################### # Application Helper ########################################################### ############################################################################### def abort(code=500, text='Unknown Error: Application stopped.'): """ Aborts execution and causes a HTTP error. """ raise HTTPError(code, text) def redirect(url, code=None): """ Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version. """ if code is None: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 location = urljoin(request.url, url) raise HTTPResponse("", status=code, header=dict(Location=location)) def _file_iter_range(fp, offset, bytes, maxread=1024*1024): ''' Yield chunks from a range in a file. No chunk is bigger than maxread.''' fp.seek(offset) while bytes > 0: part = fp.read(min(bytes, maxread)) if not part: break bytes -= len(part) yield part def static_file(filename, root, mimetype='auto', download=False): """ Open a file in a safe way and return :exc:`HTTPResponse` with status code 200, 305, 401 or 404. Set Content-Type, Content-Encoding, Content-Length and Last-Modified header. Obey If-Modified-Since header and HEAD requests. 
""" root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) header = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") if not os.path.exists(filename) or not os.path.isfile(filename): return HTTPError(404, "File does not exist.") if not os.access(filename, os.R_OK): return HTTPError(403, "You do not have permission to access this file.") if mimetype == 'auto': mimetype, encoding = mimetypes.guess_type(filename) if mimetype: header['Content-Type'] = mimetype if encoding: header['Content-Encoding'] = encoding elif mimetype: header['Content-Type'] = mimetype if download: download = os.path.basename(filename if download == True else download) header['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) header['Content-Length'] = clen = stats.st_size lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) header['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) if ims is not None and ims >= int(stats.st_mtime): header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) return HTTPResponse(status=304, header=header) body = '' if request.method == 'HEAD' else open(filename, 'rb') header["Accept-Ranges"] = "bytes" ranges = request.environ.get('HTTP_RANGE') if 'HTTP_RANGE' in request.environ: ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) if not ranges: return HTTPError(416, "Requested Range Not Satisfiable") offset, end = ranges[0] header["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen) header["Content-Length"] = str(end-offset) if body: body = _file_iter_range(body, offset, end-offset) return HTTPResponse(body, header=header, status=206) return HTTPResponse(body, header=header) ############################################################################### # HTTP Utilities and MISC (TODO) 
############################################### ############################################################################### def debug(mode=True): """ Change the debug level. There is only one debug level supported at the moment.""" global DEBUG DEBUG = bool(mode) def parse_date(ims): """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """ try: ts = email.utils.parsedate_tz(ims) return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone except (TypeError, ValueError, IndexError, OverflowError): return None def parse_auth(header): """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None""" try: method, data = header.split(None, 1) if method.lower() == 'basic': user, pwd = touni(base64.b64decode(tob(data))).split(':',1) return user, pwd except (KeyError, ValueError): return None def parse_range_header(header, maxlen=0): ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip unsatisfiable ranges. The end index is non-inclusive.''' if not header or header[:6] != 'bytes=': return ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r] for start, end in ranges: try: if not start: # bytes=-100 -> last 100 bytes start, end = max(0, maxlen-int(end)), maxlen elif not end: # bytes=100- -> all but the first 99 bytes start, end = int(start), maxlen else: # bytes=100-200 -> bytes 100-200 (inclusive) start, end = int(start), min(int(end)+1, maxlen) if 0 <= start < end <= maxlen: yield start, end except ValueError: pass def _lscmp(a, b): ''' Compares two strings in a cryptographically save way: Runtime is not affected by length of common prefix. ''' return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b) def cookie_encode(data, key): ''' Encode and sign a pickle-able object. 
Return a (byte) string ''' msg = base64.b64encode(pickle.dumps(data, -1)) sig = base64.b64encode(hmac.new(tob(key), msg).digest()) return tob('!') + sig + tob('?') + msg def cookie_decode(data, key): ''' Verify and decode an encoded string. Return an object or None.''' data = tob(data) if cookie_is_encoded(data): sig, msg = data.split(tob('?'), 1) if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())): return pickle.loads(base64.b64decode(msg)) return None def cookie_is_encoded(data): ''' Return True if the argument looks like a encoded cookie.''' return bool(data.startswith(tob('!')) and tob('?') in data) def html_escape(string): ''' Escape HTML special characters ``&<>`` and quotes ``'"``. ''' return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\ .replace('"','&quot;').replace("'",'&#039;') def html_quote(string): ''' Escape and quote a string to be used as an HTTP attribute.''' return '"%s"' % html_escape(string).replace('\n','%#10;')\ .replace('\r','&#13;').replace('\t','&#9;') def yieldroutes(func): """ Return a generator for routes that match the signature (name, args) of the func parameter. This may yield more than one route if the function takes optional keyword arguments. The output is best described by example:: a() -> '/a' b(x, y) -> '/b/:x/:y' c(x, y=5) -> '/c/:x' and '/c/:x/:y' d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y' """ import inspect # Expensive module. Only import if necessary. path = '/' + func.__name__.replace('__','/').lstrip('/') spec = inspect.getargspec(func) argc = len(spec[0]) - len(spec[3] or []) path += ('/:%s' * argc) % tuple(spec[0][:argc]) yield path for arg in spec[0][argc:]: path += '/:%s' % arg yield path def path_shift(script_name, path_info, shift=1): ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa. :return: The modified paths. :param script_name: The SCRIPT_NAME path. :param script_name: The PATH_INFO path. :param shift: The number of path fragments to shift. 
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.

        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative
          to change the shift direction. (default: 1)
    '''
    if shift == 0:
        return script_name, path_info
    pathlist = path_info.strip('/').split('/')
    scriptlist = script_name.strip('/').split('/')
    # str.split('/') on an empty string yields [''] -- normalize to [].
    if pathlist and pathlist[0] == '':
        pathlist = []
    if scriptlist and scriptlist[0] == '':
        scriptlist = []
    if shift > 0 and shift <= len(pathlist):
        # Move leading PATH_INFO fragments onto the end of SCRIPT_NAME.
        moved = pathlist[:shift]
        scriptlist = scriptlist + moved
        pathlist = pathlist[shift:]
    elif shift < 0 and shift >= -len(scriptlist):
        # Move trailing SCRIPT_NAME fragments to the front of PATH_INFO.
        moved = scriptlist[shift:]
        pathlist = moved + pathlist
        scriptlist = scriptlist[:shift]
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(scriptlist)
    new_path_info = '/' + '/'.join(pathlist)
    # Preserve a trailing slash on PATH_INFO if one was present.
    if path_info.endswith('/') and pathlist:
        new_path_info += '/'
    return new_script_name, new_path_info


def validate(**vkargs):
    """
    Validates and manipulates keyword arguments by user defined callables.
    Handles ValueError and missing arguments by raising HTTPError(403).
    """
    depr('Use route wildcard filters instead.')
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kargs):
            for key, value in vkargs.items():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    kargs[key] = value(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(*args, **kargs)
        return wrapper
    return decorator


def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
                return HTTPError(401, text)
            return func(*a, **ka)
        return wrapper
    return decorator


# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        return getattr(app(), name)(*a, **ka)
    return wrapper


route     = make_default_app_wrapper('route')
get       = make_default_app_wrapper('get')
post      = make_default_app_wrapper('post')
put       = make_default_app_wrapper('put')
delete    = make_default_app_wrapper('delete')
error     = make_default_app_wrapper('error')
mount     = make_default_app_wrapper('mount')
hook      = make_default_app_wrapper('hook')
install   = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url       = make_default_app_wrapper('get_url')


###############################################################################
# Server Adapter ###############################################################
###############################################################################


class ServerAdapter(object):
    quiet = False

    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)

    def run(self, handler): # pragma: no cover
        pass

    def __repr__(self):
        args = ', '.join(['%s=%s' % (k, repr(v))
                          for k, v in self.options.items()])
        return "%s(%s)" % (self.__class__.__name__, args)


class CGIServer(ServerAdapter):
    quiet = True
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)


class FlupFCGIServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()


class WSGIRefServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw):
                    pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()


class CherryPyServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        try:
            server.start()
        finally:
            server.stop()


class PasteServer(ServerAdapter):
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)


class MeinheldServer(ServerAdapter):
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)


class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
""" def run(self, handler): # pragma: no cover import tornado.wsgi, tornado.httpserver, tornado.ioloop container = tornado.wsgi.WSGIContainer(handler) server = tornado.httpserver.HTTPServer(container) server.listen(port=self.port) tornado.ioloop.IOLoop.instance().start() class AppEngineServer(ServerAdapter): """ Adapter for Google App Engine. """ quiet = True def run(self, handler): from google.appengine.ext.webapp import util # A main() function in the handler script enables 'App Caching'. # Lets makes sure it is there. This _really_ improves performance. module = sys.modules.get('__main__') if module and not hasattr(module, 'main'): module.main = lambda: util.run_wsgi_app(handler) util.run_wsgi_app(handler) class TwistedServer(ServerAdapter): """ Untested. """ def run(self, handler): from twisted.web import server, wsgi from twisted.python.threadpool import ThreadPool from twisted.internet import reactor thread_pool = ThreadPool() thread_pool.start() reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler)) reactor.listenTCP(self.port, factory, interface=self.host) reactor.run() class DieselServer(ServerAdapter): """ Untested. """ def run(self, handler): from diesel.protocols.wsgi import WSGIApplication app = WSGIApplication(handler, port=self.port) app.run() class GeventServer(ServerAdapter): """ Untested. Options: * `monkey` (default: True) fixes the stdlib to use greenthreads. * `fast` (default: False) uses libevent's http server, but has some issues: No streaming, no pipelining, no SSL. """ def run(self, handler): from gevent import wsgi as wsgi_fast, pywsgi, monkey, local if self.options.get('monkey', True): if not threading.local is local.local: monkey.patch_all() wsgi = wsgi_fast if self.options.get('fast') else pywsgi wsgi.WSGIServer((self.host, self.port), handler).serve_forever() class GunicornServer(ServerAdapter): """ Untested. 
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application

        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)

        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config

            def load(self):
                return handler

        GunicornApplication().run()


class EventletServer(ServerAdapter):
    """ Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        try:
            wsgi.server(listen((self.host, self.port)), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            wsgi.server(listen((self.host, self.port)), handler)


class RocketServer(ServerAdapter):
    """ Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()


class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        run(handler, self.host, self.port)


class AutoServer(ServerAdapter):
    """ Untested. Tries each adapter in order and uses the first importable one. """
    adapters = [PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                pass
""" adapters = [PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: return sa(self.host, self.port, **self.options).run(handler) except ImportError: pass server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, 'tornado': TornadoServer, 'gae': AppEngineServer, 'twisted': TwistedServer, 'diesel': DieselServer, 'meinheld': MeinheldServer, 'gunicorn': GunicornServer, 'eventlet': EventletServer, 'gevent': GeventServer, 'rocket': RocketServer, 'bjoern' : BjoernServer, 'auto': AutoServer, } ############################################################################### # Application Control ########################################################## ############################################################################### def load(target, **namespace): """ Import a module or fetch an object from a module. * ``package.module`` returns `module` as a module object. * ``pack.mod:name`` returns the module variable `name` from `pack.mod`. * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result. The last form accepts not only function calls, but any type of expression. Keyword arguments passed to this function are available as local variables. Example: ``import_string('re:compile(x)', x='[a-z]')`` """ module, target = target.split(":", 1) if ':' in target else (target, None) if module not in sys.modules: __import__(module) if not target: return sys.modules[module] if target.isalnum(): return getattr(sys.modules[module], target) package_name = module.split('.')[0] namespace[package_name] = sys.modules[package_name] return eval('%s.%s' % (module, target), namespace) def load_app(target): """ Load a bottle application from a module and make sure that the import does not affect the current default application, but returns a separate application object. See :func:`load` for the target parameter. 
""" global NORUN; NORUN, nr_old = True, NORUN try: tmp = default_app.push() # Create a new "default application" rv = load(target) # Import the target module return rv if callable(rv) else tmp finally: default_app.remove(tmp) # Remove the temporary added default application NORUN = nr_old _debug = debug def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, interval=1, reloader=False, quiet=False, plugins=None, debug=False, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by :func:`load_app`. (default: :func:`default_app`) :param server: Server adapter to use. See :data:`server_names` keys for valid names or pass a :class:`ServerAdapter` subclass. (default: `wsgiref`) :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on all interfaces including the external one. (default: 127.0.0.1) :param port: Server port to bind to. Values below 1024 require root privileges. (default: 8080) :param reloader: Start auto-reloading server? (default: False) :param interval: Auto-reloader interval in seconds (default: 1) :param quiet: Suppress output to stdout and stderr? (default: False) :param options: Options passed to the server adapter. """ if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): try: fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. We never write to it while os.path.exists(lockfile): args = [sys.executable] + sys.argv environ = os.environ.copy() environ['BOTTLE_CHILD'] = 'true' environ['BOTTLE_LOCKFILE'] = lockfile p = subprocess.Popen(args, env=environ) while p.poll() is None: # Busy wait... os.utime(lockfile, None) # I am alive! 
time.sleep(interval) if p.poll() != 3: if os.path.exists(lockfile): os.unlink(lockfile) sys.exit(p.poll()) except KeyboardInterrupt: pass finally: if os.path.exists(lockfile): os.unlink(lockfile) return try: _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) if not callable(app): raise ValueError("Application is not callable: %r" % app) for plugin in plugins or []: app.install(plugin) if server in server_names: server = server_names.get(server) if isinstance(server, basestring): server = load(server) if isinstance(server, type): server = server(host=host, port=port, **kargs) if not isinstance(server, ServerAdapter): raise ValueError("Unknown or unsupported server: %r" % server) server.quiet = server.quiet or quiet if not server.quiet: _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') bgcheck = FileCheckerThread(lockfile, interval) with bgcheck: server.run(app) if bgcheck.status == 'reload': sys.exit(3) else: server.run(app) except KeyboardInterrupt: pass except (SystemExit, MemoryError): raise except: if not reloader: raise if not getattr(server, 'quiet', quiet): print_exc() time.sleep(interval) sys.exit(3) class FileCheckerThread(threading.Thread): ''' Interrupt main-thread as soon as a changed module file is detected, the lockfile gets deleted or gets to old. 
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''

    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None

    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()

        # Snapshot the mtime of every loaded module's source file.
        for module in sys.modules.values():
            path = getattr(module, '__file__', '')
            if path[-4:] in ('.pyo', '.pyc'):
                path = path[:-1]  # watch the .py, not the compiled file
            if path and exists(path):
                files[path] = mtime(path)

        while not self.status:
            # A vanished or stale lockfile means the parent process died.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            for path, lmtime in files.items():
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status:
            self.status = 'exit' # silent exit
        self.join()
        # BUGFIX: on a normal (exception-free) exit exc_type is None, and
        # issubclass(None, KeyboardInterrupt) raises TypeError. Guard first;
        # only swallow KeyboardInterrupt, propagate everything else.
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)


###############################################################################
# Template Adapters ############################################################
###############################################################################


class TemplateError(HTTPError):
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()

    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup]
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)

    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if os.path.isfile(name):
            return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            # Retry with each known template extension appended.
            for ext in cls.extensions:
                candidate = '%s.%s' % (fname, ext)
                if os.path.isfile(candidate):
                    return candidate

    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]

    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError

    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (*args)
        or directly, as keywords (**kwargs).
        """
        raise NotImplementedError


class MakoTemplate(BaseTemplate):
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding': self.encoding})
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename,
                                lookup=lookup, **options)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)


class CheetahTemplate(BaseTemplate):
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Per-thread variable storage so render() stays thread-safe.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        self.context.vars.clear()
        return out
class Jinja2Template(BaseTemplate):
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters:
            self.env.filters.update(filters)
        if tests:
            self.env.tests.update(tests)
        if self.source:
            self.tpl = self.env.from_string(self.source)
        else:
            self.tpl = self.env.get_template(self.filename)

    def render(self, *args, **kwargs):
        for dictarg in args:
            kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)

    def loader(self, name):
        # Jinja2 loader callback: resolve via the lookup path and decode.
        fname = self.search(name, self.lookup)
        if not fname:
            return
        with open(fname, "rb") as f:
            return f.read().decode(self.encoding)


class SimpleTALTemplate(BaseTemplate):
    ''' Deprecated, do not use. '''
    def prepare(self, **options):
        depr('The SimpleTAL template handler is deprecated'\
             ' and will be removed in 0.12')
        from simpletal import simpleTAL
        if self.source:
            self.tpl = simpleTAL.compileHTMLTemplate(self.source)
        else:
            with open(self.filename, 'rb') as fp:
                self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))

    def render(self, *args, **kwargs):
        from simpletal import simpleTALES
        for dictarg in args:
            kwargs.update(dictarg)
        context = simpleTALES.Context()
        for k, v in self.defaults.items():
            context.addGlobal(k, v)
        for k, v in kwargs.items():
            context.addGlobal(k, v)
        output = StringIO()
        self.tpl.expand(context, output)
        return output.getvalue()


class SimpleTemplate(BaseTemplate):
    #: Python keywords that open an indented block in generated code.
    blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
              'with', 'def', 'class')
    #: Keywords that close the previous block before opening their own.
    dedent_blocks = ('elif', 'else', 'except', 'finally')

    @lazy_attribute
    def re_pytokens(cls):
        ''' This matches comments and all kinds of quoted strings but does
            NOT match comments (#...) within quoted strings. (trust me) '''
        return re.compile(r'''
            (''(?!')|""(?!")|'{6}|"{6}          # Empty strings (all 4 types)
             |'(?:[^\\']|\\.)+?'                # Single quotes (')
             |"(?:[^\\"]|\\.)+?"                # Double quotes (")
             |'{3}(?:[^\\]|\\.|\n)+?'{3}        # Triple-quoted strings (')
             |"{3}(?:[^\\]|\\.|\n)+?"{3}        # Triple-quoted strings (")
             |\#.*                              # Comments
            )''', re.VERBOSE)
    def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            # noescape swaps the two helpers: {{x}} is raw, {{!x}} escapes.
            self._str, self._escape = self._escape, self._str

    @classmethod
    def split_comment(cls, code):
        """ Removes comments (#...) from python code. """
        if '#' not in code: return code
        #: Remove comments only (leave quoted strings as they are)
        subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
        return re.sub(cls.re_pytokens, subf, code)

    @cached_property
    def co(self):
        # Compiled bytecode of the generated template code (cached).
        return compile(self.code, self.filename or '<string>', 'exec')

    @cached_property
    def code(self):
        # Translate the template source into executable Python source.
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        multiline = dedent = oneline = False
        template = self.source or open(self.filename, 'rb').read()

        def yield_tokens(line):
            # Split a text line into TXT / RAW ({{!expr}}) / CMD ({{expr}}) tokens.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part

        def flush(): # Flush the ptrbuffer
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline +=  ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)

        def code(stmt):
            for line in stmt.splitlines():
                codebuffer.append(' ' * len(stack) + line.strip())

        for line in template.splitlines(True):
            lineno += 1
            line = line if isinstance(line, unicode)\
                        else unicode(line, encoding=self.encoding)
            if lineno <= 2:
                # Honor a PEP 263 style coding comment in the first two lines.
                m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if line.strip()[:2].count('%') == 1:
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = self.split_comment(line).strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() # You are actually reading this? Good luck, it's a mess :)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'

    def subtemplate(self, _name, _stdout, *args, **kwargs):
        # Compile-and-cache included sub-templates by name.
        for dictarg in args: kwargs.update(dictarg)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, kwargs)

    def execute(self, _stdout, *args, **kwargs):
        for dictarg in args: kwargs.update(dictarg)
        env = self.defaults.copy()
        # Runtime helpers visible inside the generated template code.
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
               '_include': self.subtemplate, '_str': self._str,
               '_escape': self._escape, 'get': env.get,
               'setdefault': env.setdefault, 'defined': env.__contains__})
        env.update(kwargs)
        eval(self.co, env)
        if '_rebase' in env:
            subtpl, rargs = env['_rebase']
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return self.subtemplate(subtpl,_stdout,rargs)
        return env

    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        for dictarg in args: kwargs.update(dictarg)
        stdout = []
        self.execute(stdout, kwargs)
        return ''.join(stdout)


def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    if tpl not in TEMPLATES or DEBUG:
        # (Re-)build the template object; DEBUG disables the cache.
        settings = kwargs.pop('template_settings', {})
        lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Looks like an inline template string rather than a name.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs)

mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
The handler can control its behavior like that: - return a dict of template vars to fill out the template - return something other than a dict and the view decorator will not process the template, but return the handler result as is. This includes returning a HTTPResponse(dict) to get, for instance, JSON with autojson or other castfilters. ''' def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, (dict, DictMixin)): tplvars = defaults.copy() tplvars.update(result) return template(tpl_name, **tplvars) return result return wrapper return decorator mako_view = functools.partial(view, template_adapter=MakoTemplate) cheetah_view = functools.partial(view, template_adapter=CheetahTemplate) jinja2_view = functools.partial(view, template_adapter=Jinja2Template) simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate) ############################################################################### # Constants and Globals ######################################################## ############################################################################### TEMPLATE_PATH = ['./', './views/'] TEMPLATES = {} DEBUG = False NORUN = False # If set, run() does nothing. Used by load_app() #: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found') HTTP_CODES = httplib.responses HTTP_CODES[418] = "I'm a teapot" # RFC 2324 HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" _HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) #: The default template used for error pages. 
Override with @error() ERROR_PAGE_TEMPLATE = """ %try: %from bottle import DEBUG, HTTP_CODES, request, touni %status_name = HTTP_CODES.get(e.status, 'Unknown').title() <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> <title>Error {{e.status}}: {{status_name}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans;} body {background-color: #fff; border: 1px solid #ddd; padding: 15px; margin: 15px;} pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;} </style> </head> <body> <h1>Error {{e.status}}: {{status_name}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> <pre>{{e.output}}</pre> %if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> %end %if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> %end </body> </html> %except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. %end """ #: A thread-safe instance of :class:`Request` representing the `current` request. request = Request() #: A thread-safe instance of :class:`Response` used to build the HTTP response. response = Response() #: A thread-safe namespace. Not used by Bottle. local = threading.local() # Initialize app stack (create first empty Bottle app) # BC: 0.6.4 and needed for run() app = default_app = AppStack() app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. 
#: A virtual module: ``import bottle.ext.name`` is redirected to ``bottle_name``.
ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module

if __name__ == '__main__':
    # Command-line entry point: parse pre-collected CLI options and run the
    # application named in the first positional argument.
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        # No app target given: show usage and exit with an error status.
        parser.print_help()
        _stderr('\nError: No application specified.\n')
        sys.exit(1)
    # Make the current directory importable and alias this running module as
    # 'bottle' so the target app's own `import bottle` resolves to us.
    sys.path.insert(0, '.')
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    # Default bind address; an explicit "host:port" in --bind overrides the port.
    host, port = (opt.bind or 'localhost'), 8080
    if ':' in host:
        host, port = host.rsplit(':', 1)
    run(args[0], host=host, port=port, server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)

# THE END
{ "content_hash": "e23ba8cffa771a3e77c849df1f438104", "timestamp": "", "source": "github", "line_count": 3018, "max_line_length": 103, "avg_line_length": 39.59145129224652, "alnum_prop": 0.5806070953325466, "repo_name": "ironexmaiden/csd_post_sw", "id": "cd7c88c5b5424bcefbf3afb2e07fbfee43cb1860", "size": "119533", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "build/lib/bottle.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "3675" }, { "name": "Python", "bytes": "645173" } ], "symlink_target": "" }
""" Test that we can hit breakpoints in global constructors """ import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class TestBreakpointInGlobalConstructors(TestBase): mydir = TestBase.compute_mydir(__file__) NO_DEBUG_INFO_TESTCASE = True @expectedFailureNetBSD def test(self): self.build() self.line_foo = line_number('foo.cpp', '// !BR_foo') self.line_main = line_number('main.cpp', '// !BR_main') target = self.dbg.CreateTarget(self.getBuildArtifact("a.out")) self.assertTrue(target, VALID_TARGET) env= self.registerSharedLibrariesWithTarget(target, ["foo"]) bp_main = lldbutil.run_break_set_by_file_and_line( self, 'main.cpp', self.line_main) bp_foo = lldbutil.run_break_set_by_file_and_line( self, 'foo.cpp', self.line_foo, num_expected_locations=-2) process = target.LaunchSimple( None, env, self.get_process_working_directory()) self.assertIsNotNone( lldbutil.get_one_thread_stopped_at_breakpoint_id( self.process(), bp_foo)) self.runCmd("continue") self.assertIsNotNone( lldbutil.get_one_thread_stopped_at_breakpoint_id( self.process(), bp_main))
{ "content_hash": "12ff3548d27e9e4fb5505c3dce29ef2b", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 70, "avg_line_length": 29.304347826086957, "alnum_prop": 0.6387240356083086, "repo_name": "endlessm/chromium-browser", "id": "4439607d91cfca3713ef00b1fde51d6cf83b27b3", "size": "1348", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "third_party/llvm/lldb/test/API/functionalities/breakpoint/global_constructor/TestBreakpointInGlobalConstructor.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from openstack_dashboard.api import swift
from openstack_dashboard.test.test_data.utils import TestDataContainer


def data(TEST):
    """Populate *TEST* with fixture swift containers and one storage object."""
    TEST.containers = TestDataContainer()
    TEST.objects = TestDataContainer()

    # Two sample containers; the first also hosts the sample object below.
    first_container = swift.Container(dict(name=u"container_one\u6346"))
    second_container = swift.Container(dict(name=u"container_two\u6346"))
    TEST.containers.add(first_container, second_container)

    sample_payload = "Fake Data"
    sample_attrs = {
        "name": u"test_object\u6346",
        "content_type": u"text/plain",
        "bytes": 128,
        "last_modified": None,
        "hash": u"object_hash",
    }
    TEST.objects.add(
        swift.StorageObject(sample_attrs,
                            first_container.name,
                            data=sample_payload))
{ "content_hash": "bd27f7e791fa36dc2d1af2d82c7dc2fe", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 70, "avg_line_length": 35.38461538461539, "alnum_prop": 0.5804347826086956, "repo_name": "tuskar/tuskar-ui", "id": "2feef81050c09806c699daa789dd7648d9bf27a0", "size": "1525", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "openstack_dashboard/test/test_data/swift_data.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "159761" }, { "name": "JavaScript", "bytes": "467747" }, { "name": "Python", "bytes": "2393436" }, { "name": "Shell", "bytes": "12884" } ], "symlink_target": "" }
from tests.test_stack import TestConfig, app_from_config


def setup_noDB():
    """Build a WSGI test app with SQLAlchemy and ToscaWidgets disabled."""
    base_config = TestConfig(folder = 'config',
                             values = {'use_sqlalchemy': False,
                                       'use_toscawidgets': False,
                                       'use_toscawidgets2': False}
                             )
    return app_from_config(base_config)


def test_basic_stack():
    """The root controller renders its plain-text body."""
    app = setup_noDB()
    resp = app.get('/')
    assert resp.body.decode('ascii') == "my foo"


def test_config_reading():
    """Ensure that the config object can be read via dict and attr access"""
    app = setup_noDB()

    resp = app.get('/config_test')
    assert "default_renderer" in resp.body.decode('ascii')

    # BUG FIX: the original decoded the body only once and kept asserting
    # against that stale first response; each response must be decoded afresh.
    resp = app.get('/config_attr_lookup')
    assert "genshi" in resp.body.decode('ascii')

    resp = app.get('/config_dotted_values')
    assert "root" in resp.body.decode('ascii')


def test_config_writing():
    """Ensure that new values can be added to the config object"""
    app = setup_noDB()
    value = "gooberblue"

    resp = app.get('/config_attr_set/' + value)
    assert value in resp.body.decode('ascii')

    # BUG FIX (same stale-body issue as above): re-decode the second response
    # instead of re-checking the first one.
    resp = app.get('/config_dict_set/' + value)
    assert value in resp.body.decode('ascii')
{ "content_hash": "65ac044e72d13e3be6829cffbda07973", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 76, "avg_line_length": 32.89473684210526, "alnum_prop": 0.5856, "repo_name": "lucius-feng/tg2", "id": "ca248ea65c83e583ffbc5f82fdb600f7241b8b8f", "size": "1250", "binary": false, "copies": "2", "ref": "refs/heads/development", "path": "tests/test_stack/config/test_config.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "11047" }, { "name": "Makefile", "bytes": "1115" }, { "name": "Python", "bytes": "764312" } ], "symlink_target": "" }
from __future__ import unicode_literals

from markupsafe import escape

from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.util.mathjax import MathjaxMixin
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin, render_header


class WPManageUpcomingEvents(WPAdmin):
    """Admin WP for managing upcoming events; only sets the template prefix."""
    template_prefix = 'categories/'


class WPCategory(MathjaxMixin, WPJinjaMixin, WPDecorated):
    """WP for category display pages"""

    template_prefix = 'categories/'
    ALLOW_JSON = False
    bundles = ('module_categories.js',)

    def __init__(self, rh, category, **kwargs):
        # The category is forwarded to the template via kwargs and kept on
        # the instance for header/breadcrumb rendering.
        kwargs['category'] = category
        self.category = category
        self.atom_feed_url = kwargs.get('atom_feed_url')
        self.atom_feed_title = kwargs.get('atom_feed_title')
        if category:
            self.title = category.title
        WPDecorated.__init__(self, rh, **kwargs)
        # NOTE(review): 'mathjax' is popped only AFTER the superclass init,
        # so WPDecorated.__init__ still receives it in kwargs — presumably
        # intentional/harmless; confirm before reordering.
        self._mathjax = kwargs.pop('mathjax', False)

    def _get_header(self):
        # Display pages render times in the category's display timezone.
        return render_header(category=self.category, protected_object=self.category,
                             local_tz=self.category.display_tzinfo.zone)

    def _get_body(self, params):
        return self._get_page_content(params)

    def _get_head_content(self):
        """Extend the base head content with an Atom <link> and MathJax setup."""
        head_content = WPDecorated._get_head_content(self)
        if self.atom_feed_url:
            title = self.atom_feed_title or _("Indico Atom feed")
            # Title and URL are escaped/inserted into the alternate-feed link tag.
            head_content += ('<link rel="alternate" type="application/atom+xml" title="{}" href="{}">'
                             .format(escape(title), self.atom_feed_url))
        if self._mathjax:
            head_content += MathjaxMixin._get_head_content(self)
        return head_content

    def _get_breadcrumbs(self):
        # No breadcrumbs outside a category or at the root category.
        if not self.category or self.category.is_root:
            return ''
        return render_breadcrumbs(category=self.category)


class WPCategoryCalendar(WPCategory):
    """WP for category calendar page"""

    bundles = ('module_categories.calendar.js', 'module_categories.calendar.css')


class WPCategoryManagement(WPCategory):
    """WP for category management pages"""

    MANAGEMENT = True
    bundles = ('module_categories.management.js',)

    def __init__(self, rh, category, active_menu_item, **kwargs):
        # Highlighted entry in the management side menu.
        kwargs['active_menu_item'] = active_menu_item
        WPCategory.__init__(self, rh, category, **kwargs)

    def _get_header(self):
        # Management pages force the category's own timezone (not the
        # user's display timezone as on the display pages above).
        return render_header(category=self.category, protected_object=self.category,
                             local_tz=self.category.timezone, force_local_tz=True)

    def _get_breadcrumbs(self):
        if self.category.is_root:
            return ''
        return render_breadcrumbs(category=self.category, management=True)


class WPCategoryStatistics(WPCategory):
    """WP for the category statistics page; adds only its stylesheet bundle."""
    bundles = ('module_categories.css',)
{ "content_hash": "3afe6f57b531c2acfe12da173b7c80aa", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 102, "avg_line_length": 34.46987951807229, "alnum_prop": 0.6553652569031807, "repo_name": "mic4ael/indico", "id": "6e32fae752471432c7086b0fba829a5a28cac819", "size": "3075", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "indico/modules/categories/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "553825" }, { "name": "HTML", "bytes": "1375160" }, { "name": "JavaScript", "bytes": "1852830" }, { "name": "Mako", "bytes": "1340" }, { "name": "Python", "bytes": "4612709" }, { "name": "Shell", "bytes": "2665" }, { "name": "TeX", "bytes": "23292" }, { "name": "XSLT", "bytes": "1504" } ], "symlink_target": "" }
""" FASTMODE -- Provide real time response for each program. First, this checks to see if the proposal asks for fasttime response. If so, the task then copies the data to the fast directory for that proposal on saltpipe. If it is the first object data for that proposal, then it will also send an email to the contact PI Author Version Date ----------------------------------------------- S M Crawford (SAAO) 0.1 18 Jan 2012 """ import os import saltsafemysql as saltmysql import saltsafestring as saltstring from saltsafeio import email as sendemail def runfast(filename, propcode, obsdate, server, readmefile, sdbhost, sdbname, sdbuser, password): """Handle fast data delivery for the proposal. For a given filename """ if propcode is None or propcode=='None': return #first check in the sdb if fast data delivery is needed sdb=saltmysql.connectdb(sdbhost,sdbname, sdbuser, password) select_term='Distinct Surname, email, username, ProposalCode_Id' from_term=''' Block join Pointing using (Block_Id) join PipelineConfig using (Pointing_Id) join Proposal using (Proposal_Id) join ProposalCode using (ProposalCode_Id) join PipelineDataAccessMethod using (PipelineDataAccessMethod_Id) join ProposalContact using (Proposal_Id) join Investigator on (Investigator_Id=Contact_Id) join PiptUser using (PiptUser_Id) ''' where_term="Proposal_Code like '%s' and current=1 and DataAccessMethod='Fast'" \ % (propcode) #print 'Select %s from %s where %s' % (select_term, from_term, where_term) try: record=saltmysql.select(sdb, select_term, from_term, where_term) except Exception, e: print e return None #print "Checking for fast data" #print record if record: surname, email, username, propid= record[0] #print surname, email, username, propid else: return #second if so, then copy the data to the contact PI directory #on saltpipe under the fast directory. 
#rawfilename=getrawfilename(filename) y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (filename, username, obsdate)) if y==256: y=os.system('ssh sa@saltpipe mkdir /salt/ftparea/%s/fast%s' % (username, obsdate)) y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (filename, username, obsdate)) if y!=0: print "Problem with copying file %s to /salt/ftparea/%s/fast%s/" % (filename, username, obsdate) #copy the reduced data y=os.system('scp mbxp%s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (os.path.basename(filename), username, obsdate)) #check the type of data it is and copy over an ancillery data as well #if it is the first object file, check to see if an email has been #sent, and if not, send email #try to copy the spectroscopic data print filename, filename.startswith('P') if os.path.basename(filename).startswith('P'): sfilename='smbxp%s.txt' % (os.path.basename(filename).split('.fits')[0]) print sfilename try: y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (sfilename, username, obsdate)) except Exception, e: print e if os.path.basename(filename).startswith('S'): try: sfilename='mbxp%s.cat' % (os.path.basename(filename).split('.fits')[0]) print sfilename y=os.system('scp %s sa@saltpipe:/salt/ftparea/%s/fast%s/' % (sfilename, username, obsdate)) except Exception, e: print e #check to see if an email has been sent select_term='PipelineStatus' from_term=''' PipelineProposalStatistics join PipelineStatus using (PipelineStatus_Id) join NightInfo using (NightInfo_Id) join ProposalCode using (ProposalCode_Id) ''' where_term="Proposal_Code like '%s' and Date='%s-%s-%s'" % (propcode, obsdate[0:4], obsdate[4:6], obsdate[6:8]) print select_term, from_term, where_term try: record=saltmysql.select(sdb, select_term, from_term, where_term)[0][0] except: record=None print record if record=='FastEmail': return else: #insert information into the database nightinfoid=saltmysql.getnightinfoid(sdb, obsdate) insert_term="NightInfo_Id=%i, ProposalCode_Id=%i, 
PipelineStatus_Id=8" % (nightinfoid, propid) table_term="PipelineProposalStatistics" saltmysql.insert(sdb, insert_term, "PipelineProposalStatistics") #send email sender='sa@salt.ac.za' recipient=email bcc='crawfordsm@gmail.com' subject='SALT data available for %s' % propcode message=open(readmefile).read() message=message.replace('OBSDATE', obsdate) sendemail(server,'sa',password,sender,recipient,bcc, subject,message) sdb.close() return def getrawfilepath(filename): """Given a raw file name, returns the path on the SALT server of the raw file""" if filename.count('S'): ddir='salt/scam/' i=filename.index('S') filedate=saltstring.filedate(filename[i:]) elif filename.count('P'): ddir='salt/rss/' i=filename.index('P') filedate=saltstring.filedate(filename[i:]) print ddir, filedate return '%s%s/%s/raw/%s' % (ddir, filedate[0:4], filedate[4:8], filename)
{ "content_hash": "d559b41ca8f805dc9aa3370a7f93d26f", "timestamp": "", "source": "github", "line_count": 139, "max_line_length": 115, "avg_line_length": 38.07913669064748, "alnum_prop": 0.6671075004723219, "repo_name": "crawfordsm/pysalt", "id": "260fc386bc1e5350beeceffbc64d1957dfa3c61c", "size": "7657", "binary": false, "copies": "1", "ref": "refs/heads/placeholder", "path": "plugins/fastmode.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "9334" }, { "name": "Common Lisp", "bytes": "19932" }, { "name": "Makefile", "bytes": "856" }, { "name": "Python", "bytes": "1381161" }, { "name": "Smalltalk", "bytes": "271" } ], "symlink_target": "" }
from pickle_file import PickleFile from csv_file import CSVFile from txt_file import TXTFile from json_file import JSONFile from s3_file import S3File from tar_file import TARFile from ftp_file import FTPFile
{ "content_hash": "a9727fd49d245e31f70d5b15676d713e", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 34, "avg_line_length": 29.857142857142858, "alnum_prop": 0.8325358851674641, "repo_name": "nathandunn/agr", "id": "76a95a6ebc77e115de359d5043fcad6cb79ea7d7", "size": "210", "binary": false, "copies": "3", "ref": "refs/heads/development", "path": "indexer/src/files/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "20834" }, { "name": "Gherkin", "bytes": "2596" }, { "name": "HTML", "bytes": "1517" }, { "name": "JavaScript", "bytes": "124017" }, { "name": "Makefile", "bytes": "1811" }, { "name": "Python", "bytes": "143477" }, { "name": "Shell", "bytes": "1034" } ], "symlink_target": "" }
# NOTE: Python 2 script (print statements).
import threading
import time


def foo(iterations):
    """Print the current thread's name/id and global thread bookkeeping
    once per iteration, pausing 0.2s between iterations."""
    for i in range(iterations):
        # Trailing commas keep these prints on one line (no newline).
        print threading.currentThread().name,
        print threading.currentThread().ident,
        print threading.activeCount(),
        print threading.enumerate()
        time.sleep(0.2)


def main():
    """Main function"""

    # Two worker threads running foo() with different iteration counts.
    thread1 = threading.Thread(target=foo, args=(10,))
    thread2 = threading.Thread(target=foo, args=(15,))

    thread1.start()
    thread2.start()

    # Let the main thread do something too...
    for i in range(5):
        print threading.currentThread().name
        time.sleep(0.2)

    # Main thread waits for all threads to complete
    thread1.join()
    thread2.join()

    print "Exiting Main Thread"


if __name__ == '__main__':
    main()
{ "content_hash": "9ebad270106639948b7fe919d0ce94bf", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 54, "avg_line_length": 23.90625, "alnum_prop": 0.6287581699346405, "repo_name": "jeremiedecock/snippets", "id": "7f5c5f0766911dc9e7b3873ba78d61c44a1ed777", "size": "1000", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/threading/hello_meth2.py", "mode": "33261", "license": "mit", "language": [ { "name": "AMPL", "bytes": "4294" }, { "name": "Batchfile", "bytes": "6779" }, { "name": "C", "bytes": "102107" }, { "name": "C++", "bytes": "320943" }, { "name": "CMake", "bytes": "11424" }, { "name": "CSS", "bytes": "21121" }, { "name": "Cython", "bytes": "21" }, { "name": "Dockerfile", "bytes": "1818" }, { "name": "Fortran", "bytes": "633" }, { "name": "Gnuplot", "bytes": "39999" }, { "name": "Go", "bytes": "3166" }, { "name": "Groovy", "bytes": "3009" }, { "name": "HTML", "bytes": "138995" }, { "name": "IDL", "bytes": "43" }, { "name": "Java", "bytes": "120221" }, { "name": "JavaScript", "bytes": "32342" }, { "name": "Jinja", "bytes": "206" }, { "name": "Jupyter Notebook", "bytes": "95991" }, { "name": "Lua", "bytes": "200" }, { "name": "M4", "bytes": "111" }, { "name": "MATLAB", "bytes": "31972" }, { "name": "Makefile", "bytes": "81307" }, { "name": "OpenSCAD", "bytes": "14995" }, { "name": "PHP", "bytes": "94" }, { "name": "Perl", "bytes": "46" }, { "name": "Processing", "bytes": "208" }, { "name": "Prolog", "bytes": "454" }, { "name": "Python", "bytes": "1685966" }, { "name": "R", "bytes": "76" }, { "name": "Raku", "bytes": "43" }, { "name": "Ruby", "bytes": "42" }, { "name": "Scheme", "bytes": "649" }, { "name": "Shell", "bytes": "52865" }, { "name": "Smalltalk", "bytes": "55" }, { "name": "TeX", "bytes": "1189" }, { "name": "Vue", "bytes": "49445" }, { "name": "XSLT", "bytes": "1816" } ], "symlink_target": "" }
""" Provides modules containing classes to support Web Services (SOAP) bindings. """
{ "content_hash": "847e3f4f98a41ad2ed57ce6eaa858446", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 76, "avg_line_length": 28.333333333333332, "alnum_prop": 0.7529411764705882, "repo_name": "c2theg/DDoS_Information_Sharing", "id": "1704fa9d72390a0303d0a527466731f8547eb396", "size": "914", "binary": false, "copies": "20", "ref": "refs/heads/master", "path": "libraries/suds-jurko-0.6/suds/bindings/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "29713" }, { "name": "HTML", "bytes": "36245" }, { "name": "JavaScript", "bytes": "931" }, { "name": "Python", "bytes": "851500" }, { "name": "Shell", "bytes": "8895" } ], "symlink_target": "" }
# -*- coding: utf-8 -*-
import unittest

from sqlalchemy import create_engine, MetaData, Table, Integer, String, Column
from cubes import *
from ...common import CubesTestCaseBase

from json import dumps


def printable(obj):
    """Return *obj* pretty-printed as indented JSON (debug helper)."""
    return dumps(obj, indent=4)


class JoinsTestCaseBase(CubesTestCaseBase):
    """Shared fixture: an in-memory star schema (facts + date/city/country
    dimensions) loaded with data exercising master/detail join mismatches."""
    sql_engine = "sqlite:///"

    def setUp(self):
        super(JoinsTestCaseBase, self).setUp()

        self.facts = Table("facts", self.metadata,
                           Column("id", Integer),
                           Column("id_date", Integer),
                           Column("id_city", Integer),
                           Column("amount", Integer)
                           )
        self.dim_date = Table("dim_date", self.metadata,
                              Column("id", Integer),
                              Column("year", Integer),
                              Column("month", Integer),
                              Column("day", Integer)
                              )
        self.dim_city = Table("dim_city", self.metadata,
                              Column("id", Integer),
                              Column("name", Integer),
                              Column("country_code", Integer)
                              )
        self.dim_country = Table("dim_country", self.metadata,
                                 Column("code", String),
                                 Column("name", Integer)
                                 )
        self.metadata.create_all()

        # Facts: first five rows match dim_city (id_city=1); the remaining
        # five reference city ids 9 and 7 that do NOT exist in dim_city,
        # which is what the master/detail join tests rely on.
        data = [
                    # Master-detail Match
                    ( 1, 20130901, 1,   20),
                    ( 2, 20130902, 1,   20),
                    ( 3, 20130903, 1,   20),
                    ( 4, 20130910, 1,   20),
                    ( 5, 20130915, 1,   20),
                    #             --------
                    #             ∑    100
                    # No city dimension
                    ( 6, 20131001, 9,  200),
                    ( 7, 20131002, 9,  200),
                    ( 8, 20131004, 9,  200),
                    ( 9, 20131101, 7,  200),
                    (10, 20131201, 7,  200),
                    #             --------
                    #             ∑   1000
                    #             ========
                    #             ∑   1100
                ]

        self.load_data(self.facts, data)

        data = [
                    (1, "Bratislava", "sk"),
                    (2, "New York", "us")
                ]

        self.load_data(self.dim_city, data)

        data = [
                    ("sk", "Slovakia"),
                    ("us", "United States")
                ]

        self.load_data(self.dim_country, data)

        # dim_date only covers September 2013 (days 1..30).
        data = []
        for day in range(1, 31):
            row = (20130900+day, 2013, 9, day)
            data.append(row)

        self.load_data(self.dim_date, data)

        self.workspace = Workspace()
        self.workspace.register_default_store("sql", engine=self.engine,
                                              dimension_prefix="dim_")
        self.workspace.import_model(self.model_path("joins.json"))
        self.cube = self.workspace.cube("facts")


class JoinsTestCase(JoinsTestCaseBase):
    def setUp(self):
        super(JoinsTestCase, self).setUp()

        # Reusable drilldown specs: (dimension, hierarchy, level).
        self.day_drilldown = [("date", "default", "day")]
        self.month_drilldown = [("date", "default", "month")]
        self.year_drilldown = [("date", "default", "year")]
        self.city_drilldown = [("city")]

    def test_empty(self):
        browser = self.workspace.browser("facts")
        result = browser.aggregate()

        # All ten fact rows: 100 + 1000 (see fixture comment above).
        self.assertEqual(1100, result.summary["amount_sum"])

    def aggregate_summary(self, cube, *args, **kwargs):
        """Aggregate *cube* and return only the summary dict."""
        browser = self.workspace.browser(cube)
        result = browser.aggregate(*args, **kwargs)
        return result.summary

    def aggregate_cells(self, cube, *args, **kwargs):
        """Aggregate *cube* and return the drilldown cells as a list."""
        browser = self.workspace.browser(cube)
        result = browser.aggregate(*args, **kwargs)
        return list(result.cells)

    def test_cell_count_match(self):
        # Default (match) join: only facts with a matching city survive.
        cells = self.aggregate_cells("facts", drilldown=self.city_drilldown)
        self.assertEqual(1, len(cells))
        self.assertEqual(100, cells[0]["amount_sum"])
        self.assertEqual("Bratislava", cells[0]["city.name"])

    def test_cell_count_master(self):
        # Master join: all facts are kept; unmatched city becomes None.
        cells = self.aggregate_cells("facts_master", drilldown=self.city_drilldown)
        summary = self.aggregate_summary("facts_master", drilldown=self.city_drilldown)
        self.assertEqual(1100, summary["amount_sum"])
        cells = self.aggregate_cells("facts_master", drilldown=self.city_drilldown)
        self.assertEqual(2, len(cells))

        names = [cell["city.name"] for cell in cells]
        self.assertSequenceEqual([None, "Bratislava"], names)

        amounts = [cell["amount_sum"] for cell in cells]
        self.assertSequenceEqual([1000, 100], amounts)

    def test_cell_count_detail(self):
        # Detail join: every dimension row appears, even with no facts.
        summary = self.aggregate_summary("facts_detail_city", drilldown=self.city_drilldown)
        self.assertEqual(100, summary["amount_sum"])

        cells = self.aggregate_cells("facts_detail_city", drilldown=self.city_drilldown)
        self.assertEqual(2, len(cells))

        names = [cell["city.name"] for cell in cells]
        self.assertSequenceEqual(["Bratislava", "New York"], names)

        amounts = [cell["amount_sum"] for cell in cells]
        self.assertSequenceEqual([100, 0], amounts)

    def test_cell_count_detail_not_found(self):
        cube = self.workspace.cube("facts_detail_city")
        # City id 2 (New York) has no facts at all.
        cell = Cell(cube, [PointCut("city", [2])])
        browser = self.workspace.browser(cube)
        result = browser.aggregate(cell, drilldown=[("city", None, "city")])
        cells = list(result.cells)

        # We have one cell – one city from dim (nothing from facts)
        self.assertEqual(1, len(cells))
        # ... however, we have no facts with that city.
        self.assertEqual(0, result.summary["record_count"])
        # The summary should be coalesced to zero
        self.assertEqual(0, result.summary["amount_sum"])

        names = [cell["city.name"] for cell in cells]
        self.assertSequenceEqual(["New York"], names)

    def test_three_tables(self):
        summary = self.aggregate_summary("threetables", drilldown=self.city_drilldown)
        self.assertEqual(100, summary["amount_sum"])

        drilldown = self.city_drilldown+self.year_drilldown
        cells = self.aggregate_cells("threetables", drilldown=drilldown)
        self.assertEqual(1, len(cells))

    def test_condition_and_drilldown(self):
        cube = self.workspace.cube("condition_and_drilldown")
        cell = Cell(cube, [PointCut("city", [2])])
        dd = [("date", None, "day")]
        cells = self.aggregate_cells("condition_and_drilldown", cell=cell, drilldown=dd)

        # We want every day from the date table
        self.assertEqual(30, len(cells))

        self.assertIn("record_count", cells[0])
        self.assertIn("amount_sum", cells[0])
        self.assertIn("date.year", cells[0])
        self.assertIn("date.month", cells[0])
        self.assertIn("date.day", cells[0])
        self.assertNotIn("city.id", cells[0])

    def test_split(self):
        cube = self.workspace.cube("condition_and_drilldown")
        # Split on a date range: inside vs. outside 2013-09-01..2013-09-03.
        split = Cell(cube, [RangeCut("date", [2013, 9, 1], [2013, 9, 3])])
        cells = self.aggregate_cells("condition_and_drilldown", split=split)

        # We want every day from the date table
        self.assertEqual(2, len(cells))
        self.assertIn(SPLIT_DIMENSION_NAME, cells[0])

        # Both: master and detail split
        cube = self.workspace.cube("condition_and_drilldown")
        split = Cell(cube, [
                            RangeCut("date", [2013, 9, 1], [2013, 9, 3]),
                            PointCut("city", [1])
                           ])
        cells = self.aggregate_cells("condition_and_drilldown", split=split)

        # We want every day from the date table
        self.assertEqual(2, len(cells))
        self.assertIn(SPLIT_DIMENSION_NAME, cells[0])


@unittest.skip("not yet")
class JoinAggregateCompositionTestCase(JoinsTestCaseBase):
    """Placeholder matrix of master/detail drilldown × cut combinations;
    skipped until the expected cell counts are filled in."""

    def setUp(self):
        super(JoinAggregateCompositionTestCase, self).setUp()

        self.cube = self.workspace.cube("matchdetail")

        # MD/DD: master/detail drilldowns; MC/DC: master/detail cuts.
        MD = [("date_master", "default", "day")]
        DD = [("date_detail", "default", "day")]
        MC = Cell(self.cube, [PointCut("city_master", [2])])
        DC = Cell(self.cube, [PointCut("city_detail", [2])])

        # NOTE(review): this table is a local that nothing reads yet —
        # presumably meant to drive test_all below.
        cases = [
            {
                "args": (None, None, None, None),
                "cells": 0
            },
            {
                "args": (  MD, None, None, None),
                "cells": 5
            },
            {
                "args": (None,   MC, None, None),
                "cells": 0
            },
            {
                "args": (  MD,   MC, None, None),
                "cells": 0
            },
            {
                "args": (None, None,   DD, None),
                "cells": 0
            },
            {
                "args": (  MD, None,   DD, None),
                "cells": 0
            },
            {
                "args": (None,   MC,   DD, None),
                "cells": 0
            },
            {
                "args": (  MD,   MC,   DD, None),
                "cells": 0
            },
            {
                "args": (None, None, None,   DC),
                "cells": 0
            },
            {
                "args": (  MD, None, None,   DC),
                "cells": 0
            },
            {
                "args": (None,   MC, None,   DC),
                "cells": 0
            },
            {
                "args": (  MD,   MC, None,   DC),
                "cells": 0
            },
            {
                "args": (None, None,   DD,   DC),
                "cells": 0
            },
            {
                "args": (  MD, None,   DD,   DC),
                "cells": 0
            },
            {
                "args": (None,   MC,   DD,   DC),
                "cells": 0
            },
            {
                "args": (  MD,   MC,   DD,   DC),
                "cells": 0
            }
        ]

    def test_all(self):
        pass
{ "content_hash": "c84f88089aea1af93c00c396ff57b6f4", "timestamp": "", "source": "github", "line_count": 306, "max_line_length": 88, "avg_line_length": 34.290849673202615, "alnum_prop": 0.4731725912513104, "repo_name": "she11c0de/cubes", "id": "134377d67c5212456e0268b9562155fed89f2373", "size": "10522", "binary": false, "copies": "1", "ref": "refs/heads/unicode-fix", "path": "tests/backends/sql/joins.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "38599" }, { "name": "HTML", "bytes": "66157" }, { "name": "JavaScript", "bytes": "362898" }, { "name": "Python", "bytes": "795339" }, { "name": "VimL", "bytes": "2215" } ], "symlink_target": "" }
# NOTE: Python 2 module (uses the `unicode` builtin).
import json

from junit_xml import TestSuite, TestCase


class JunitFormatter(object):
    def __init__(self, project_cfg, project_result):
        """Build a junit-xml TestSuite from a project config and its results.

        project_cfg  -- dict with "testcases" (list of dicts with "id"/"name")
                        and "project_name".
        project_result -- dict with "results": per-case dicts carrying
                        "testcase_id", "duration_sec", "status" and
                        "steps_results".
        """
        # Index configured testcases by id for name lookup below.
        # NOTE(review): keys are built with unicode(...) but looked up with
        # str(...); for plain int/ascii ids these agree in Python 2 —
        # confirm ids are never non-ascii.
        self.testcases = {
            unicode(item["id"]): item for item in project_cfg["testcases"]
        }

        test_cases = []
        for case in project_result["results"]:
            tc = TestCase(
                u"{0}".format(self.testcases[str(case["testcase_id"])]["name"]),
                elapsed_sec=case["duration_sec"]
            )
            if case["status"] == "failed":
                # Last error and first error message
                tc.add_error_info(case["steps_results"][-1]["errors"][0]["message"])
            test_cases.append(tc)

        self.test_suite = TestSuite(
            name=u"Project {0}".format(project_cfg["project_name"]),
            test_cases=test_cases
        )

    def to_file(self, filename):
        """
        Output project results to specified filename as UTF-8 JUnit XML.
        """
        with open(filename, 'w') as f:
            f.write(
                TestSuite.to_xml_string(
                    [self.test_suite], prettyprint=True, encoding="utf-8"
                ).encode("utf-8")
            )
{ "content_hash": "6e36fff851daf952d65640bd14ff4639", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 84, "avg_line_length": 31.564102564102566, "alnum_prop": 0.5199025182778229, "repo_name": "apiwatcher/apilisk", "id": "cd870c65b26f112bf629e13e634681c327f660a7", "size": "1256", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apilisk/junit_formatter.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "29857" } ], "symlink_target": "" }
__import__( "Gaffer" ) __import__( "GafferImage" ) from _GafferScene import * from ScenePath import ScenePath from ScriptProcedural import ScriptProcedural from AlembicPath import AlembicPath
{ "content_hash": "06349067f2c5f90b59cf933cddba633c", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 45, "avg_line_length": 24.25, "alnum_prop": 0.7835051546391752, "repo_name": "davidsminor/gaffer", "id": "1acfb8ab0a0a31b3228014dfc26f210099b63588", "size": "2073", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/GafferScene/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "9286" }, { "name": "C++", "bytes": "3358250" }, { "name": "COBOL", "bytes": "64449" }, { "name": "CSS", "bytes": "28027" }, { "name": "Python", "bytes": "3267354" }, { "name": "Shell", "bytes": "7055" }, { "name": "Slash", "bytes": "35200" } ], "symlink_target": "" }
import os import csv import requests from datetime import datetime import simplejson as json import platform import base64 import ohmysportsfeedspy from ohmysportsfeedspy.v1_0 import API_v1_0 # API class for dealing with v2.0 of the API class API_v2_0(API_v1_0): # Constructor def __init__(self, verbose, store_type=None, store_location=None): super().__init__(verbose, store_type, store_location) self.base_url = "https://api.mysportsfeeds.com/v2.0/pull" self.valid_feeds = [ 'seasonal_games', 'daily_games', 'weekly_games', 'seasonal_dfs', 'daily_dfs', 'weekly_dfs', 'seasonal_player_gamelogs', 'daily_player_gamelogs', 'weekly_player_gamelogs', 'seasonal_team_gamelogs', 'daily_team_gamelogs', 'weekly_team_gamelogs', 'game_boxscore', 'game_playbyplay', 'game_lineup', 'current_season', 'player_injuries', 'latest_updates', 'seasonal_team_stats', 'seasonal_player_stats', 'seasonal_venues', 'players', 'seasonal_standings', 'daily_game_lines', 'daily_futures' ] # Feed URL def determine_url(self, league, season, feed, output_format, params): if feed == "seasonal_games": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/games.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "daily_games": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/games.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) elif feed == "weekly_games": if season == "": raise AssertionError("You must specify a season for this request.") if not "week" in params: raise AssertionError("You must specify a 'week' param for this request.") return "{base_url}/{league}/{season}/week/{week}/games.{output}".format(base_url=self.base_url, 
league=league, season=season, week=params["week"], output=output_format) elif feed == "seasonal_dfs": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "daily_dfs": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) elif feed == "weekly_dfs": if season == "": raise AssertionError("You must specify a season for this request.") if not "week" in params: raise AssertionError("You must specify a 'week' param for this request.") return "{base_url}/{league}/{season}/week/{week}/dfs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format) elif feed == "seasonal_player_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "daily_player_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) elif feed == "weekly_player_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") if not "week" in params: raise AssertionError("You must specify a 'week' param for this request.") return 
"{base_url}/{league}/{season}/week/{week}/player_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format) elif feed == "seasonal_team_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "daily_team_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) elif feed == "weekly_team_gamelogs": if season == "": raise AssertionError("You must specify a season for this request.") if not "week" in params: raise AssertionError("You must specify a 'week' param for this request.") return "{base_url}/{league}/{season}/week/{week}/team_gamelogs.{output}".format(base_url=self.base_url, league=league, season=season, week=params["week"], output=output_format) elif feed == "game_boxscore": if season == "": raise AssertionError("You must specify a season for this request.") if not "game" in params: raise AssertionError("You must specify a 'game' param for this request.") return "{base_url}/{league}/{season}/games/{game}/boxscore.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], output=output_format) elif feed == "game_playbyplay": if season == "": raise AssertionError("You must specify a season for this request.") if not "game" in params: raise AssertionError("You must specify a 'game' param for this request.") return "{base_url}/{league}/{season}/games/{game}/playbyplay.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], 
output=output_format) elif feed == "game_lineup": if season == "": raise AssertionError("You must specify a season for this request.") if not "game" in params: raise AssertionError("You must specify a 'game' param for this request.") return "{base_url}/{league}/{season}/games/{game}/lineup.{output}".format(base_url=self.base_url, league=league, season=season, game=params["game"], output=output_format) elif feed == "current_season": return "{base_url}/{league}/current_season.{output}".format(base_url=self.base_url, league=league, output=output_format) elif feed == "player_injuries": return "{base_url}/{league}/injuries.{output}".format(base_url=self.base_url, league=league, output=output_format) elif feed == "latest_updates": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/latest_updates.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "seasonal_team_stats": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/team_stats_totals.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "seasonal_player_stats": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/player_stats_totals.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "seasonal_venues": if season == "": raise AssertionError("You must specify a season for this request.") return "{base_url}/{league}/{season}/venues.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "players": return "{base_url}/{league}/players.{output}".format(base_url=self.base_url, league=league, output=output_format) elif feed == "seasonal_standings": if season == "": raise AssertionError("You must specify a season 
for this request.") return "{base_url}/{league}/{season}/standings.{output}".format(base_url=self.base_url, league=league, season=season, output=output_format) elif feed == "daily_game_lines": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/odds_gamelines.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) elif feed == "daily_futures": if season == "": raise AssertionError("You must specify a season for this request.") if not "date" in params: raise AssertionError("You must specify a 'date' param for this request.") return "{base_url}/{league}/{season}/date/{date}/odds_futures.{output}".format(base_url=self.base_url, league=league, season=season, date=params["date"], output=output_format) else: return ""
{ "content_hash": "4b9a318474cc25ada0b6f4261cc8aed2", "timestamp": "", "source": "github", "line_count": 220, "max_line_length": 190, "avg_line_length": 49.17272727272727, "alnum_prop": 0.6070438158624515, "repo_name": "MySportsFeeds/mysportsfeeds-python", "id": "ac59d74bbfe59ac799e7a74914cedd5035f1b254", "size": "10818", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ohmysportsfeedspy/v2_0.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "317" }, { "name": "Python", "bytes": "38672" } ], "symlink_target": "" }
from __future__ import absolute_import import six from sentry.testutils import APITestCase class CloudflareMetadataTest(APITestCase): def test_simple(self): user = self.create_user(email="a@example.com") self.login_as(user=user) resp = self.client.get("/extensions/cloudflare/metadata/", format="json") assert resp.status_code == 200, resp.content assert resp.data["metadata"] == { "userId": six.text_type(user.id), "username": user.username, "email": user.email, }
{ "content_hash": "929fa7a29ccd739919fe326a79be53a3", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 81, "avg_line_length": 26.714285714285715, "alnum_prop": 0.6256684491978609, "repo_name": "beeftornado/sentry", "id": "834bfec1bcd6bfebbcd145edbcb9b91f3d411163", "size": "561", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/sentry/integrations/cloudflare/test_metadata.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "157195" }, { "name": "HTML", "bytes": "197026" }, { "name": "JavaScript", "bytes": "380379" }, { "name": "Makefile", "bytes": "2832" }, { "name": "Python", "bytes": "6473603" } ], "symlink_target": "" }
""" Handles link keys that are used to give anonymous users elevated privileges. These can be used for different things and should be tied to the user it should be used by. These keys should be temporary. """ from django.db import models from django.contrib import admin from django.contrib.auth.models import User from hackfsu_com.admin import hackfsu_admin from django.conf import settings from django.utils import timezone import string import random class LinkKeyManager(models.Manager): def valid_key_exists(self, key_type, key): return self.filter(type=key_type, key=key, used_at__isnull=True, expires_at__gt=timezone.now()).exists() def valid_key_exists_for_user(self, key_type, user): return self.filter(type=key_type, user=user, used_at__isnull=True, expires_at__gt=timezone.now()).exists() class LinkKey(models.Model): objects = LinkKeyManager() KEY_MAX_LENGTH = 64 TYPE_PASSWORD_RESET = 0 TYPE = ( (TYPE_PASSWORD_RESET, 'Password Reset'), ) LINK_BASE = { TYPE_PASSWORD_RESET: settings.URL_BASE + '/user/password/reset/{}/' } user = models.ForeignKey(to=User, on_delete=models.CASCADE) type = models.SmallIntegerField(choices=TYPE) key = models.CharField(unique=True, max_length=KEY_MAX_LENGTH) expires_at = models.DateTimeField() created_at = models.DateTimeField(auto_now_add=True) used_at = models.DateTimeField(null=True, blank=True) @staticmethod def generate_unique_key() -> str: while True: key = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(LinkKey.KEY_MAX_LENGTH)) if not LinkKey.objects.filter(key=key).exists(): return key def get_link(self): return LinkKey.LINK_BASE[self.type].format(self.key) @admin.register(LinkKey, site=hackfsu_admin) class LinkKeyAdmin(admin.ModelAdmin): list_filter = ('type',) list_display = ('id', 'type', 'user_info', 'key', 'created_at', 'expires_at', 'used_at') list_editable = () list_display_links = ('id',) search_fields = ('user__email', 'user__first_name', 'user__last_name') ordering = ('-created_at',) 
@staticmethod def user_info(obj): return "{} {} - {}".format(obj.user.first_name, obj.user.last_name, obj.user.email)
{ "content_hash": "7cb0221860e0501780e6ecf3f6e91059", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 119, "avg_line_length": 35.83076923076923, "alnum_prop": 0.6749677973379132, "repo_name": "andrewsosa/hackfsu_com", "id": "25acfaab03fea8c8c147ee85cdd644a8dc41239b", "size": "2329", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "api/api/models/link_key.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "81944" }, { "name": "HTML", "bytes": "88639" }, { "name": "JavaScript", "bytes": "127887" }, { "name": "Python", "bytes": "279510" }, { "name": "Shell", "bytes": "897" } ], "symlink_target": "" }
""" Tests for the CLI """ import shutil import tempfile from future.moves.urllib.parse import urlparse from io import StringIO from mock import patch from dql.cli import repl_command, DQLClient try: import unittest2 as unittest # pylint: disable=F0401 except ImportError: import unittest class TestCli(unittest.TestCase): """ Tests for the CLI """ dynamo = None def setUp(self): super(TestCli, self).setUp() self.confdir = tempfile.mkdtemp() self.cli = DQLClient() host = urlparse(self.dynamo.host) self.cli.initialize('local', port=host.port, config_dir=self.confdir) def tearDown(self): super(TestCli, self).tearDown() shutil.rmtree(self.confdir) def assert_prints(self, command, message): """ Assert that a cli command will print a message to the console """ out = StringIO() with patch('sys.stdout', out): self.cli.onecmd(command) self.assertEqual(out.getvalue().strip(), message.strip()) def test_repl_command_args(self): """ The @repl_command decorator parses arguments and passes them in """ @repl_command def testfunc(zelf, first, second): """ Test cli command """ self.assertEqual(zelf, self) self.assertEqual(first, 'a') self.assertEqual(second, 'b') testfunc(self, 'a b') # pylint: disable=E1120 def test_repl_command_kwargs(self): """ The @repl_command decorator parses kwargs and passes them in """ @repl_command def testfunc(zelf, first, second=None): """ Test cli command """ self.assertEqual(zelf, self) self.assertEqual(first, 'a') self.assertEqual(second, 'b') testfunc(self, 'a second=b') def test_help_docs(self): """ There is a help command for every DQL query type """ from dql import help for name in dir(help): # Options is not a query type if name == 'OPTIONS': continue if not name.startswith('_'): self.assert_prints('help %s' % name.lower(), getattr(help, name))
{ "content_hash": "2d27d6a7cde8e15b865d428e3c9bb9a9", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 81, "avg_line_length": 32.30882352941177, "alnum_prop": 0.6026399635867091, "repo_name": "mathcamp/dql", "id": "59578043f2a164cc14a3e8b4546c821607885897", "size": "2197", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_cli.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "247512" }, { "name": "Shell", "bytes": "1328" } ], "symlink_target": "" }
"""The tests for the Demo lock platform.""" import unittest from homeassistant.setup import setup_component from homeassistant.components import lock from tests.common import get_test_home_assistant, mock_service FRONT = 'lock.front_door' KITCHEN = 'lock.kitchen_door' OPENABLE_LOCK = 'lock.openable_lock' class TestLockDemo(unittest.TestCase): """Test the demo lock.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.assertTrue(setup_component(self.hass, lock.DOMAIN, { 'lock': { 'platform': 'demo' } })) def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() def test_is_locked(self): """Test if lock is locked.""" self.assertTrue(lock.is_locked(self.hass, FRONT)) self.hass.states.is_state(FRONT, 'locked') self.assertFalse(lock.is_locked(self.hass, KITCHEN)) self.hass.states.is_state(KITCHEN, 'unlocked') def test_locking(self): """Test the locking of a lock.""" lock.lock(self.hass, KITCHEN) self.hass.block_till_done() self.assertTrue(lock.is_locked(self.hass, KITCHEN)) def test_unlocking(self): """Test the unlocking of a lock.""" lock.unlock(self.hass, FRONT) self.hass.block_till_done() self.assertFalse(lock.is_locked(self.hass, FRONT)) def test_opening(self): """Test the opening of a lock.""" calls = mock_service(self.hass, lock.DOMAIN, lock.SERVICE_OPEN) lock.open_lock(self.hass, OPENABLE_LOCK) self.hass.block_till_done() self.assertEqual(1, len(calls))
{ "content_hash": "f941d09e7ee3e8367776117cba9272c5", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 71, "avg_line_length": 32.107142857142854, "alnum_prop": 0.6312569521690767, "repo_name": "persandstrom/home-assistant", "id": "500cc7f9a6a671efe26f877aff21093e739a1076", "size": "1798", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/components/lock/test_demo.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1067" }, { "name": "Python", "bytes": "11745210" }, { "name": "Ruby", "bytes": "518" }, { "name": "Shell", "bytes": "16652" } ], "symlink_target": "" }
""" Revision ID: 0254_folders_for_all Revises: 0253_set_template_postage Create Date: 2019-01-08 13:30:48.694881+00 """ from alembic import op revision = "0254_folders_for_all" down_revision = "0253_set_template_postage" def upgrade(): op.execute( """ INSERT INTO service_permissions (service_id, permission, created_at) SELECT id, '{permission}', now() FROM services WHERE NOT EXISTS ( SELECT FROM service_permissions WHERE service_id = services.id and permission = '{permission}' ) """.format( permission="edit_folders" ) ) def downgrade(): pass
{ "content_hash": "49f13775b0ca105b326cfb2ca2137bfd", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 68, "avg_line_length": 20.487179487179485, "alnum_prop": 0.5081351689612015, "repo_name": "alphagov/notifications-api", "id": "419901a567d76e2a6a2323d9a087c86f9d2ddf56", "size": "799", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "migrations/versions/0254_folders_for_all.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "719" }, { "name": "Jinja", "bytes": "5543" }, { "name": "Makefile", "bytes": "6627" }, { "name": "Mako", "bytes": "361" }, { "name": "Procfile", "bytes": "35" }, { "name": "Python", "bytes": "3506225" }, { "name": "Shell", "bytes": "13179" } ], "symlink_target": "" }
import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ------------------------------------------------ project = u'pygorithm' version = release = u'1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] # source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' latex_documents = [ ('index', 'pygorithm.tex', u"Pygorithm", u'Omkar Pathak', 'manual'), ] # Auto-Doc options autodoc_member_order = 'bysource' # alternatively 'alphabetical' (default) or 'groupwise' # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pygorithm', u"Pygorithm", [u'Omkar Pathak'], 1) ] # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'pygorithm', u'Pygorithm documentation', u'Omkar Pathak', 'pygorithm documentation', 'One line description of project.', 'Miscellaneous'), ]
{ "content_hash": "919028d58a3ff00c1f33710f5913d32d", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 89, "avg_line_length": 31.22222222222222, "alnum_prop": 0.6578546009150992, "repo_name": "OmkarPathak/pygorithm", "id": "03d9da55fc43832c3f04139340d599efe6489cb0", "size": "2070", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "218" }, { "name": "Python", "bytes": "483492" } ], "symlink_target": "" }
from io import BytesIO import PyPDF2 as pyPdf import scrapy from ..spiders.CustomSpider import CustomSpider class PDFSpider(CustomSpider): """ This scraper can download files that are linked from a PDF. It will extract every single link from the PDF and yield a PageItem for each PDF, with the links as file_urls. """ name = "pdf" def start_requests(self): for start_url in self.database_urls: yield scrapy.Request( start_url, meta={ 'source_url': start_url, 'source_anchor': start_url }, callback=self.parse_for_files ) def extract_links(self, response): pdf = pyPdf.PdfFileReader(BytesIO(response.body)) pgs = pdf.getNumPages() for page_num in range(pgs): page = pdf.getPage(page_num) annotations = page.get('/Annots', []) for annotation in annotations: annot_object = annotation.getObject() a_tag = annot_object.get('/A') if a_tag and '/URI' in a_tag: uri = a_tag['/URI'] if isinstance(uri, pyPdf.generic.ByteStringObject): uri = uri.decode("utf-8").replace("\x00", "") yield (uri, uri)
{ "content_hash": "f85c3bdd7fa2f4cf0768be8db11c846d", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 80, "avg_line_length": 30.90909090909091, "alnum_prop": 0.538235294117647, "repo_name": "opensyllabus/osp-scraper", "id": "411c1bab05e67e5a3d37d740579dc6a5229d0b78", "size": "1360", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "osp_scraper/spiders/pdf.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "52616" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase from django.contrib.admin.helpers import InlineAdminForm from django.contrib.auth.models import User, Permission from django.contrib.contenttypes.models import ContentType from django.test import TestCase from django.test.utils import override_settings # local test models from .admin import InnerInline from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person, OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book, Profile, ProfileCollection, ParentModelWithCustomPk, ChildModel1, ChildModel2, Sighting, Novel, Chapter, FootNote, BinaryTree, SomeParentModel, SomeChildModel) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class TestInline(TestCase): urls = "admin_inlines.urls" fixtures = ['admin-views-users.xml'] def setUp(self): holder = Holder(dummy=13) holder.save() Inner(dummy=42, holder=holder).save() self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id result = self.client.login(username='super', password='secret') self.assertEqual(result, True) def tearDown(self): self.client.logout() def test_can_delete(self): """ can_delete should be passed to inlineformset factory. 
""" response = self.client.get(self.change_url) inner_formset = response.context['inline_admin_formsets'][0].formset expected = InnerInline.can_delete actual = inner_formset.can_delete self.assertEqual(expected, actual, 'can_delete must be equal') def test_readonly_stacked_inline_label(self): """Bug #13174.""" holder = Holder.objects.create(dummy=42) Inner.objects.create(holder=holder, dummy=42, readonly='') response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id) self.assertContains(response, '<label>Inner readonly label:</label>') def test_many_to_many_inlines(self): "Autogenerated many-to-many inlines are displayed correctly (#13407)" response = self.client.get('/admin/admin_inlines/author/add/') # The heading for the m2m inline block uses the right text self.assertContains(response, '<h2>Author-book relationships</h2>') # The "add another" label is correct self.assertContains(response, 'Add another Author-book relationship') # The '+' is dropped from the autogenerated form prefix (Author_books+) self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_primary(self): person = Person.objects.create(firstname='Imelda') item = OutfitItem.objects.create(name='Shoes') # Imelda likes shoes, but can't cary her own bags. data = { 'shoppingweakness_set-TOTAL_FORMS': 1, 'shoppingweakness_set-INITIAL_FORMS': 0, 'shoppingweakness_set-MAX_NUM_FORMS': 0, '_save': 'Save', 'person': person.id, 'max_weight': 0, 'shoppingweakness_set-0-item': item.id, } response = self.client.post('/admin/admin_inlines/fashionista/add/', data) self.assertEqual(response.status_code, 302) self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1) def test_tabular_non_field_errors(self): """ Ensure that non_field_errors are displayed correctly, including the right value for colspan. Refs #13510. 
""" data = { 'title_set-TOTAL_FORMS': 1, 'title_set-INITIAL_FORMS': 0, 'title_set-MAX_NUM_FORMS': 0, '_save': 'Save', 'title_set-0-title1': 'a title', 'title_set-0-title2': 'a different title', } response = self.client.post('/admin/admin_inlines/titlecollection/add/', data) # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbock. self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>') def test_no_parent_callable_lookup(self): """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable""" # Identically named callable isn't present in the parent ModelAdmin, # rendering of the add view shouldn't explode response = self.client.get('/admin/admin_inlines/novel/add/') self.assertEqual(response.status_code, 200) # View should have the child inlines section self.assertContains(response, '<div class="inline-group" id="chapter_set-group">') def test_callable_lookup(self): """Admin inline should invoke local callable when its name is listed in readonly_fields""" response = self.client.get('/admin/admin_inlines/poll/add/') self.assertEqual(response.status_code, 200) # Add parent object view should have the child inlines section self.assertContains(response, '<div class="inline-group" id="question_set-group">') # The right callabe should be used for the inline readonly_fields # column cells self.assertContains(response, '<p>Callable in QuestionInline</p>') def test_help_text(self): """ Ensure that the inlines' model field help texts are displayed when using both the stacked and tabular layouts. Ref #8190. 
""" response = self.client.get('/admin/admin_inlines/holder4/add/') self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4) self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1) # ReadOnly fields response = self.client.get('/admin/admin_inlines/capofamiglia/add/') self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Help text for ReadOnlyInline)" title="Help text for ReadOnlyInline" />', 1) def test_inline_hidden_field_no_column(self): """#18263 -- Make sure hidden fields don't get a column in tabular inlines""" parent = SomeParentModel.objects.create(name='a') SomeChildModel.objects.create(name='b', position='0', parent=parent) SomeChildModel.objects.create(name='c', position='1', parent=parent) response = self.client.get('/admin/admin_inlines/someparentmodel/%s/' % parent.pk) self.assertNotContains(response, '<td class="field-position">') self.assertContains(response, ( '<input id="id_somechildmodel_set-1-position" ' 'name="somechildmodel_set-1-position" type="hidden" value="1" />')) def test_non_related_name_inline(self): """ Ensure that multiple inlines with related_name='+' have correct form prefixes. Bug #16838. 
""" response = self.client.get('/admin/admin_inlines/capofamiglia/add/') self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True) self.assertContains(response, '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True) self.assertContains(response, '<input id="id_-1-0-name" type="text" class="vTextField" ' 'name="-1-0-name" maxlength="100" />', html=True) self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True) self.assertContains(response, '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True) self.assertContains(response, '<input id="id_-2-0-name" type="text" class="vTextField" ' 'name="-2-0-name" maxlength="100" />', html=True) @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True) def test_localize_pk_shortcut(self): """ Ensure that the "View on Site" link is correct for locales that use thousand separators """ holder = Holder.objects.create(pk=123456789, dummy=42) inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='') response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id) inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk) self.assertContains(response, inner_shortcut) def test_custom_pk_shortcut(self): """ Ensure that the "View on Site" link is correct for models with a custom primary key field. Bug #18433. 
""" parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo") child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent) child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent) response = self.client.get('/admin/admin_inlines/parentmodelwithcustompk/foo/') child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk) child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk) self.assertContains(response, child1_shortcut) self.assertContains(response, child2_shortcut) def test_create_inlines_on_inherited_model(self): """ Ensure that an object can be created with inlines when it inherits another class. Bug #19524. """ data = { 'name': 'Martian', 'sighting_set-TOTAL_FORMS': 1, 'sighting_set-INITIAL_FORMS': 0, 'sighting_set-MAX_NUM_FORMS': 0, 'sighting_set-0-place': 'Zone 51', '_save': 'Save', } response = self.client.post('/admin/admin_inlines/extraterrestrial/add/', data) self.assertEqual(response.status_code, 302) self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1) def test_custom_get_extra_form(self): bt_head = BinaryTree.objects.create(name="Tree Head") BinaryTree.objects.create(name="First Child", parent=bt_head) # The maximum number of forms should respect 'get_max_num' on the # ModelAdmin max_forms_input = '<input id="id_binarytree_set-MAX_NUM_FORMS" name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />' # The total number of forms will remain the same in either case total_forms_hidden = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />' response = self.client.get('/admin/admin_inlines/binarytree/add/') self.assertContains(response, max_forms_input % 3) self.assertContains(response, total_forms_hidden) response = self.client.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id) self.assertContains(response, max_forms_input % 2) self.assertContains(response, 
total_forms_hidden) def test_inline_nonauto_noneditable_pk(self): response = self.client.get('/admin/admin_inlines/author/add/') self.assertContains(response, '<input id="id_nonautopkbook_set-0-rand_pk" name="nonautopkbook_set-0-rand_pk" type="hidden" />', html=True) self.assertContains(response, '<input id="id_nonautopkbook_set-2-0-rand_pk" name="nonautopkbook_set-2-0-rand_pk" type="hidden" />', html=True) def test_inline_editable_pk(self): response = self.client.get('/admin/admin_inlines/author/add/') self.assertContains(response, '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" name="editablepkbook_set-0-manual_pk" type="text" />', html=True, count=1) self.assertContains(response, '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" name="editablepkbook_set-2-0-manual_pk" type="text" />', html=True, count=1) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class TestInlineMedia(TestCase): urls = "admin_inlines.urls" fixtures = ['admin-views-users.xml'] def setUp(self): result = self.client.login(username='super', password='secret') self.assertEqual(result, True) def tearDown(self): self.client.logout() def test_inline_media_only_base(self): holder = Holder(dummy=13) holder.save() Inner(dummy=42, holder=holder).save() change_url = '/admin/admin_inlines/holder/%i/' % holder.id response = self.client.get(change_url) self.assertContains(response, 'my_awesome_admin_scripts.js') def test_inline_media_only_inline(self): holder = Holder3(dummy=13) holder.save() Inner3(dummy=42, holder=holder).save() change_url = '/admin/admin_inlines/holder3/%i/' % holder.id response = self.client.get(change_url) self.assertContains(response, 'my_awesome_inline_scripts.js') def test_all_inline_media(self): holder = Holder2(dummy=13) holder.save() Inner2(dummy=42, holder=holder).save() change_url = '/admin/admin_inlines/holder2/%i/' % holder.id response = self.client.get(change_url) 
self.assertContains(response, 'my_awesome_admin_scripts.js') self.assertContains(response, 'my_awesome_inline_scripts.js') class TestInlineAdminForm(TestCase): urls = "admin_inlines.urls" def test_immutable_content_type(self): """Regression for #9362 The problem depends only on InlineAdminForm and its "original" argument, so we can safely set the other arguments to None/{}. We just need to check that the content_type argument of Child isn't altered by the internals of the inline form.""" sally = Teacher.objects.create(name='Sally') john = Parent.objects.create(name='John') joe = Child.objects.create(name='Joe', teacher=sally, parent=john) iaf = InlineAdminForm(None, None, {}, {}, joe) parent_ct = ContentType.objects.get_for_model(Parent) self.assertEqual(iaf.original.content_type, parent_ct) @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class TestInlineProtectedOnDelete(TestCase): urls = "admin_inlines.urls" fixtures = ['admin-views-users.xml'] def setUp(self): result = self.client.login(username='super', password='secret') self.assertEqual(result, True) def tearDown(self): self.client.logout() def test_deleting_inline_with_protected_delete_does_not_validate(self): lotr = Novel.objects.create(name='Lord of the rings') chapter = Chapter.objects.create(novel=lotr, name='Many Meetings') foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda') change_url = '/admin/admin_inlines/novel/%i/' % lotr.id response = self.client.get(change_url) data = { 'name': lotr.name, 'chapter_set-TOTAL_FORMS': 1, 'chapter_set-INITIAL_FORMS': 1, 'chapter_set-MAX_NUM_FORMS': 1000, '_save': 'Save', 'chapter_set-0-id': chapter.id, 'chapter_set-0-name': chapter.name, 'chapter_set-0-novel': lotr.id, 'chapter_set-0-DELETE': 'on' } response = self.client.post(change_url, data) self.assertEqual(response.status_code, 200) self.assertContains(response, "Deleting chapter %s would require deleting " "the following protected related objects: 
foot note %s" % (chapter, foot_note)) class TestInlinePermissions(TestCase): """ Make sure the admin respects permissions for objects that are edited inline. Refs #8060. """ urls = "admin_inlines.urls" def setUp(self): self.user = User(username='admin') self.user.is_staff = True self.user.is_active = True self.user.set_password('secret') self.user.save() self.author_ct = ContentType.objects.get_for_model(Author) self.holder_ct = ContentType.objects.get_for_model(Holder2) self.book_ct = ContentType.objects.get_for_model(Book) self.inner_ct = ContentType.objects.get_for_model(Inner2) # User always has permissions to add and change Authors, and Holders, # the main (parent) models of the inlines. Permissions on the inlines # vary per test. permission = Permission.objects.get(codename='add_author', content_type=self.author_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_author', content_type=self.author_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct) self.user.user_permissions.add(permission) author = Author.objects.create(pk=1, name='The Author') book = author.books.create(name='The inline Book') self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id # Get the ID of the automatically created intermediate model for thw Author-Book m2m author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book) self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk holder = Holder2.objects.create(dummy=13) inner2 = Inner2.objects.create(dummy=42, holder=holder) self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id self.inner2_id = inner2.id self.assertEqual( self.client.login(username='admin', password='secret'), True) def 
tearDown(self): self.client.logout() def test_inline_add_m2m_noperm(self): response = self.client.get('/admin/admin_inlines/author/add/') # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_noperm(self): response = self.client.get('/admin/admin_inlines/holder2/add/') # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_change_m2m_noperm(self): response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_change_fk_noperm(self): response = self.client.get(self.holder_change_url) # No permissions on Inner2s, so no inline self.assertNotContains(response, '<h2>Inner2s</h2>') self.assertNotContains(response, 'Add another Inner2') self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"') def test_inline_add_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get('/admin/admin_inlines/author/add/') # No change permission on Books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') def test_inline_add_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) 
self.user.user_permissions.add(permission) response = self.client.get('/admin/admin_inlines/holder2/add/') # Add permission on inner2s, so we get the inline self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="3" name="inner2_set-TOTAL_FORMS" />', html=True) def test_inline_change_m2m_add_perm(self): permission = Permission.objects.get(codename='add_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # No change permission on books, so no inline self.assertNotContains(response, '<h2>Author-book relationships</h2>') self.assertNotContains(response, 'Add another Author-Book Relationship') self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"') self.assertNotContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_m2m_change_perm(self): permission = Permission.objects.get(codename='change_book', content_type=self.book_ct) self.user.user_permissions.add(permission) response = self.client.get(self.author_change_url) # We have change perm on books, so we can add/change/delete inlines self.assertContains(response, '<h2>Author-book relationships</h2>') self.assertContains(response, 'Add another Author-book relationship') self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" ' 'value="4" name="Author_books-TOTAL_FORMS" />', html=True) self.assertContains(response, '<input type="hidden" id="id_Author_books-0-id" ' 'value="%i" name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id, html=True) self.assertContains(response, 'id="id_Author_books-0-DELETE"') def test_inline_change_fk_add_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add 
permission on inner2s, so we can add but not modify existing self.assertContains(response, '<h2>Inner2s</h2>') self.assertContains(response, 'Add another Inner2') # 3 extra forms only, not the existing instance form self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="3" name="inner2_set-TOTAL_FORMS" />', html=True) self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" ' 'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True) def test_inline_change_fk_change_perm(self): permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change permission on inner2s, so we can change existing but not add new self.assertContains(response, '<h2>Inner2s</h2>') # Just the one form for existing instances self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="1" name="inner2_set-TOTAL_FORMS" />', html=True) self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" ' 'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True) # max-num 0 means we can't add new ones self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" ' 'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True) def test_inline_change_fk_add_change_perm(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Add/change perm, so we can add new and change existing self.assertContains(response, '<h2>Inner2s</h2>') # One form for existing instance and three extra for new self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="4" 
name="inner2_set-TOTAL_FORMS" />', html=True) self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" ' 'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True) def test_inline_change_fk_change_del_perm(self): permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # Change/delete perm on inner2s, so we can change/delete existing self.assertContains(response, '<h2>Inner2s</h2>') # One form for existing instance only, no new self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="1" name="inner2_set-TOTAL_FORMS" />', html=True) self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" ' 'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') def test_inline_change_fk_all_perms(self): permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct) self.user.user_permissions.add(permission) response = self.client.get(self.holder_change_url) # All perms on inner2s, so we can add/change/delete self.assertContains(response, '<h2>Inner2s</h2>') # One form for existing instance only, three for new self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" ' 'value="4" name="inner2_set-TOTAL_FORMS" />', html=True) self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" ' 'value="%i" name="inner2_set-0-id" />' % self.inner2_id, 
html=True) self.assertContains(response, 'id="id_inner2_set-0-DELETE"') @override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)) class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase): available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps fixtures = ['admin-views-users.xml'] urls = "admin_inlines.urls" webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver' def test_add_stackeds(self): """ Ensure that the "Add another XXX" link correctly adds items to the stacked formset. """ self.admin_login(username='super', password='secret') self.selenium.get('%s%s' % (self.live_server_url, '/admin/admin_inlines/holder4/add/')) inline_id = '#inner4stacked_set-group' rows_length = lambda: len(self.selenium.find_elements_by_css_selector( '%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() self.assertEqual(rows_length(), 4) def test_delete_stackeds(self): self.admin_login(username='super', password='secret') self.selenium.get('%s%s' % (self.live_server_url, '/admin/admin_inlines/holder4/add/')) inline_id = '#inner4stacked_set-group' rows_length = lambda: len(self.selenium.find_elements_by_css_selector( '%s .dynamic-inner4stacked_set' % inline_id)) self.assertEqual(rows_length(), 3) add_button = self.selenium.find_element_by_link_text( 'Add another Inner4 stacked') add_button.click() add_button.click() self.assertEqual(rows_length(), 5, msg="sanity check") for delete_link in self.selenium.find_elements_by_css_selector( '%s .inline-deletelink' % inline_id): delete_link.click() self.assertEqual(rows_length(), 3) def test_add_inlines(self): """ Ensure that the "Add another XXX" link correctly adds items to the inline form. 
""" self.admin_login(username='super', password='secret') self.selenium.get('%s%s' % (self.live_server_url, '/admin/admin_inlines/profilecollection/add/')) # Check that there's only one inline to start with and that it has the # correct ID. self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')), 1) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[0].get_attribute('id'), 'profile_set-0') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1) # Add an inline self.selenium.find_element_by_link_text('Add another Profile').click() # Check that the inline has been added, that it has the right id, and # that it contains the right fields. self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')), 2) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1) # Let's add another one to be sure self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')), 3) self.assertEqual(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2') self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1) 
self.assertEqual(len(self.selenium.find_elements_by_css_selector( '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1) # Enter some data and click 'Save' self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1') self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2') self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1') self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2') self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1') self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2') self.selenium.find_element_by_xpath('//input[@value="Save"]').click() self.wait_page_loaded() # Check that the objects have been created in the database self.assertEqual(ProfileCollection.objects.all().count(), 1) self.assertEqual(Profile.objects.all().count(), 3) def test_delete_inlines(self): self.admin_login(username='super', password='secret') self.selenium.get('%s%s' % (self.live_server_url, '/admin/admin_inlines/profilecollection/add/')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 5) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form 
tr.dynamic-profile_set#profile_set-2')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1) # Click on a few delete buttons self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click() self.selenium.find_element_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click() # Verify that they're gone and that the IDs have been re-sequenced self.assertEqual(len(self.selenium.find_elements_by_css_selector( '#profile_set-group table tr.dynamic-profile_set')), 3) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1) self.assertEqual(len(self.selenium.find_elements_by_css_selector( 'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1) def test_alternating_rows(self): self.admin_login(username='super', password='secret') self.selenium.get('%s%s' % (self.live_server_url, '/admin/admin_inlines/profilecollection/add/')) # Add a few inlines self.selenium.find_element_by_link_text('Add another Profile').click() self.selenium.find_element_by_link_text('Add another Profile').click() row_selector = 'form#profilecollection_form tr.dynamic-profile_set' self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows") self.assertEqual(len(self.selenium.find_elements_by_css_selector( "%s.row2" % row_selector)), 1, msg="Expect one row2 styled row") class SeleniumChromeTests(SeleniumFirefoxTests): webdriver_class = 
'selenium.webdriver.chrome.webdriver.WebDriver' class SeleniumIETests(SeleniumFirefoxTests): webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
{ "content_hash": "a5afdf216c2f2caecbcf4ce135ccb2f2", "timestamp": "", "source": "github", "line_count": 702, "max_line_length": 233, "avg_line_length": 52.30911680911681, "alnum_prop": 0.6526783039677568, "repo_name": "ericholscher/django", "id": "2d0a7edd10abd6c1765439724dc05751ca811083", "size": "36721", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/admin_inlines/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "51177" }, { "name": "JavaScript", "bytes": "102377" }, { "name": "Python", "bytes": "9011891" }, { "name": "Shell", "bytes": "12137" } ], "symlink_target": "" }
from UI import UI from page import Page import time display = UI () illustrations = [ "test_bookpage1.png", "test_bookpage2.png", "test_bookpage3.png", "test_bookpage4.png" ] raw_pages = [ "One fish", "two fish", "red fish", "blue fish", "Hello, this is some test text.", "This is some more text.", "This is some more text.", "This is some more text.", "This is some more text.", "This is the last page." ] page_callbacks = [] def make_page_callback (i): def ret (): prevpage_callback = None nextpage_callback = None if i != 0: prevpage_callback = page_callbacks[i-1] if i+1 != len (raw_pages): nextpage_callback = page_callbacks[i+1] illustration_path = "test_bookpage.png" if i < len(illustrations): illustration_path = illustrations[i] page = Page (display, raw_pages[i], illustration_path, prevpage_callback, nextpage_callback) if i != 0: page.set_confidences ([1-float(i)/float(len(raw_pages)-1)]*page.numwords) page.show () return ret for i in xrange (len (raw_pages)): page_callbacks += [make_page_callback (i)] for i in xrange (16): display.add_book_to_library (page_callbacks[0], "test_bookcover.png") display.display_library () while True: display.flush_callback_queue () time.sleep (0.1)
{ "content_hash": "a650c1d69344518aaff85bbf96ede0eb", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 100, "avg_line_length": 22.6, "alnum_prop": 0.5786249149081008, "repo_name": "Max-E/CS361-Reading-App", "id": "91168f4adf0560094e4753d9ca3f5d84b870e3c2", "size": "1469", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "UI/UI_TestHarness.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "1610" }, { "name": "Python", "bytes": "18309" } ], "symlink_target": "" }
import os import platform import sys import unittest from contextlib import contextmanager from io import StringIO from swift_build_support.swift_build_support import migration from swift_build_support.swift_build_support.SwiftBuildSupport import ( get_all_preset_names, get_preset_options, ) from . import expected_options as eo from .. import argparse from .. import driver_arguments FILE_PATH = os.path.abspath(__file__) TESTS_PATH = os.path.abspath(os.path.join(FILE_PATH, os.pardir)) BUILD_SWIFT_PATH = os.path.abspath(os.path.join(TESTS_PATH, os.pardir)) UTILS_PATH = os.path.abspath(os.path.join(BUILD_SWIFT_PATH, os.pardir)) BUILD_SCRIPT_IMPL = os.path.join(UTILS_PATH, 'build-script-impl') PRESETS_FILES = [ os.path.join(UTILS_PATH, 'build-presets.ini'), ] class ParserError(Exception): pass @contextmanager def redirect_stderr(stream=None): stream = stream or StringIO() old_stderr, sys.stderr = sys.stderr, stream try: yield stream finally: sys.stderr = old_stderr @contextmanager def redirect_stdout(stream=None): stream = stream or StringIO() old_stdout, sys.stdout = sys.stdout, stream try: yield stream finally: sys.stdout = old_stdout def _load_all_presets(presets_files): preset_names = get_all_preset_names(presets_files) # Hack to filter out mixins which are not expected to be valid presets preset_names = [n for n in preset_names if not n.startswith('mixin')] substitutions = { 'install_destdir': '/tmp/install', 'install_symroot': '/tmp/symroot', 'installable_package': '/tmp/xcode-xyz-root.tar.gz', } presets = dict() for name in preset_names: try: # Attempt to parse preset presets[name] = get_preset_options(substitutions, presets_files, name) except SystemExit: continue return presets class TestDriverArgumentParserMeta(type): """Metaclass used to dynamically generate test methods for each of the individual options accepted by the parser and methods to validate all of the presets. 
""" def __new__(cls, name, bases, attrs): # Generate tests for each default value for dest, value in eo.EXPECTED_DEFAULTS.items(): test_name = 'test_default_value_' + dest attrs[test_name] = cls.generate_default_value_test(dest, value) # Generate tests for each expected option for option in eo.EXPECTED_OPTIONS: test_name = 'test_option_' + option.sanitized_string() attrs[test_name] = cls.generate_option_test(option) # Generate tests for each preset presets = _load_all_presets(PRESETS_FILES) for name, args in presets.items(): test_name = 'test_preset_' + name attrs[test_name] = cls.generate_preset_test(name, args) return super(TestDriverArgumentParserMeta, cls).__new__( cls, name, bases, attrs) @classmethod def generate_default_value_test(cls, dest, default_value): def test(self): with self.assertNotRaises(ParserError): parsed_values = self.parse_default_args([]) parsed_value = getattr(parsed_values, dest) if default_value.__class__ is str: parsed_value = str(parsed_value) self.assertEqual(default_value, parsed_value, 'Invalid default value for "{}": {} != {}' .format(dest, default_value, parsed_value)) return test @classmethod def _generate_help_option_test(cls, option): def test(self): with redirect_stdout() as output, self.assertRaises(ParserError): self.parse_args([option.option_string]) self.assertNotEmpty(output) return test @classmethod def _generate_set_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): namespace = self.parse_args([option.option_string]) self.assertEqual(getattr(namespace, option.dest), option.value) with self.assertRaises(ParserError): self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_set_true_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): # TODO: Move to unit-tests for the action class namespace = self.parse_args([]) self.assertFalse(getattr(namespace, option.dest)) namespace = self.parse_args([option.option_string]) 
self.assertTrue(getattr(namespace, option.dest)) return test @classmethod def _generate_set_false_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): # TODO: Move to unit-tests for the action class namespace = self.parse_args([]) self.assertTrue(getattr(namespace, option.dest)) namespace = self.parse_args([option.option_string]) self.assertFalse(getattr(namespace, option.dest)) return test @classmethod def _generate_enable_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): # TODO: Move to unit-tests for the action class # Test parsing True values self.parse_args([option.option_string, '1']) self.parse_args([option.option_string, 'true']) self.parse_args([option.option_string, 'True']) self.parse_args([option.option_string, 'TRUE']) # TODO: Move to unit-tests for the action class # Test parsing False values self.parse_args([option.option_string, '0']) self.parse_args([option.option_string, 'false']) self.parse_args([option.option_string, 'False']) self.parse_args([option.option_string, 'FALSE']) # TODO: Move to unit-tests for the action class # Test default value namespace = self.parse_args([option.option_string]) self.assertTrue(getattr(namespace, option.dest)) # Test setting value to True namespace = self.parse_args([option.option_string, 'True']) self.assertTrue(getattr(namespace, option.dest)) # Test setting value to False namespace = self.parse_args([option.option_string, 'False']) self.assertFalse(getattr(namespace, option.dest)) return test @classmethod def _generate_disable_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): # TODO: Move to unit-tests for the action class # Test parsing True values self.parse_args([option.option_string, '1']) self.parse_args([option.option_string, 'true']) self.parse_args([option.option_string, 'True']) self.parse_args([option.option_string, 'TRUE']) # TODO: Move to unit-tests for the action class # Test parsing False values 
self.parse_args([option.option_string, '0']) self.parse_args([option.option_string, 'false']) self.parse_args([option.option_string, 'False']) self.parse_args([option.option_string, 'FALSE']) # TODO: Move to unit-tests for the action class # Test default value namespace = self.parse_args([option.option_string]) self.assertFalse(getattr(namespace, option.dest)) # Test setting value to True resulting in False namespace = self.parse_args([option.option_string, 'True']) self.assertFalse(getattr(namespace, option.dest)) # Test setting value to False resulting in True namespace = self.parse_args([option.option_string, 'False']) self.assertTrue(getattr(namespace, option.dest)) return test @classmethod def _generate_choices_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): for choice in option.choices: namespace = self.parse_args( [option.option_string, str(choice)]) self.assertEqual(getattr(namespace, option.dest), choice) with self.assertRaises(ParserError): self.parse_args([option.option_string, 'INVALID']) return test @classmethod def _generate_int_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): for i in [0, 1, 42]: namespace = self.parse_args([option.option_string, str(i)]) self.assertEqual(int(getattr(namespace, option.dest)), i) # FIXME: int-type options should not accept non-int strings # with self.assertRaises(ParserError): # self.parse_args([option.option_string, str(0.0)]) # self.parse_args([option.option_string, str(1.0)]) # self.parse_args([option.option_string, str(3.14)]) # self.parse_args([option.option_string, 'NaN']) return test @classmethod def _generate_str_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_path_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): self.parse_args([option.option_string, sys.executable]) # FIXME: path-type options 
should not accept non-path inputs # with self.assertRaises(ParserError): # self.parse_args([option.option_string, 'foo']) return test @classmethod def _generate_append_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): # Range size is arbitrary, just needs to be more than once for i in range(1, 4): namespace = self.parse_args( [option.option_string, 'ARG'] * i) self.assertEqual(getattr(namespace, option.dest), ['ARG'] * i) return test @classmethod def _generate_unsupported_option_test(cls, option): def test(self): with self.assertRaises(ParserError): self.parse_args([option.option_string]) return test @classmethod def _generate_build_script_impl_option_test(cls, option): def test(self): with self.assertNotRaises(ParserError): namespace, unknown_args = self.parse_args_and_unknown_args([]) self.assertFalse(hasattr(namespace, option.dest)) self.assertEqual(unknown_args, []) namespace, unknown_args = self.parse_args_and_unknown_args( [option.option_string]) # The argument should never show up in the namespace self.assertFalse(hasattr(namespace, option.dest)) # It should instead be forwareded to unkown_args self.assertEqual(unknown_args, [option.option_string]) return test @classmethod def generate_option_test(cls, option): generate_test_funcs = { eo.HelpOption: cls._generate_help_option_test, eo.SetOption: cls._generate_set_option_test, eo.SetTrueOption: cls._generate_set_true_option_test, eo.SetFalseOption: cls._generate_set_false_option_test, eo.EnableOption: cls._generate_enable_option_test, eo.DisableOption: cls._generate_disable_option_test, eo.ChoicesOption: cls._generate_choices_option_test, eo.IntOption: cls._generate_int_option_test, eo.StrOption: cls._generate_str_option_test, eo.PathOption: cls._generate_path_option_test, eo.AppendOption: cls._generate_append_option_test, eo.UnsupportedOption: cls._generate_unsupported_option_test, eo.BuildScriptImplOption: cls._generate_build_script_impl_option_test, # IgnoreOptions should be 
manually tested eo.IgnoreOption: lambda self: None, } test_func = generate_test_funcs.get(option.__class__, None) if test_func is not None: return test_func(option) # Catch-all meaningless test return lambda self: \ self.fail('unexpected option "{}"'.format(option.option_string)) @classmethod def generate_preset_test(cls, preset_name, preset_args): def test(self): try: # Windows cannot run build-script-impl to check the impl args. is_windows = platform.system() == 'Windows' self.parse_default_args(preset_args, check_impl_args=not is_windows) except ParserError as e: self.fail('failed to parse preset "{}": {}'.format( preset_name, e)) return test class TestDriverArgumentParser(unittest.TestCase): __metaclass__ = TestDriverArgumentParserMeta @contextmanager def _quiet_output(self): with open(os.devnull, 'w') as devnull: with redirect_stderr(devnull), redirect_stdout(devnull): yield def _parse_args(self, args): try: return migration.parse_args(self.parser, args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse arguments: ' + str(args), e) def _check_impl_args(self, namespace): assert hasattr(namespace, 'build_script_impl_args') try: migration.check_impl_args(BUILD_SCRIPT_IMPL, namespace.build_script_impl_args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse impl arguments: ' + str(namespace.build_script_impl_args), e) def parse_args_and_unknown_args(self, args, namespace=None): if namespace is None: namespace = argparse.Namespace() with self._quiet_output(): try: namespace, unknown_args =\ super(self.parser.__class__, self.parser)\ .parse_known_args(args, namespace) namespace, unknown_args =\ migration.process_disambiguation_arguments(namespace, unknown_args) except (SystemExit, argparse.ArgumentError) as e: raise ParserError('failed to parse arguments: ' + str(args), e) return namespace, unknown_args def parse_args(self, args, namespace=None): namespace, unknown_args = self.parse_args_and_unknown_args(args, namespace) 
if unknown_args: raise ParserError('unknown arguments: ' + str(unknown_args)) return namespace def parse_default_args(self, args, check_impl_args=False): with self._quiet_output(): namespace = self._parse_args(args) if check_impl_args: self._check_impl_args(namespace) return namespace @contextmanager def assertNotRaises(self, exception): assert issubclass(exception, BaseException) try: yield except exception as e: self.fail(str(e)) def setUp(self): self.parser = driver_arguments.create_argument_parser() # ------------------------------------------------------------------------- def test_expected_options_exhaustive(self): """Test that we are exhaustively testing all options accepted by the parser. If this test if failing then the parser accepts more options than currently being tested, meaning the EXPECTED_OPTIONS list in build_swift/tests/expected_options.py should be updated to include the missing options. """ expected_options = {o.option_string for o in eo.EXPECTED_OPTIONS} # aggregate and flatten the options_strings accepted by the parser actual_options = [a.option_strings for a in self.parser._actions] actual_options = set(sum(actual_options, [])) diff = actual_options - expected_options if len(diff) > 0: self.fail('non-exhaustive expected options, missing: {}' .format(diff)) def test_expected_options_have_default_values(self): """Test that all the options in EXPECTED_OPTIONS have an associated default value. 
""" skip_option_classes = [ eo.HelpOption, eo.IgnoreOption, eo.UnsupportedOption, eo.BuildScriptImplOption, ] missing_defaults = set() for option in eo.EXPECTED_OPTIONS: if option.__class__ in skip_option_classes: continue if option.dest not in eo.EXPECTED_DEFAULTS: missing_defaults.add(option.dest) if len(missing_defaults) > 0: self.fail('non-exhaustive default values for options, missing: {}' .format(missing_defaults)) # ------------------------------------------------------------------------- # Manual option tests def test_option_clang_compiler_version(self): option_string = '--clang-compiler-version' with self.assertNotRaises(ParserError): self.parse_default_args([option_string, '5.0.0']) self.parse_default_args([option_string, '5.0.1']) self.parse_default_args([option_string, '5.0.0.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '1.2']) self.parse_default_args([option_string, '0.0.0.0.1']) def test_option_clang_user_visible_version(self): option_string = '--clang-user-visible-version' with self.assertNotRaises(ParserError): self.parse_default_args([option_string, '5.0.0']) self.parse_default_args([option_string, '5.0.1']) self.parse_default_args([option_string, '5.0.0.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '1.2']) self.parse_default_args([option_string, '0.0.0.0.1']) def test_option_swift_compiler_version(self): option_string = '--swift-compiler-version' with self.assertNotRaises(ParserError): self.parse_default_args([option_string, '4.1']) self.parse_default_args([option_string, '4.0.1']) self.parse_default_args([option_string, '200.99.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '0.0.0.1']) def test_option_swift_user_visible_version(self): option_string = '--swift-user-visible-version' with 
self.assertNotRaises(ParserError): self.parse_default_args([option_string, '4.1']) self.parse_default_args([option_string, '4.0.1']) self.parse_default_args([option_string, '200.99.1']) with self.assertRaises(ParserError): self.parse_default_args([option_string, '1']) self.parse_default_args([option_string, '0.0.0.1']) def test_option_I(self): with self.assertRaises(ValueError): self.parse_default_args(['-I']) def test_option_ios_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--ios-all']) def test_option_tvos_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--tvos-all']) def test_option_watchos_all(self): with self.assertRaises(ValueError): self.parse_default_args(['--watchos-all']) # ------------------------------------------------------------------------- # Implied defaults tests def test_implied_defaults_assertions(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--assertions']) self.assertTrue(namespace.cmark_assertions) self.assertTrue(namespace.llvm_assertions) self.assertTrue(namespace.swift_assertions) self.assertTrue(namespace.swift_stdlib_assertions) def test_implied_defaults_cmark_build_variant(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--debug-cmark']) self.assertTrue(namespace.build_cmark) def test_implied_defaults_lldb_build_variant(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--debug-lldb']) self.assertTrue(namespace.build_lldb) with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--lldb-assertions']) self.assertTrue(namespace.build_lldb) def test_implied_defaults_build_variant(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--debug']) self.assertEqual(namespace.cmark_build_variant, 'Debug') self.assertEqual(namespace.foundation_build_variant, 'Debug') self.assertEqual(namespace.libdispatch_build_variant, 'Debug') 
self.assertEqual(namespace.libicu_build_variant, 'Debug') self.assertEqual(namespace.lldb_build_variant, 'Debug') self.assertEqual(namespace.llvm_build_variant, 'Debug') self.assertEqual(namespace.swift_build_variant, 'Debug') self.assertEqual(namespace.swift_stdlib_build_variant, 'Debug') def test_implied_defaults_skip_build(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-build']) self.assertFalse(namespace.build_benchmarks) self.assertFalse(namespace.build_linux) self.assertFalse(namespace.build_android) self.assertFalse(namespace.build_freebsd) self.assertFalse(namespace.build_cygwin) self.assertFalse(namespace.build_osx) self.assertFalse(namespace.build_ios) self.assertFalse(namespace.build_tvos) self.assertFalse(namespace.build_watchos) self.assertFalse(namespace.build_foundation) self.assertFalse(namespace.build_libdispatch) self.assertFalse(namespace.build_libicu) self.assertFalse(namespace.build_lldb) self.assertFalse(namespace.build_llbuild) self.assertFalse(namespace.build_libcxx) self.assertFalse(namespace.build_playgroundsupport) self.assertFalse(namespace.build_swiftpm) self.assertFalse(namespace.build_xctest) def test_implied_defaults_skip_build_ios(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-build-ios']) self.assertFalse(namespace.build_ios_device) self.assertFalse(namespace.build_ios_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_ios_simulator) def test_implied_defaults_skip_build_tvos(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-build-tvos']) self.assertFalse(namespace.build_tvos_device) self.assertFalse(namespace.build_tvos_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_tvos_simulator) def test_implied_defaults_skip_build_watchos(self): with 
self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-build-watchos']) self.assertFalse(namespace.build_watchos_device) self.assertFalse(namespace.build_watchos_simulator) # Also implies that the tests should be skipped self.assertFalse(namespace.test_watchos_host) self.assertFalse(namespace.test_watchos_simulator) def test_implied_defaults_validation_test(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--validation-test']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimized(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--test-optimized']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimize_for_size(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--test-optimize-for-size']) self.assertTrue(namespace.test) def test_implied_defaults_test_optimize_none_with_implicit_dynamic(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args( ['--test-optimize-none-with-implicit-dynamic']) self.assertTrue(namespace.test) def test_implied_defaults_skip_all_tests(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args([ '--test', '0', '--validation-test', '0', '--long-test', '0', '--stress-test', '0', ]) self.assertFalse(namespace.test_linux) self.assertFalse(namespace.test_freebsd) self.assertFalse(namespace.test_cygwin) self.assertFalse(namespace.test_osx) self.assertFalse(namespace.test_ios) self.assertFalse(namespace.test_tvos) self.assertFalse(namespace.test_watchos) def test_implied_defaults_skip_test_ios(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-test-ios']) self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_ios_simulator) def test_implied_defaults_skip_test_tvos(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-test-tvos']) 
self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_tvos_simulator) def test_implied_defaults_skip_test_watchos(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-test-watchos']) self.assertFalse(namespace.test_watchos_host) self.assertFalse(namespace.test_watchos_simulator) def test_implied_defaults_skip_build_android(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--android', '0']) self.assertFalse(namespace.test_android_host) with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--skip-build-android']) self.assertFalse(namespace.test_android_host) def test_implied_defaults_host_test(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--host-test', '0']) self.assertFalse(namespace.test_ios_host) self.assertFalse(namespace.test_tvos_host) self.assertFalse(namespace.test_watchos_host) self.assertFalse(namespace.test_android_host) self.assertFalse(namespace.build_libparser_only) def test_build_lib_swiftsyntaxparser_only(self): with self.assertNotRaises(ParserError): namespace = self.parse_default_args(['--build-libparser-only']) self.assertTrue(namespace.build_libparser_only) if __name__ == '__main__': unittest.main()
{ "content_hash": "35c8e6c94ee3510baff82743fa11c1cb", "timestamp": "", "source": "github", "line_count": 733, "max_line_length": 79, "avg_line_length": 39.282401091405184, "alnum_prop": 0.603459053969577, "repo_name": "lorentey/swift", "id": "a93f412631b5db960ca7347ef4f4332cd54c9df3", "size": "29140", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "utils/build_swift/tests/test_driver_arguments.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "13203" }, { "name": "C", "bytes": "232100" }, { "name": "C++", "bytes": "34440043" }, { "name": "CMake", "bytes": "541520" }, { "name": "D", "bytes": "1107" }, { "name": "DTrace", "bytes": "2438" }, { "name": "Emacs Lisp", "bytes": "57302" }, { "name": "LLVM", "bytes": "70517" }, { "name": "MATLAB", "bytes": "2576" }, { "name": "Makefile", "bytes": "1841" }, { "name": "Objective-C", "bytes": "429426" }, { "name": "Objective-C++", "bytes": "249901" }, { "name": "Perl", "bytes": "2211" }, { "name": "Python", "bytes": "1612445" }, { "name": "Roff", "bytes": "3495" }, { "name": "Ruby", "bytes": "2091" }, { "name": "Shell", "bytes": "189755" }, { "name": "Swift", "bytes": "31105346" }, { "name": "Vim Script", "bytes": "16883" }, { "name": "sed", "bytes": "1050" } ], "symlink_target": "" }
from datetime import datetime from unittest import mock from django.core.urlresolvers import reverse from django.utils.timezone import make_aware from django_webtest import WebTest from testing.common import make_user, login_user from apps.links.models import Link class LinkTest(WebTest): def setUp(self): self.logged_in_user = make_user() self.other_user = make_user( userid='other_user', email='fake2@dstl.gov.uk', name='Fake2 Fakerly') self.assertTrue(login_user(self, self.logged_in_user)) def test_create_link(self): response = self.app.get(reverse('link-create')) form = response.form self.assertEquals( response.html.h1.get_text(strip=True), 'ToolAdd new tool' ) self.assertEquals(form['name'].value, '') self.assertEquals(form['description'].value, '') self.assertEquals(form['destination'].value, '') self.assertEquals(form['categories'].value, '') form['name'] = 'Google' form['destination'] = 'https://google.com' response = form.submit().follow() self.assertIn('Google', response.html.find('h1').text) self.assertIn( 'Fake Fakerly', response.html.find(id='link_owner').text, ) def test_create_link_external(self): with mock.patch('django.utils.timezone.now') as mock_now: mock_now.return_value = make_aware(datetime(2016, 3, 1, 10, 0, 0)) form = self.app.get(reverse('link-create')).form self.assertEquals(form['name'].value, '') self.assertEquals(form['description'].value, '') self.assertEquals(form['destination'].value, '') self.assertEquals(form['categories'].value, '') self.assertEqual(form['is_external'].value, 'False') form['name'] = 'Google' form['destination'] = 'https://google.com' form['is_external'].select('True') response = form.submit().follow() self.assertIn('Google', response.html.find('h1').text) self.assertIn( 'External', response.html.find(id="is_external").text ) self.assertIn( '01/03/2016', response.html.find(id="date_added").text ) self.assertIn( 'Fake Fakerly', response.html.find(id='link_owner').text, ) def test_update_link_button(self): existing_link 
= Link( name='Wikimapia', description='A great mapping application', destination='https://wikimapia.org', owner=self.logged_in_user, is_external=False) existing_link.save() response = self.app.get( reverse('link-detail', kwargs={'pk': existing_link.pk})) edit_button = response.html.find(None, {"id": "edit-button"}) self.assertIsNotNone(edit_button) self.assertEqual( reverse('link-edit', kwargs={'pk': existing_link.pk}), edit_button.get('href') ) def test_update_link_external(self): existing_link = Link( name='Wikimapia', description='A great mapping application', destination='https://wikimapia.org', owner=self.logged_in_user, is_external=False) existing_link.save() response = self.app.get( reverse('link-edit', kwargs={'pk': existing_link.pk})) form = response.form self.assertEquals(response.html.h1.get_text(strip=True), 'ToolEdit %s' % existing_link.name) self.assertEquals(form['name'].value, 'Wikimapia') self.assertEquals(form['description'].value, 'A great mapping application') self.assertEquals(form['destination'].value, 'https://wikimapia.org') self.assertEqual(form['is_external'].value, 'False') form['is_external'].select('True') response = form.submit().follow() self.assertIn('Wikimapia', response.html.find('h1').text) self.assertIn('External', response.html.find(id="is_external").text) def test_create_empty_link(self): form = self.app.get(reverse('link-create')).form self.assertEquals(form['name'].value, '') self.assertEquals(form['description'].value, '') self.assertEquals(form['destination'].value, '') self.assertEquals(form['categories'].value, '') form['name'] = '' form['destination'] = '' response = form.submit() error_list = response.html.find('ul', {'class': 'form-error-list'}) self.assertIsNotNone(error_list) self.assertEqual(len(error_list.findChildren('li')), 2) self.assertEqual(len(error_list.findChildren('a')), 2) self.assertIsNotNone(error_list.findChildren( 'a', {"href": '#id_name_group'} )) self.assertIsNotNone(error_list.findChildren( 'a', 
{"href": '#id_destination_group'} )) name_group = response.html.find(id='id_name_group') self.assertIsNotNone(name_group) name_errors = response.html.find(id='id_name_error_list') self.assertIsNotNone(name_errors) self.assertEqual(len(name_errors.findChildren()), 1) destination_errors = response.html.find(id='id_destination_error_list') self.assertIsNotNone(destination_errors) self.assertEqual(len(destination_errors.findChildren()), 1) form = response.form self.assertEquals(form['name'].value, '') self.assertEquals(form['description'].value, '') self.assertEquals(form['destination'].value, '') self.assertEquals(form['categories'].value, '') def test_edit_link_render(self): existing_link = Link( name='Wikimapia', description='A great mapping application', destination='https://wikimapia.org', owner=self.logged_in_user) existing_link.save() form = self.app.get( reverse('link-edit', kwargs={'pk': existing_link.pk})).form self.assertEquals(form['name'].value, 'Wikimapia') self.assertEquals(form['description'].value, 'A great mapping application') self.assertEquals(form['destination'].value, 'https://wikimapia.org') def test_edit_link_submit(self): existing_link = Link( name='Wikimapia', description='A great mapping application', destination='https://wikimapia.org', owner=self.logged_in_user) existing_link.save() form = self.app.get( reverse('link-edit', kwargs={'pk': existing_link.pk})).form self.assertEquals(form['name'].value, 'Wikimapia') self.assertEquals(form['description'].value, 'A great mapping application') self.assertEquals(form['destination'].value, 'https://wikimapia.org') form['name'].value = 'Bing Maps' form['description'].value = 'Another great mapping application' form['destination'].value = 'https://maps.bing.com' response = form.submit() self.assertEquals( 'http://localhost:80%s' % reverse( 'link-detail', kwargs={'pk': existing_link.pk}), response.location ) response = response.follow() self.assertIn('Bing Maps', response) self.assertNotIn('Wikimapia', 
response) self.assertIn('Another great mapping application', response) self.assertNotIn('A great mapping application', response) self.assertIn('https://maps.bing.com', response) self.assertNotIn('https://wikimapia.org', response) def test_update_to_empty_link(self): existing_link = Link( name='Wikimapia', description='A great mapping application', destination='https://wikimapia.org', owner=self.logged_in_user) existing_link.save() form = self.app.get( reverse('link-edit', kwargs={'pk': existing_link.pk})).form self.assertEquals(form['name'].value, 'Wikimapia') self.assertEquals(form['description'].value, 'A great mapping application') self.assertEquals(form['destination'].value, 'https://wikimapia.org') form['name'].value = '' form['description'].value = 'Another great mapping application' form['destination'].value = 'https://maps.bing.com' response = form.submit() error_list = response.html.find('ul', {'class': 'form-error-list'}) self.assertIsNotNone(error_list) self.assertEqual(len(error_list.findChildren('li')), 1) self.assertEqual(len(error_list.findChildren('a')), 1) self.assertIsNotNone(error_list.findChildren('a')[0].attrs['href']) self.assertEqual( error_list.findChildren('a')[0].attrs['href'], '#id_name_group' ) name_group = response.html.find(id='id_name_group') self.assertIsNotNone(name_group) name_errors = response.html.find(id='id_name_error_list') self.assertIsNotNone(name_errors) self.assertEqual(len(name_errors.findChildren()), 1) form = response.form self.assertEquals(form['name'].value, '') self.assertEquals(form['description'].value, 'Another great mapping application') self.assertEquals(form['destination'].value, 'https://maps.bing.com') self.assertEquals(form['categories'].value, '')
{ "content_hash": "2c6423badc1d8f55c552cba9dcb67017", "timestamp": "", "source": "github", "line_count": 265, "max_line_length": 79, "avg_line_length": 36.83396226415094, "alnum_prop": 0.5889765392890073, "repo_name": "dstl/lighthouse", "id": "58ae98c301b82a77c1276639c41c209f35963ed5", "size": "9803", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/links/tests/test_create_update.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "171981" }, { "name": "HTML", "bytes": "188493" }, { "name": "JavaScript", "bytes": "175233" }, { "name": "Python", "bytes": "301851" }, { "name": "Ruby", "bytes": "299" }, { "name": "Shell", "bytes": "8326" } ], "symlink_target": "" }
from __future__ import absolute_import import httplib import functools import flask from flask.ext.login import current_user from guardrail.core import decorators from guardrail.ext.sqlalchemy import SqlalchemyLoader from guardrail.ext.sqlalchemy import SqlalchemyPermissionManager import models manager = SqlalchemyPermissionManager(models.db.session) def user_loader(*args, **kwargs): return current_user._get_current_object() error_messages = { decorators.AGENT_NOT_FOUND: httplib.NOT_FOUND, decorators.TARGET_NOT_FOUND: httplib.NOT_FOUND, decorators.FORBIDDEN: httplib.FORBIDDEN, } def error_handler(message): flask.abort(error_messages.get(message, httplib.INTERNAL_SERVER_ERROR)) has_permission = functools.partial( decorators.has_permission, manager=manager, agent_loader=user_loader, error_handler=error_handler, ) has_post_permission = functools.partial( has_permission, target_loader=SqlalchemyLoader(models.Post, models.db.session) )
{ "content_hash": "d831f385165d5bf8c57c49f527123e96", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 75, "avg_line_length": 26.263157894736842, "alnum_prop": 0.7775551102204409, "repo_name": "jmcarp/guardrail", "id": "112c92e227306198144b348fa194a6d3e17c4d4e", "size": "1023", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/flask_sqla/permissions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "55757" } ], "symlink_target": "" }
'''Multithreading Bottle server adapter.''' import bottle import mtwsgi class MTServer(bottle.ServerAdapter): def run(self, handler): thread_count = self.options.pop('thread_count', None) server = mtwsgi.make_server(self.host, self.port, handler, thread_count, **self.options) server.serve_forever() if __name__ == '__main__': import bottle import time app = bottle.Bottle() @app.route('/') def foo(): time.sleep(2) return 'hello, world!\n' app.run(server=MTServer, host='0.0.0.0', port=8080, thread_count=3) # or: # httpd = mtwsgi.make_server('0.0.0.0', 8080, app, 3) # httpd.serve_forever()
{ "content_hash": "c25d2c4724c40e7dd0dbea3e05408515", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 96, "avg_line_length": 22.766666666666666, "alnum_prop": 0.6134699853587116, "repo_name": "RonRothman/mtwsgi", "id": "affdb2d4e6da84ebf0c60963fde32b38a03f3084", "size": "683", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mtbottle.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2424" } ], "symlink_target": "" }
"""sqlalchemy.orm.interfaces.LoaderStrategy implementations, and related MapperOptions.""" from .. import exc as sa_exc, inspect from .. import util, log, event from ..sql import util as sql_util, visitors from .. import sql from . import ( attributes, interfaces, exc as orm_exc, loading, unitofwork, util as orm_util ) from .state import InstanceState from .util import _none_set from . import properties from .interfaces import ( LoaderStrategy, StrategizedProperty ) from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE from .session import _state_session import itertools def _register_attribute( prop, mapper, useobject, compare_function=None, typecallable=None, callable_=None, proxy_property=None, active_history=False, impl_class=None, **kw ): attribute_ext = list(util.to_list(prop.extension, default=[])) listen_hooks = [] uselist = useobject and prop.uselist if useobject and prop.single_parent: listen_hooks.append(single_parent_validator) if prop.key in prop.parent.validators: fn, opts = prop.parent.validators[prop.key] listen_hooks.append( lambda desc, prop: orm_util._validator_events( desc, prop.key, fn, **opts) ) if useobject: listen_hooks.append(unitofwork.track_cascade_events) # need to assemble backref listeners # after the singleparentvalidator, mapper validator if useobject: backref = prop.back_populates if backref: listen_hooks.append( lambda desc, prop: attributes.backref_listeners( desc, backref, uselist ) ) # a single MapperProperty is shared down a class inheritance # hierarchy, so we set up attribute instrumentation and backref event # for each mapper down the hierarchy. # typically, "mapper" is the same as prop.parent, due to the way # the configure_mappers() process runs, however this is not strongly # enforced, and in the case of a second configure_mappers() run the # mapper here might not be prop.parent; also, a subclass mapper may # be called here before a superclass mapper. 
That is, can't depend # on mappers not already being set up so we have to check each one. for m in mapper.self_and_descendants: if prop is m._props.get(prop.key) and \ not m.class_manager._attr_has_impl(prop.key): desc = attributes.register_attribute_impl( m.class_, prop.key, parent_token=prop, uselist=uselist, compare_function=compare_function, useobject=useobject, extension=attribute_ext, trackparent=useobject and ( prop.single_parent or prop.direction is interfaces.ONETOMANY), typecallable=typecallable, callable_=callable_, active_history=active_history, impl_class=impl_class, send_modified_events=not useobject or not prop.viewonly, doc=prop.doc, **kw ) for hook in listen_hooks: hook(desc, prop) @properties.ColumnProperty.strategy_for(instrument=False, deferred=False) class UninstrumentedColumnLoader(LoaderStrategy): """Represent a non-instrumented MapperProperty. The polymorphic_on argument of mapper() often results in this, if the argument is against the with_polymorphic selectable. 
""" __slots__ = 'columns', def __init__(self, parent, strategy_key): super(UninstrumentedColumnLoader, self).__init__(parent, strategy_key) self.columns = self.parent_property.columns def setup_query( self, context, entity, path, loadopt, adapter, column_collection=None, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): pass @log.class_logger @properties.ColumnProperty.strategy_for(instrument=True, deferred=False) class ColumnLoader(LoaderStrategy): """Provide loading behavior for a :class:`.ColumnProperty`.""" __slots__ = 'columns', 'is_composite' def __init__(self, parent, strategy_key): super(ColumnLoader, self).__init__(parent, strategy_key) self.columns = self.parent_property.columns self.is_composite = hasattr(self.parent_property, 'composite_class') def setup_query( self, context, entity, path, loadopt, adapter, column_collection, memoized_populators, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) fetch = self.columns[0] if adapter: fetch = adapter.columns[fetch] memoized_populators[self.parent_property] = fetch def init_class_attribute(self, mapper): self.is_class_level = True coltype = self.columns[0].type # TODO: check all columns ? check for foreign key as well? active_history = self.parent_property.active_history or \ self.columns[0].primary_key or \ mapper.version_id_col in set(self.columns) _register_attribute( self.parent_property, mapper, useobject=False, compare_function=coltype.compare_values, active_history=active_history ) def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): # look through list of columns represented here # to see which, if any, is present in the row. 
for col in self.columns: if adapter: col = adapter.columns[col] getter = result._getter(col, False) if getter: populators["quick"].append((self.key, getter)) break else: populators["expire"].append((self.key, True)) @log.class_logger @properties.ColumnProperty.strategy_for(deferred=True, instrument=True) class DeferredColumnLoader(LoaderStrategy): """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" __slots__ = 'columns', 'group' def __init__(self, parent, strategy_key): super(DeferredColumnLoader, self).__init__(parent, strategy_key) if hasattr(self.parent_property, 'composite_class'): raise NotImplementedError("Deferred loading for composite " "types not implemented yet") self.columns = self.parent_property.columns self.group = self.parent_property.group def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): # this path currently does not check the result # for the column; this is because in most cases we are # working just with the setup_query() directive which does # not support this, and the behavior here should be consistent. 
if not self.is_class_level: set_deferred_for_local_state = \ self.parent_property._deferred_column_loader populators["new"].append((self.key, set_deferred_for_local_state)) else: populators["expire"].append((self.key, False)) def init_class_attribute(self, mapper): self.is_class_level = True _register_attribute( self.parent_property, mapper, useobject=False, compare_function=self.columns[0].type.compare_values, callable_=self._load_for_state, expire_missing=False ) def setup_query( self, context, entity, path, loadopt, adapter, column_collection, memoized_populators, only_load_props=None, **kw): if ( ( loadopt and 'undefer_pks' in loadopt.local_opts and set(self.columns).intersection( self.parent._should_undefer_in_wildcard) ) or ( loadopt and self.group and loadopt.local_opts.get('undefer_group_%s' % self.group, False) ) or ( only_load_props and self.key in only_load_props ) ): self.parent_property._get_strategy( (("deferred", False), ("instrument", True)) ).setup_query( context, entity, path, loadopt, adapter, column_collection, memoized_populators, **kw) elif self.is_class_level: memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED else: memoized_populators[self.parent_property] = _DEFER_FOR_STATE def _load_for_state(self, state, passive): if not state.key: return attributes.ATTR_EMPTY if not passive & attributes.SQL_OK: return attributes.PASSIVE_NO_RESULT localparent = state.manager.mapper if self.group: toload = [ p.key for p in localparent.iterate_properties if isinstance(p, StrategizedProperty) and isinstance(p.strategy, DeferredColumnLoader) and p.group == self.group ] else: toload = [self.key] # narrow the keys down to just those which have no history group = [k for k in toload if k in state.unmodified] session = _state_session(state) if session is None: raise orm_exc.DetachedInstanceError( "Parent instance %s is not bound to a Session; " "deferred load operation of attribute '%s' cannot proceed" % (orm_util.state_str(state), self.key) ) query 
= session.query(localparent) if loading.load_on_ident( query, state.key, only_load_props=group, refresh_state=state) is None: raise orm_exc.ObjectDeletedError(state) return attributes.ATTR_WAS_SET class LoadDeferredColumns(object): """serializable loader object used by DeferredColumnLoader""" def __init__(self, key): self.key = key def __call__(self, state, passive=attributes.PASSIVE_OFF): key = self.key localparent = state.manager.mapper prop = localparent._props[key] strategy = prop._strategies[DeferredColumnLoader] return strategy._load_for_state(state, passive) class AbstractRelationshipLoader(LoaderStrategy): """LoaderStratgies which deal with related objects.""" __slots__ = 'mapper', 'target', 'uselist' def __init__(self, parent, strategy_key): super(AbstractRelationshipLoader, self).__init__(parent, strategy_key) self.mapper = self.parent_property.mapper self.target = self.parent_property.target self.uselist = self.parent_property.uselist @log.class_logger @properties.RelationshipProperty.strategy_for(lazy="noload") @properties.RelationshipProperty.strategy_for(lazy=None) class NoLoader(AbstractRelationshipLoader): """Provide loading behavior for a :class:`.RelationshipProperty` with "lazy=None". 
""" __slots__ = () def init_class_attribute(self, mapper): self.is_class_level = True _register_attribute( self.parent_property, mapper, useobject=True, typecallable=self.parent_property.collection_class, ) def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): def invoke_no_load(state, dict_, row): if self.uselist: state.manager.get_impl(self.key).initialize(state, dict_) else: dict_[self.key] = None populators["new"].append((self.key, invoke_no_load)) @log.class_logger @properties.RelationshipProperty.strategy_for(lazy=True) @properties.RelationshipProperty.strategy_for(lazy="select") @properties.RelationshipProperty.strategy_for(lazy="raise") @properties.RelationshipProperty.strategy_for(lazy="raise_on_sql") class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots): """Provide loading behavior for a :class:`.RelationshipProperty` with "lazy=True", that is loads when first accessed. """ __slots__ = ( '_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col', '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns', '_simple_lazy_clause', '_raise_always', '_raise_on_sql') def __init__(self, parent, strategy_key): super(LazyLoader, self).__init__(parent, strategy_key) self._raise_always = self.strategy_opts["lazy"] == "raise" self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql" join_condition = self.parent_property._join_condition self._lazywhere, \ self._bind_to_col, \ self._equated_columns = join_condition.create_lazy_clause() self._rev_lazywhere, \ self._rev_bind_to_col, \ self._rev_equated_columns = join_condition.create_lazy_clause( reverse_direction=True) self.logger.info("%s lazy loading clause %s", self, self._lazywhere) # determine if our "lazywhere" clause is the same as the mapper's # get() clause. 
then we can just use mapper.get() self.use_get = not self.uselist and \ self.mapper._get_clause[0].compare( self._lazywhere, use_proxies=True, equivalents=self.mapper._equivalent_columns ) if self.use_get: for col in list(self._equated_columns): if col in self.mapper._equivalent_columns: for c in self.mapper._equivalent_columns[col]: self._equated_columns[c] = self._equated_columns[col] self.logger.info("%s will use query.get() to " "optimize instance loads", self) def init_class_attribute(self, mapper): self.is_class_level = True active_history = ( self.parent_property.active_history or self.parent_property.direction is not interfaces.MANYTOONE or not self.use_get ) # MANYTOONE currently only needs the # "old" value for delete-orphan # cascades. the required _SingleParentValidator # will enable active_history # in that case. otherwise we don't need the # "old" value during backref operations. _register_attribute( self.parent_property, mapper, useobject=True, callable_=self._load_for_state, typecallable=self.parent_property.collection_class, active_history=active_history ) def _memoized_attr__simple_lazy_clause(self): criterion, bind_to_col = ( self._lazywhere, self._bind_to_col ) params = [] def visit_bindparam(bindparam): bindparam.unique = False if bindparam._identifying_key in bind_to_col: params.append(( bindparam.key, bind_to_col[bindparam._identifying_key], None)) elif bindparam.callable is None: params.append((bindparam.key, None, bindparam.value)) criterion = visitors.cloned_traverse( criterion, {}, {'bindparam': visit_bindparam} ) return criterion, params def _generate_lazy_clause(self, state, passive): criterion, param_keys = self._simple_lazy_clause if state is None: return sql_util.adapt_criterion_to_null( criterion, [key for key, ident, value in param_keys]) mapper = self.parent_property.parent o = state.obj() # strong ref dict_ = attributes.instance_dict(o) if passive & attributes.INIT_OK: passive ^= attributes.INIT_OK params = {} for key, ident, 
value in param_keys: if ident is not None: if passive and passive & attributes.LOAD_AGAINST_COMMITTED: value = mapper._get_committed_state_attr_by_column( state, dict_, ident, passive) else: value = mapper._get_state_attr_by_column( state, dict_, ident, passive) params[key] = value return criterion, params def _invoke_raise_load(self, state, passive, lazy): raise sa_exc.InvalidRequestError( "'%s' is not available due to lazy='%s'" % (self, lazy) ) def _load_for_state(self, state, passive): if not state.key and ( ( not self.parent_property.load_on_pending and not state._load_pending ) or not state.session_id ): return attributes.ATTR_EMPTY pending = not state.key ident_key = None if ( (not passive & attributes.SQL_OK and not self.use_get) or (not passive & attributes.NON_PERSISTENT_OK and pending) ): return attributes.PASSIVE_NO_RESULT if self._raise_always: self._invoke_raise_load(state, passive, "raise") session = _state_session(state) if not session: raise orm_exc.DetachedInstanceError( "Parent instance %s is not bound to a Session; " "lazy load operation of attribute '%s' cannot proceed" % (orm_util.state_str(state), self.key) ) # if we have a simple primary key load, check the # identity map without generating a Query at all if self.use_get: ident = self._get_ident_for_use_get( session, state, passive ) if attributes.PASSIVE_NO_RESULT in ident: return attributes.PASSIVE_NO_RESULT elif attributes.NEVER_SET in ident: return attributes.NEVER_SET if _none_set.issuperset(ident): return None ident_key = self.mapper.identity_key_from_primary_key(ident) instance = loading.get_from_identity(session, ident_key, passive) if instance is not None: return instance elif not passive & attributes.SQL_OK or \ not passive & attributes.RELATED_OBJECT_OK: return attributes.PASSIVE_NO_RESULT return self._emit_lazyload(session, state, ident_key, passive) def _get_ident_for_use_get(self, session, state, passive): instance_mapper = state.manager.mapper if passive & 
attributes.LOAD_AGAINST_COMMITTED: get_attr = instance_mapper._get_committed_state_attr_by_column else: get_attr = instance_mapper._get_state_attr_by_column dict_ = state.dict return [ get_attr( state, dict_, self._equated_columns[pk], passive=passive) for pk in self.mapper.primary_key ] @util.dependencies("sqlalchemy.orm.strategy_options") def _emit_lazyload( self, strategy_options, session, state, ident_key, passive): q = session.query(self.mapper)._adapt_all_clauses() if self.parent_property.secondary is not None: q = q.select_from(self.mapper, self.parent_property.secondary) q = q._with_invoke_all_eagers(False) pending = not state.key # don't autoflush on pending if pending or passive & attributes.NO_AUTOFLUSH: q = q.autoflush(False) if state.load_path: q = q._with_current_path(state.load_path[self.parent_property]) if state.load_options: q = q._conditional_options(*state.load_options) if self.use_get: if self._raise_on_sql: self._invoke_raise_load(state, passive, "raise_on_sql") return loading.load_on_ident(q, ident_key) if self.parent_property.order_by: q = q.order_by(*util.to_list(self.parent_property.order_by)) for rev in self.parent_property._reverse_property: # reverse props that are MANYTOONE are loading *this* # object from get(), so don't need to eager out to those. 
if rev.direction is interfaces.MANYTOONE and \ rev._use_get and \ not isinstance(rev.strategy, LazyLoader): q = q.options( strategy_options.Load.for_existing_path( q._current_path[rev.parent] ).lazyload(rev.key) ) lazy_clause, params = self._generate_lazy_clause( state, passive=passive) if pending: if util.has_intersection( orm_util._none_set, params.values()): return None elif util.has_intersection(orm_util._never_set, params.values()): return None if self._raise_on_sql: self._invoke_raise_load(state, passive, "raise_on_sql") q = q.filter(lazy_clause).params(params) result = q.all() if self.uselist: return result else: l = len(result) if l: if l > 1: util.warn( "Multiple rows returned with " "uselist=False for lazily-loaded attribute '%s' " % self.parent_property) return result[0] else: return None def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): key = self.key if not self.is_class_level: # we are not the primary manager for this attribute # on this class - set up a # per-instance lazyloader, which will override the # class-level behavior. # this currently only happens when using a # "lazyload" option on a "no load" # attribute - "eager" attributes always have a # class-level lazyloader installed. set_lazy_callable = InstanceState._instance_level_callable_processor( mapper.class_manager, LoadLazyAttribute(key, self), key) populators["new"].append((self.key, set_lazy_callable)) elif context.populate_existing or mapper.always_refresh: def reset_for_lazy_callable(state, dict_, row): # we are the primary manager for this attribute on # this class - reset its # per-instance attribute state, so that the class-level # lazy loader is # executed when next referenced on this instance. # this is needed in # populate_existing() types of scenarios to reset # any existing state. 
state._reset(dict_, key) populators["new"].append((self.key, reset_for_lazy_callable)) class LoadLazyAttribute(object): """serializable loader object used by LazyLoader""" def __init__(self, key, initiating_strategy): self.key = key self.strategy_key = initiating_strategy.strategy_key def __call__(self, state, passive=attributes.PASSIVE_OFF): key = self.key instance_mapper = state.manager.mapper prop = instance_mapper._props[key] strategy = prop._strategies[self.strategy_key] return strategy._load_for_state(state, passive) @properties.RelationshipProperty.strategy_for(lazy="immediate") class ImmediateLoader(AbstractRelationshipLoader): __slots__ = () def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy((("lazy", "select"),)).\ init_class_attribute(mapper) def setup_query( self, context, entity, path, loadopt, adapter, column_collection=None, parentmapper=None, **kwargs): pass def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): def load_immediate(state, dict_, row): state.get_impl(self.key).get(state, dict_) populators["delayed"].append((self.key, load_immediate)) @log.class_logger @properties.RelationshipProperty.strategy_for(lazy="subquery") class SubqueryLoader(AbstractRelationshipLoader): __slots__ = 'join_depth', def __init__(self, parent, strategy_key): super(SubqueryLoader, self).__init__(parent, strategy_key) self.join_depth = self.parent_property.join_depth def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy((("lazy", "select"),)).\ init_class_attribute(mapper) def setup_query( self, context, entity, path, loadopt, adapter, column_collection=None, parentmapper=None, **kwargs): if not context.query._enable_eagerloads: return elif context.query._yield_per: context.query._no_yield_per("subquery") path = path[self.parent_property] # build up a path indicating the path from the leftmost # entity to the thing we're subquery loading. 
with_poly_info = path.get( context.attributes, "path_with_polymorphic", None) if with_poly_info is not None: effective_entity = with_poly_info.entity else: effective_entity = self.mapper subq_path = context.attributes.get( ('subquery_path', None), orm_util.PathRegistry.root) subq_path = subq_path + path # if not via query option, check for # a cycle if not path.contains(context.attributes, "loader"): if self.join_depth: if path.length / 2 > self.join_depth: return elif subq_path.contains_mapper(self.mapper): return leftmost_mapper, leftmost_attr, leftmost_relationship = \ self._get_leftmost(subq_path) orig_query = context.attributes.get( ("orig_query", SubqueryLoader), context.query) # generate a new Query from the original, then # produce a subquery from it. left_alias = self._generate_from_original_query( orig_query, leftmost_mapper, leftmost_attr, leftmost_relationship, entity.entity_zero ) # generate another Query that will join the # left alias to the target relationships. # basically doing a longhand # "from_self()". 
(from_self() itself not quite industrial # strength enough for all contingencies...but very close) q = orig_query.session.query(effective_entity) q._attributes = { ("orig_query", SubqueryLoader): orig_query, ('subquery_path', None): subq_path } q = q._set_enable_single_crit(False) to_join, local_attr, parent_alias = \ self._prep_for_joins(left_alias, subq_path) q = q.order_by(*local_attr) q = q.add_columns(*local_attr) q = self._apply_joins( q, to_join, left_alias, parent_alias, effective_entity) q = self._setup_options(q, subq_path, orig_query, effective_entity) q = self._setup_outermost_orderby(q) # add new query to attributes to be picked up # by create_row_processor path.set(context.attributes, "subquery", q) def _get_leftmost(self, subq_path): subq_path = subq_path.path subq_mapper = orm_util._class_to_mapper(subq_path[0]) # determine attributes of the leftmost mapper if self.parent.isa(subq_mapper) and \ self.parent_property is subq_path[1]: leftmost_mapper, leftmost_prop = \ self.parent, self.parent_property else: leftmost_mapper, leftmost_prop = \ subq_mapper, \ subq_path[1] leftmost_cols = leftmost_prop.local_columns leftmost_attr = [ getattr( subq_path[0].entity, leftmost_mapper._columntoproperty[c].key) for c in leftmost_cols ] return leftmost_mapper, leftmost_attr, leftmost_prop def _generate_from_original_query( self, orig_query, leftmost_mapper, leftmost_attr, leftmost_relationship, orig_entity ): # reformat the original query # to look only for significant columns q = orig_query._clone().correlate(None) # set the query's "FROM" list explicitly to what the # FROM list would be in any case, as we will be limiting # the columns in the SELECT list which may no longer include # all entities mentioned in things like WHERE, JOIN, etc. 
if not q._from_obj: q._set_select_from( list(set([ ent['entity'] for ent in orig_query.column_descriptions if ent['entity'] is not None ])), False ) # select from the identity columns of the outer (specifically, these # are the 'local_cols' of the property). This will remove # other columns from the query that might suggest the right entity # which is why we do _set_select_from above. target_cols = q._adapt_col_list(leftmost_attr) q._set_entities(target_cols) distinct_target_key = leftmost_relationship.distinct_target_key if distinct_target_key is True: q._distinct = True elif distinct_target_key is None: # if target_cols refer to a non-primary key or only # part of a composite primary key, set the q as distinct for t in set(c.table for c in target_cols): if not set(target_cols).issuperset(t.primary_key): q._distinct = True break if q._order_by is False: q._order_by = leftmost_mapper.order_by # don't need ORDER BY if no limit/offset if q._limit is None and q._offset is None: q._order_by = None # the original query now becomes a subquery # which we'll join onto. embed_q = q.with_labels().subquery() left_alias = orm_util.AliasedClass( leftmost_mapper, embed_q, use_mapper_path=True) return left_alias def _prep_for_joins(self, left_alias, subq_path): # figure out what's being joined. a.k.a. the fun part to_join = [] pairs = list(subq_path.pairs()) for i, (mapper, prop) in enumerate(pairs): if i > 0: # look at the previous mapper in the chain - # if it is as or more specific than this prop's # mapper, use that instead. # note we have an assumption here that # the non-first element is always going to be a mapper, # not an AliasedClass prev_mapper = pairs[i - 1][1].mapper to_append = prev_mapper if prev_mapper.isa(mapper) else mapper else: to_append = mapper to_join.append((to_append, prop.key)) # determine the immediate parent class we are joining from, # which needs to be aliased. 
if len(to_join) < 2: # in the case of a one level eager load, this is the # leftmost "left_alias". parent_alias = left_alias else: info = inspect(to_join[-1][0]) if info.is_aliased_class: parent_alias = info.entity else: # alias a plain mapper as we may be # joining multiple times parent_alias = orm_util.AliasedClass( info.entity, use_mapper_path=True) local_cols = self.parent_property.local_columns local_attr = [ getattr(parent_alias, self.parent._columntoproperty[c].key) for c in local_cols ] return to_join, local_attr, parent_alias def _apply_joins( self, q, to_join, left_alias, parent_alias, effective_entity): ltj = len(to_join) if ltj == 1: to_join = [ getattr(left_alias, to_join[0][1]).of_type(effective_entity) ] elif ltj == 2: to_join = [ getattr(left_alias, to_join[0][1]).of_type(parent_alias), getattr(parent_alias, to_join[-1][1]).of_type(effective_entity) ] elif ltj > 2: middle = [ ( orm_util.AliasedClass(item[0]) if not inspect(item[0]).is_aliased_class else item[0].entity, item[1] ) for item in to_join[1:-1] ] inner = [] while middle: item = middle.pop(0) attr = getattr(item[0], item[1]) if middle: attr = attr.of_type(middle[0][0]) else: attr = attr.of_type(parent_alias) inner.append(attr) to_join = [ getattr(left_alias, to_join[0][1]).of_type(inner[0].parent) ] + inner + [ getattr(parent_alias, to_join[-1][1]).of_type(effective_entity) ] for attr in to_join: q = q.join(attr, from_joinpoint=True) return q def _setup_options(self, q, subq_path, orig_query, effective_entity): # propagate loader options etc. to the new query. # these will fire relative to subq_path. q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) if orig_query._populate_existing: q._populate_existing = orig_query._populate_existing return q def _setup_outermost_orderby(self, q): if self.parent_property.order_by: # if there's an ORDER BY, alias it the same # way joinedloader does, but we have to pull out # the "eagerjoin" from the query. 
# this really only picks up the "secondary" table # right now. eagerjoin = q._from_obj[0] eager_order_by = \ eagerjoin._target_adapter.\ copy_and_process( util.to_list( self.parent_property.order_by ) ) q = q.order_by(*eager_order_by) return q class _SubqCollections(object): """Given a :class:`.Query` used to emit the "subquery load", provide a load interface that executes the query at the first moment a value is needed. """ _data = None def __init__(self, subq): self.subq = subq def get(self, key, default): if self._data is None: self._load() return self._data.get(key, default) def _load(self): self._data = dict( (k, [vv[0] for vv in v]) for k, v in itertools.groupby( self.subq, lambda x: x[1:] ) ) def loader(self, state, dict_, row): if self._data is None: self._load() def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " "population - eager loading cannot be applied." % self) path = path[self.parent_property] subq = path.get(context.attributes, 'subquery') if subq is None: return assert subq.session is context.session, ( "Subquery session doesn't refer to that of " "our context. Are there broken context caching " "schemes being used?" 
) local_cols = self.parent_property.local_columns # cache the loaded collections in the context # so that inheriting mappers don't re-load when they # call upon create_row_processor again collections = path.get(context.attributes, "collections") if collections is None: collections = self._SubqCollections(subq) path.set(context.attributes, 'collections', collections) if adapter: local_cols = [adapter.columns[c] for c in local_cols] if self.uselist: self._create_collection_loader( context, collections, local_cols, populators) else: self._create_scalar_loader( context, collections, local_cols, populators) def _create_collection_loader( self, context, collections, local_cols, populators): def load_collection_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), () ) state.get_impl(self.key).\ set_committed_value(state, dict_, collection) def load_collection_from_subq_existing_row(state, dict_, row): if self.key not in dict_: load_collection_from_subq(state, dict_, row) populators["new"].append( (self.key, load_collection_from_subq)) populators["existing"].append( (self.key, load_collection_from_subq_existing_row)) if context.invoke_all_eagers: populators["eager"].append((self.key, collections.loader)) def _create_scalar_loader( self, context, collections, local_cols, populators): def load_scalar_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), (None,) ) if len(collection) > 1: util.warn( "Multiple rows returned with " "uselist=False for eagerly-loaded attribute '%s' " % self) scalar = collection[0] state.get_impl(self.key).\ set_committed_value(state, dict_, scalar) def load_scalar_from_subq_existing_row(state, dict_, row): if self.key not in dict_: load_scalar_from_subq(state, dict_, row) populators["new"].append( (self.key, load_scalar_from_subq)) populators["existing"].append( (self.key, load_scalar_from_subq_existing_row)) if context.invoke_all_eagers: 
populators["eager"].append((self.key, collections.loader)) @log.class_logger @properties.RelationshipProperty.strategy_for(lazy="joined") @properties.RelationshipProperty.strategy_for(lazy=False) class JoinedLoader(AbstractRelationshipLoader): """Provide loading behavior for a :class:`.RelationshipProperty` using joined eager loading. """ __slots__ = 'join_depth', '_aliased_class_pool' def __init__(self, parent, strategy_key): super(JoinedLoader, self).__init__(parent, strategy_key) self.join_depth = self.parent_property.join_depth self._aliased_class_pool = [] def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy((("lazy", "select"),)).init_class_attribute(mapper) def setup_query( self, context, entity, path, loadopt, adapter, column_collection=None, parentmapper=None, chained_from_outerjoin=False, **kwargs): """Add a left outer join to the statement that's being constructed.""" if not context.query._enable_eagerloads: return elif context.query._yield_per and self.uselist: context.query._no_yield_per("joined collection") path = path[self.parent_property] with_polymorphic = None user_defined_adapter = self._init_user_defined_eager_proc( loadopt, context) if loadopt else False if user_defined_adapter is not False: clauses, adapter, add_to_collection = \ self._setup_query_on_user_defined_adapter( context, entity, path, adapter, user_defined_adapter ) else: # if not via query option, check for # a cycle if not path.contains(context.attributes, "loader"): if self.join_depth: if path.length / 2 > self.join_depth: return elif path.contains_mapper(self.mapper): return clauses, adapter, add_to_collection, chained_from_outerjoin = \ self._generate_row_adapter( context, entity, path, loadopt, adapter, column_collection, parentmapper, chained_from_outerjoin ) with_poly_info = path.get( context.attributes, "path_with_polymorphic", None ) if with_poly_info is not None: with_polymorphic = with_poly_info.with_polymorphic_mappers else: with_polymorphic = 
None path = path[self.mapper] loading._setup_entity_query( context, self.mapper, entity, path, clauses, add_to_collection, with_polymorphic=with_polymorphic, parentmapper=self.mapper, chained_from_outerjoin=chained_from_outerjoin) if with_poly_info is not None and \ None in set(context.secondary_columns): raise sa_exc.InvalidRequestError( "Detected unaliased columns when generating joined " "load. Make sure to use aliased=True or flat=True " "when using joined loading with with_polymorphic()." ) def _init_user_defined_eager_proc(self, loadopt, context): # check if the opt applies at all if "eager_from_alias" not in loadopt.local_opts: # nope return False path = loadopt.path.parent # the option applies. check if the "user_defined_eager_row_processor" # has been built up. adapter = path.get( context.attributes, "user_defined_eager_row_processor", False) if adapter is not False: # just return it return adapter # otherwise figure it out. alias = loadopt.local_opts["eager_from_alias"] root_mapper, prop = path[-2:] #from .mapper import Mapper #from .interfaces import MapperProperty #assert isinstance(root_mapper, Mapper) #assert isinstance(prop, MapperProperty) if alias is not None: if isinstance(alias, str): alias = prop.target.alias(alias) adapter = sql_util.ColumnAdapter( alias, equivalents=prop.mapper._equivalent_columns) else: if path.contains(context.attributes, "path_with_polymorphic"): with_poly_info = path.get( context.attributes, "path_with_polymorphic") adapter = orm_util.ORMAdapter( with_poly_info.entity, equivalents=prop.mapper._equivalent_columns) else: adapter = context.query._polymorphic_adapters.get( prop.mapper, None) path.set( context.attributes, "user_defined_eager_row_processor", adapter) return adapter def _setup_query_on_user_defined_adapter( self, context, entity, path, adapter, user_defined_adapter): # apply some more wrapping to the "user defined adapter" # if we are setting up the query for SQL render. 
adapter = entity._get_entity_clauses(context.query, context) if adapter and user_defined_adapter: user_defined_adapter = user_defined_adapter.wrap(adapter) path.set( context.attributes, "user_defined_eager_row_processor", user_defined_adapter) elif adapter: user_defined_adapter = adapter path.set( context.attributes, "user_defined_eager_row_processor", user_defined_adapter) add_to_collection = context.primary_columns return user_defined_adapter, adapter, add_to_collection def _gen_pooled_aliased_class(self, context): # keep a local pool of AliasedClass objects that get re-used. # we need one unique AliasedClass per query per appearance of our # entity in the query. key = ('joinedloader_ac', self) if key not in context.attributes: context.attributes[key] = idx = 0 else: context.attributes[key] = idx = context.attributes[key] + 1 if idx >= len(self._aliased_class_pool): to_adapt = orm_util.AliasedClass( self.mapper, flat=True, use_mapper_path=True) # load up the .columns collection on the Alias() before # the object becomes shared among threads. this prevents # races for column identities. 
inspect(to_adapt).selectable.c self._aliased_class_pool.append(to_adapt) return self._aliased_class_pool[idx] def _generate_row_adapter( self, context, entity, path, loadopt, adapter, column_collection, parentmapper, chained_from_outerjoin): with_poly_info = path.get( context.attributes, "path_with_polymorphic", None ) if with_poly_info: to_adapt = with_poly_info.entity else: to_adapt = self._gen_pooled_aliased_class(context) clauses = inspect(to_adapt)._memo( ("joinedloader_ormadapter", self), orm_util.ORMAdapter, to_adapt, equivalents=self.mapper._equivalent_columns, adapt_required=True, allow_label_resolve=False, anonymize_labels=True ) assert clauses.aliased_class is not None if self.parent_property.uselist: context.multi_row_eager_loaders = True innerjoin = ( loadopt.local_opts.get( 'innerjoin', self.parent_property.innerjoin) if loadopt is not None else self.parent_property.innerjoin ) if not innerjoin: # if this is an outer join, all non-nested eager joins from # this path must also be outer joins chained_from_outerjoin = True context.create_eager_joins.append( ( self._create_eager_join, context, entity, path, adapter, parentmapper, clauses, innerjoin, chained_from_outerjoin ) ) add_to_collection = context.secondary_columns path.set(context.attributes, "eager_row_processor", clauses) return clauses, adapter, add_to_collection, chained_from_outerjoin def _create_eager_join( self, context, entity, path, adapter, parentmapper, clauses, innerjoin, chained_from_outerjoin): if parentmapper is None: localparent = entity.mapper else: localparent = parentmapper # whether or not the Query will wrap the selectable in a subquery, # and then attach eager load joins to that (i.e., in the case of # LIMIT/OFFSET etc.) 
should_nest_selectable = context.multi_row_eager_loaders and \ context.query._should_nest_selectable entity_key = None if entity not in context.eager_joins and \ not should_nest_selectable and \ context.from_clause: index, clause = sql_util.find_join_source( context.from_clause, entity.selectable) if clause is not None: # join to an existing FROM clause on the query. # key it to its list index in the eager_joins dict. # Query._compile_context will adapt as needed and # append to the FROM clause of the select(). entity_key, default_towrap = index, clause if entity_key is None: entity_key, default_towrap = entity, entity.selectable towrap = context.eager_joins.setdefault(entity_key, default_towrap) if adapter: if getattr(adapter, 'aliased_class', None): # joining from an adapted entity. The adapted entity # might be a "with_polymorphic", so resolve that to our # specific mapper's entity before looking for our attribute # name on it. efm = inspect(adapter.aliased_class).\ _entity_for_mapper( localparent if localparent.isa(self.parent) else self.parent) # look for our attribute on the adapted entity, else fall back # to our straight property onclause = getattr( efm.entity, self.key, self.parent_property) else: onclause = getattr( orm_util.AliasedClass( self.parent, adapter.selectable, use_mapper_path=True ), self.key, self.parent_property ) else: onclause = self.parent_property assert clauses.aliased_class is not None attach_on_outside = ( not chained_from_outerjoin or not innerjoin or innerjoin == 'unnested') if attach_on_outside: # this is the "classic" eager join case. 
eagerjoin = orm_util._ORMJoin( towrap, clauses.aliased_class, onclause, isouter=not innerjoin or ( chained_from_outerjoin and isinstance(towrap, sql.Join) ), _left_memo=self.parent, _right_memo=self.mapper ) else: # all other cases are innerjoin=='nested' approach eagerjoin = self._splice_nested_inner_join( path, towrap, clauses, onclause) context.eager_joins[entity_key] = eagerjoin # send a hint to the Query as to where it may "splice" this join eagerjoin.stop_on = entity.selectable if not parentmapper: # for parentclause that is the non-eager end of the join, # ensure all the parent cols in the primaryjoin are actually # in the # columns clause (i.e. are not deferred), so that aliasing applied # by the Query propagates those columns outward. # This has the effect # of "undefering" those columns. for col in sql_util._find_columns( self.parent_property.primaryjoin): if localparent.mapped_table.c.contains_column(col): if adapter: col = adapter.columns[col] context.primary_columns.append(col) if self.parent_property.order_by: context.eager_order_by += eagerjoin._target_adapter.\ copy_and_process( util.to_list( self.parent_property.order_by ) ) def _splice_nested_inner_join( self, path, join_obj, clauses, onclause, splicing=False): if splicing is False: # first call is always handed a join object # from the outside assert isinstance(join_obj, orm_util._ORMJoin) elif isinstance(join_obj, sql.selectable.FromGrouping): return self._splice_nested_inner_join( path, join_obj.element, clauses, onclause, splicing ) elif not isinstance(join_obj, orm_util._ORMJoin): if path[-2] is splicing: return orm_util._ORMJoin( join_obj, clauses.aliased_class, onclause, isouter=False, _left_memo=splicing, _right_memo=path[-1].mapper ) else: # only here if splicing == True return None target_join = self._splice_nested_inner_join( path, join_obj.right, clauses, onclause, join_obj._right_memo) if target_join is None: right_splice = False target_join = self._splice_nested_inner_join( path, 
join_obj.left, clauses, onclause, join_obj._left_memo) if target_join is None: # should only return None when recursively called, # e.g. splicing==True assert splicing is not False, \ "assertion failed attempting to produce joined eager loads" return None else: right_splice = True if right_splice: # for a right splice, attempt to flatten out # a JOIN b JOIN c JOIN .. to avoid needless # parenthesis nesting if not join_obj.isouter and not target_join.isouter: eagerjoin = join_obj._splice_into_center(target_join) else: eagerjoin = orm_util._ORMJoin( join_obj.left, target_join, join_obj.onclause, isouter=join_obj.isouter, _left_memo=join_obj._left_memo) else: eagerjoin = orm_util._ORMJoin( target_join, join_obj.right, join_obj.onclause, isouter=join_obj.isouter, _right_memo=join_obj._right_memo) eagerjoin._target_adapter = target_join._target_adapter return eagerjoin def _create_eager_adapter(self, context, result, adapter, path, loadopt): user_defined_adapter = self._init_user_defined_eager_proc( loadopt, context) if loadopt else False if user_defined_adapter is not False: decorator = user_defined_adapter # user defined eagerloads are part of the "primary" # portion of the load. # the adapters applied to the Query should be honored. if context.adapter and decorator: decorator = decorator.wrap(context.adapter) elif context.adapter: decorator = context.adapter else: decorator = path.get(context.attributes, "eager_row_processor") if decorator is None: return False if self.mapper._result_has_identity_key(result, decorator): return decorator else: # no identity key - don't return a row # processor, will cause a degrade to lazy return False def create_row_processor( self, context, path, loadopt, mapper, result, adapter, populators): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " "population - eager loading cannot be applied." 
% self ) our_path = path[self.parent_property] eager_adapter = self._create_eager_adapter( context, result, adapter, our_path, loadopt) if eager_adapter is not False: key = self.key _instance = loading._instance_processor( self.mapper, context, result, our_path[self.mapper], eager_adapter) if not self.uselist: self._create_scalar_loader(context, key, _instance, populators) else: self._create_collection_loader( context, key, _instance, populators) else: self.parent_property._get_strategy((("lazy", "select"),)).\ create_row_processor( context, path, loadopt, mapper, result, adapter, populators) def _create_collection_loader(self, context, key, _instance, populators): def load_collection_from_joined_new_row(state, dict_, row): collection = attributes.init_state_collection( state, dict_, key) result_list = util.UniqueAppender(collection, 'append_without_event') context.attributes[(state, key)] = result_list inst = _instance(row) if inst is not None: result_list.append(inst) def load_collection_from_joined_existing_row(state, dict_, row): if (state, key) in context.attributes: result_list = context.attributes[(state, key)] else: # appender_key can be absent from context.attributes # with isnew=False when self-referential eager loading # is used; the same instance may be present in two # distinct sets of result columns collection = attributes.init_state_collection( state, dict_, key) result_list = util.UniqueAppender( collection, 'append_without_event') context.attributes[(state, key)] = result_list inst = _instance(row) if inst is not None: result_list.append(inst) def load_collection_from_joined_exec(state, dict_, row): _instance(row) populators["new"].append((self.key, load_collection_from_joined_new_row)) populators["existing"].append( (self.key, load_collection_from_joined_existing_row)) if context.invoke_all_eagers: populators["eager"].append( (self.key, load_collection_from_joined_exec)) def _create_scalar_loader(self, context, key, _instance, populators): def 
load_scalar_from_joined_new_row(state, dict_, row): # set a scalar object instance directly on the parent # object, bypassing InstrumentedAttribute event handlers. dict_[key] = _instance(row) def load_scalar_from_joined_existing_row(state, dict_, row): # call _instance on the row, even though the object has # been created, so that we further descend into properties existing = _instance(row) # conflicting value already loaded, this shouldn't happen if key in dict_: if existing is not dict_[key]: util.warn( "Multiple rows returned with " "uselist=False for eagerly-loaded attribute '%s' " % self) else: # this case is when one row has multiple loads of the # same entity (e.g. via aliasing), one has an attribute # that the other doesn't. dict_[key] = existing def load_scalar_from_joined_exec(state, dict_, row): _instance(row) populators["new"].append((self.key, load_scalar_from_joined_new_row)) populators["existing"].append( (self.key, load_scalar_from_joined_existing_row)) if context.invoke_all_eagers: populators["eager"].append((self.key, load_scalar_from_joined_exec)) def single_parent_validator(desc, prop): def _do_check(state, value, oldvalue, initiator): if value is not None and initiator.key == prop.key: hasparent = initiator.hasparent(attributes.instance_state(value)) if hasparent and oldvalue is not value: raise sa_exc.InvalidRequestError( "Instance %s is already associated with an instance " "of %s via its %s attribute, and is only allowed a " "single parent." % (orm_util.instance_str(value), state.class_, prop) ) return value def append(state, value, initiator): return _do_check(state, value, None, initiator) def set_(state, value, oldvalue, initiator): return _do_check(state, value, oldvalue, initiator) event.listen( desc, 'append', append, raw=True, retval=True, active_history=True) event.listen( desc, 'set', set_, raw=True, retval=True, active_history=True)
{ "content_hash": "1217ce62b03a74d6eb498d4a9ef1ecf0", "timestamp": "", "source": "github", "line_count": 1708, "max_line_length": 81, "avg_line_length": 36.38875878220141, "alnum_prop": 0.5657098725704723, "repo_name": "israeleriston/scientific-week", "id": "d35b956875cfde9e0c5fb73b4cb7d91ed889dbec", "size": "62390", "binary": false, "copies": "15", "ref": "refs/heads/master", "path": "backend/venv/lib/python3.5/site-packages/sqlalchemy/orm/strategies.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "5006" }, { "name": "C", "bytes": "5936" }, { "name": "CSS", "bytes": "17040" }, { "name": "HTML", "bytes": "59753" }, { "name": "Java", "bytes": "404421" }, { "name": "JavaScript", "bytes": "177665" }, { "name": "Python", "bytes": "8420848" }, { "name": "Scala", "bytes": "16683" }, { "name": "Shell", "bytes": "10370" }, { "name": "Vue", "bytes": "80409" } ], "symlink_target": "" }
from django.contrib import admin

from .models import Profile


class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for :class:`Profile`.

    Search and filtering mostly reach through to fields of the related
    ``user`` object (note the ``user__`` lookups below).
    """

    # Admin search box matches the related user's identifying fields.
    search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email')
    # Changelist columns: the profile's string form plus its two status flags.
    list_display = ('__str__', 'is_pending', 'is_confirmed')
    # Sidebar filters on profile status and on the related user's privileges.
    list_filter = ('is_pending', 'is_confirmed', 'user__is_staff', 'user__is_superuser')
    # The linked user and the stored consent are shown but not editable.
    readonly_fields = ('user', 'consent')


admin.site.register(Profile, ProfileAdmin)
{ "content_hash": "98aeb18c2e6fa3fad5ba55b330d79d37", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 92, "avg_line_length": 33.23076923076923, "alnum_prop": 0.6759259259259259, "repo_name": "aipescience/django-daiquiri", "id": "4fdccb3e8322cb50a7767f9092299e05e4cf5972", "size": "432", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "daiquiri/auth/admin.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "28598" }, { "name": "HTML", "bytes": "236579" }, { "name": "JavaScript", "bytes": "97087" }, { "name": "Python", "bytes": "602159" } ], "symlink_target": "" }
import importlib
import inspect
import sys
import re

import libcloud

# NOTE(review): the triple-quoted block below is dead code -- an old cherrypy
# experiment "commented out" as a bare string literal.  It is evaluated and
# discarded at import time and has no runtime effect.
"""
import cherrypy

class server( object ):
    def index( self ):
        return "HI"
    index.exposed = True

cherrypy.quickstart( server() )
"""

def get_argspecs( what ):
    """ Iterate through the particular drivers.
        what can currently be 'compute' 'dns' 'loadbalancer' 'storage'

    NOTE(review): as written this always returns an empty list -- ``_return``
    is created but nothing is ever appended to it; the observable output is
    the ``print`` statements.  Presumably the collected arg specs were meant
    to be appended here; confirm intent before relying on the return value.
    """
    _return = [ ]

    # Import the base module and the drivers.
    importlib.import_module( "libcloud.{0}".format(what) )
    importlib.import_module( "libcloud.{0}.drivers".format(what) )

    _drivers = __import__( "libcloud.{0}.drivers".format(what), globals(), locals(), [ "*" ] )
    for driver_name,driver_obj in _drivers.__dict__.items():
        print driver_name
        # Only consider names that correspond to actual imported driver
        # modules under libcloud.<what>.drivers.
        if not "libcloud.{0}.drivers.{1}".format(what,driver_name) in sys.modules:
            continue

        for member_name, member_val in inspect.getmembers( driver_obj, inspect.isclass ):
            # Regex match for /.*NodeDriver$/ ..
            if not re.match( ".*NodeDriver$", member_name ):
                continue

            # Get the spec for the node drivers __init__ ..
            arg_spec = inspect.getargspec( getattr( member_val, "__init__" ) )

            # Quick hack. See LIBCLOUD-405 for why this exists.
            # (An __init__ whose argspec holds only 'self' is skipped.)
            if len( arg_spec[0] ) == 1:
                continue

            print "I have {0}.{1}".format( driver_name, member_name )

    return _return

#print get_argspecs( "compute" )
#print get_argspecs( "dns" )
print get_argspecs( "loadbalancer" )
{ "content_hash": "ee8bd3d181e5020d1be4d3e7b6b34832", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 91, "avg_line_length": 22.483870967741936, "alnum_prop": 0.6642754662840746, "repo_name": "robertkeizer/cloud-gui", "id": "092a49a40159b6751ad4536425133e3e7225c5c5", "size": "1394", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "main.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1394" } ], "symlink_target": "" }
""" Global tasks. :copyright: Copyright (c) 2014 Bivio Software, Inc. All Rights Reserved. :license: Apache, see LICENSE for more details. """ import werkzeug import flask from . import oauth from .. import controller from ..auth import model as pam from . import model as pgm class General(controller.Task): """Global tasks""" def action_index(biv_obj): """Site index""" redirect = controller.app().config['PUBLICPRIZE']['INDEX_URI'] return flask.redirect(redirect) def action_facebook_login(biv_obj): """Login with facebook.""" return oauth.authorize( 'facebook', biv_obj.format_absolute_uri('facebook-authorized') ) def action_facebook_authorized(biv_obj): """Facebook login response""" return oauth.authorize_complete('facebook') def action_forbidden(biv_obj): """Forbidden page""" return flask.render_template('general/forbidden.html'), 403 def action_google_login(biv_obj): """Login with google.""" return oauth.authorize( 'google', biv_obj.format_absolute_uri('google-authorized') ) def action_google_authorized(biv_obj): """Google login response""" return oauth.authorize_complete('google') def action_linkedin_authorized(biv_obj): """LinkedIn login response""" return oauth.authorize_complete('linkedin') def action_linkedin_login(biv_obj): """Login with google.""" return oauth.authorize( 'linkedin', biv_obj.format_absolute_uri('linkedin-authorized') ) def action_login(biv_obj): """Show login options.""" return flask.render_template( "general/login.html", ) def action_logout(biv_obj): """Logout""" oauth.logout() flask.flash('You have successfully logged out.') return flask.redirect('/') def action_not_found(biv_obj): """Not found page""" return flask.render_template('general/not-found.html'), 404 def action_new_test_admin(biv_obj): """Create a new test user, logs in, sets Admin status.""" return General._user(biv_obj, pgm.General.new_test_admin) def action_new_test_user(biv_obj): """Creates a new test user model and log in.""" return General._user(biv_obj, 
pgm.General.new_test_user) def action_privacy(biv_obj): return flask.redirect('/static/pdf/privacy.pdf') def action_terms(biv_obj): return flask.redirect('/static/pdf/terms.pdf') def action_test_login(biv_obj): if not controller.app().config['PUBLICPRIZE']['TEST_USER']: raise Exception("TEST_USER not enabled") return General.action_login(biv_obj) def action_vote(biv_obj): return flask.redirect('/esprit-venture-challenge#/vote'); def _user(contest, op): user = op(contest) oauth.add_user_to_session(user) return flask.redirect('/')
{ "content_hash": "9320e1d8680878d7f2b9634555bfbb22", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 77, "avg_line_length": 30.45, "alnum_prop": 0.6164203612479474, "repo_name": "biviosoftware/publicprize", "id": "1932091d4c729d953beb11811a81a126aa46781d", "size": "3069", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "publicprize/general/task.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "431548" }, { "name": "HTML", "bytes": "48930" }, { "name": "JavaScript", "bytes": "1404923" }, { "name": "Python", "bytes": "186044" } ], "symlink_target": "" }
'''
New Integration Test for Multi-ISO.

@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import time

# Shared state tracked so cleanup helpers can tear down created resources.
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
multi_iso = test_stub.MulISO()


def test():
    """Attach two ISO images to one VM, then detach them one at a time.

    After each attach/detach the VM's cdrom state is checked via
    ``check_vm_cdrom`` (the numeric argument is presumably the expected
    cdrom device/slot -- confirm against the MulISO stub).
    """
    multi_iso.add_iso_image()
    multi_iso.create_vm()
    test_obj_dict.add_vm(multi_iso.vm1)
    multi_iso.get_all_iso_uuids()
    # Attach both ISOs, verifying cdrom state after each attach.
    multi_iso.attach_iso(multi_iso.iso_uuids[0])
    multi_iso.check_vm_cdrom(2, True)
    multi_iso.attach_iso(multi_iso.iso_uuids[1])
    multi_iso.check_vm_cdrom(1, True)
    # Detach them in the same order, re-checking after each detach.
    multi_iso.detach_iso(multi_iso.iso_uuids[0])
    multi_iso.check_vm_cdrom()
    multi_iso.check_vm_cdrom(2, True)
    multi_iso.detach_iso(multi_iso.iso_uuids[1])
    multi_iso.check_vm_cdrom()
    multi_iso.check_vm_cdrom(3, True)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Attach 2 ISO Test Success')


#Will be called only if exception happens in test().
def error_cleanup():
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
{ "content_hash": "6f4214b81a874e4823d1936474e28bab", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 52, "avg_line_length": 27.634146341463413, "alnum_prop": 0.703442188879082, "repo_name": "zstackorg/zstack-woodpecker", "id": "3fbe0628c1c1b5f5b60524015a809775a4a44362", "size": "1133", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "integrationtest/vm/virtualrouter/multi_iso/test_attach_2_iso.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "46522" }, { "name": "Makefile", "bytes": "692" }, { "name": "Puppet", "bytes": "875" }, { "name": "Python", "bytes": "2891030" }, { "name": "Shell", "bytes": "54266" } ], "symlink_target": "" }
from django.conf.urls import patterns, url, include

from api import views

# Mount every URL generated by the api app's router (presumably a REST
# framework router -- it exposes a ``.urls`` attribute) at the root prefix.
urlpatterns = patterns(
    '',
    url(r'^', include(views.router.urls)),
)
{ "content_hash": "47b1f5e3ba2d6decb7b4271d15b3fd16", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 51, "avg_line_length": 17.11111111111111, "alnum_prop": 0.6818181818181818, "repo_name": "msluis/stage-assistant", "id": "b6bdaca0ec4c42cf1fe76c13f7c6cadb8360d632", "size": "154", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "server/src/api/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "271" }, { "name": "HTML", "bytes": "12969" }, { "name": "JavaScript", "bytes": "16128" }, { "name": "Python", "bytes": "11364" }, { "name": "Ruby", "bytes": "306" } ], "symlink_target": "" }
""" Make this module itself executable as an alias for invoke. """ import sys import subprocess cmd = ['invoke'] if len(sys.argv) == 1: cmd.append('help') else: cmd.extend(sys.argv[1:]) subprocess.check_call(cmd)
{ "content_hash": "b0e60ca5ae45afdab1170b3b0f4eb44a", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 58, "avg_line_length": 16, "alnum_prop": 0.6741071428571429, "repo_name": "jrversteegh/flexx", "id": "523ac42185c6933b1211925a5f21b08ff4b931c2", "size": "224", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tasks/__main__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "150" }, { "name": "JavaScript", "bytes": "26636" }, { "name": "Python", "bytes": "978605" } ], "symlink_target": "" }
"""Benchmark for KPL implementation of vocabulary columns from lists with dense inputs.""" import tensorflow.compat.v2 as tf import keras from keras.layers.preprocessing import string_lookup from keras.layers.preprocessing.benchmarks import ( feature_column_benchmark as fc_bm, ) # isort: off from tensorflow.python.eager.def_function import ( function as tf_function, ) NUM_REPEATS = 10 BATCH_SIZES = [32, 256] def embedding_varlen(batch_size, max_length): """Benchmark a variable-length embedding.""" # Data and constants. vocab = fc_bm.create_vocabulary(32768) data = fc_bm.create_string_data( max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15 ) # Keras implementation model = keras.Sequential() model.add(keras.Input(shape=(max_length,), name="data", dtype=tf.string)) model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None)) # FC implementation fc = tf.feature_column.categorical_column_with_vocabulary_list( key="data", vocabulary_list=vocab, num_oov_buckets=1 ) # Wrap the FC implementation in a tf.function for a fair comparison @tf_function() def fc_fn(tensors): fc.transform_feature( tf.__internal__.feature_column.FeatureTransformationCache(tensors), None, ) # Benchmark runs keras_data = { "data": data.to_tensor(default_value="", shape=(batch_size, max_length)) } k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS) fc_data = { "data": data.to_tensor(default_value="", shape=(batch_size, max_length)) } fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS) return k_avg_time, fc_avg_time class BenchmarkLayer(fc_bm.LayerBenchmark): """Benchmark the layer forward pass.""" def benchmark_layer(self): for batch in BATCH_SIZES: name = f"vocab_list|dense|batch_{batch}" k_time, f_time = embedding_varlen(batch_size=batch, max_length=256) self.report(name, k_time, f_time, NUM_REPEATS) if __name__ == "__main__": tf.test.main()
{ "content_hash": "40b39748f7b02032333966a9777aa166", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 80, "avg_line_length": 29.72222222222222, "alnum_prop": 0.6616822429906543, "repo_name": "keras-team/keras", "id": "eb455a8e52bc4bd2727211d56d1b181318f0bed7", "size": "2829", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "keras/layers/preprocessing/benchmarks/category_vocab_list_dense_benchmark.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "900" }, { "name": "Python", "bytes": "11342063" }, { "name": "Shell", "bytes": "11489" }, { "name": "Starlark", "bytes": "273139" } ], "symlink_target": "" }
from stream_framework.tests.feeds.base import TestBaseFeed, implementation
import pytest
from stream_framework.feeds.cassandra import CassandraFeed
from stream_framework.utils import datetime_to_epoch
from stream_framework.activity import Activity


class CustomActivity(Activity):

    @property
    def serialization_id(self):
        '''
        Shorter serialization id than used by default.

        Packs ``<epoch milliseconds><object_id % 100><verb.id>`` into one
        integer.  Guards reject object_id >= 10**10 and verb.id >= 10**3,
        and activities without a time cannot be serialized.

        NOTE(review): ``%0.2d`` pads but does not truncate, so a verb.id of
        100-999 (allowed by the guard) yields three digits instead of two,
        i.e. a variable-width id -- confirm that is intended.
        '''
        if self.object_id >= 10 ** 10 or self.verb.id >= 10 ** 3:
            raise TypeError('Fatal: object_id / verb have too many digits !')
        if not self.time:
            raise TypeError('Cant serialize activities without a time')
        milliseconds = str(int(datetime_to_epoch(self.time) * 1000))
        # shorter than the default version
        serialization_id_str = '%s%0.2d%0.2d' % (
            milliseconds, self.object_id % 100, self.verb.id)
        serialization_id = int(serialization_id_str)
        return serialization_id


class CassandraCustomFeed(CassandraFeed):
    # Cassandra feed that stores CustomActivity instead of the default.
    activity_class = CustomActivity


@pytest.mark.usefixtures("cassandra_reset")
class TestCassandraBaseFeed(TestBaseFeed):
    feed_cls = CassandraFeed

    # Inherited base tests overridden with no-ops -- presumably not
    # applicable (or too slow) against Cassandra; confirm before removing.
    def test_add_insert_activity(self):
        pass

    def test_add_remove_activity(self):
        pass


@pytest.mark.usefixtures("cassandra_reset")
class TestCassandraCustomFeed(TestBaseFeed):
    feed_cls = CassandraCustomFeed
    activity_class = CustomActivity

    # Same base-test overrides as TestCassandraBaseFeed above.
    def test_add_insert_activity(self):
        pass

    def test_add_remove_activity(self):
        pass

    @implementation
    def test_custom_activity(self):
        # Round-trip one custom activity through the feed and verify it
        # comes back equal and with the custom class preserved.
        assert self.test_feed.count() == 0
        self.feed_cls.insert_activity(
            self.activity
        )
        self.test_feed.add(self.activity)
        assert self.test_feed.count() == 1
        assert self.activity == self.test_feed[:10][0]
        assert type(self.activity) == type(self.test_feed[0][0])
        # make sure nothing is wrong with the activity storage
{ "content_hash": "073c584bf2b2c74f55ca452b90d884c5", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 77, "avg_line_length": 30.446153846153845, "alnum_prop": 0.6705406771096514, "repo_name": "turbolabtech/Stream-Framework", "id": "f5ec0d89ca54c3ae238a658bb6aeec1f08e53772", "size": "1979", "binary": false, "copies": "9", "ref": "refs/heads/master", "path": "stream_framework/tests/feeds/cassandra.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "86131" }, { "name": "Nginx", "bytes": "1796" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Puppet", "bytes": "76558" }, { "name": "Python", "bytes": "282764" }, { "name": "Ruby", "bytes": "259164" }, { "name": "Shell", "bytes": "8427" } ], "symlink_target": "" }
""" Most descriptor compounds selection """ # Author: Giuseppe Marco Randazzo gmrandazzo@gmail.com # License: BSD 3 clause from numpy import zeros, array class MDC(object): """Perform Most-Descriptor-Compound object selection Parameters ---------- dmx : array, shape(row,row) A square distance matrix. To build a distance matrix see scipy at: http://docs.scipy.org/doc/scipy/reference/spatial.distance.html nobjects : int, optional, default: 0 Number of object to select. 0 means an autostop criterion. Attributes ---------- info_ : array, shape (row_,) Information Vector to select the mdc Returns ------ mdcids: list Return the list of id selected from the algorithm. Notes ----- See examples/plot_mdc_example.py for an example. References ---------- Brian D. Hudson, Richard M. Hyde, Elizabeth Rahr and John Wood, Parameter Based Methods for Compound Selection from Chemical Databases, Quant. Struct. Act. Relat. j. 185-289 1996 """ def __init__(self, dmx, nobjects=0): try: self.dmx_ = dmx.tolist() #convert to list to be faster except AttributeError: self.dmx_ = dmx self.nobjects = nobjects self.info_ = None self._build_infovector() self.mdcids = [] def mdclist(self): """ Return the list of most descriptor compounds """ return self.mdcids def getnext(self): """ Get the next most descriptor compound """ self._appendnext() return self.mdcids[-1] def select(self): """ Run the Most Descriptive Compound Selection """ stopcondition = True while stopcondition: self._appendnext() self._rm_mdc_contrib() # Check Stop Condition if self.nobjects > 0: if len(self.mdcids) == len(self.dmx_): stopcondition = False else: if len(self.mdcids) < self.nobjects: continue else: stopcondition = False else: ncheck = 0 for item in self.info_: if item < 1: ncheck += 1 else: continue if ncheck > len(self.mdcids): stopcondition = False return self.mdcids def _build_infovector(self): """ build the information vector """ row = len(self.dmx_) self.info_ = zeros(row) tmp = zeros((row, 2)) for i in 
range(row): for j in range(row): tmp[j][0] = self.dmx_[i][j] tmp[j][1] = j tmp = array(sorted(tmp, key=lambda item: item[0])) # Reciprocal of the rank div = 2.0 for j in range(row): if j == i: self.info_[j] += 1 else: k = int(tmp[j][1]) self.info_[k] += 1/div div += 1.0 def _appendnext(self): """ Append the next most descriptive compound to list """ dist = self.info_[0] mdc = 0 # Select the MDC with the major information for i in range(1, len(self.info_)): if self.info_[i] > dist: dist = self.info_[i] mdc = i else: continue self.mdcids.append(mdc) def _rm_mdc_contrib(self): """ remove the most descriptive compound contribution """ mdc = self.mdcids[-1] row = len(self.dmx_) tmp = zeros((row, 2)) rank = zeros(row) for j in range(row): tmp[j][0] = self.dmx_[mdc][j] tmp[j][1] = j tmp = array(sorted(tmp, key=lambda item: item[0])) div = 2.0 for i in range(row): j = int(tmp[i][1]) if j == mdc: rank[j] = 0.0 else: rank[j] = 1.0 - (1.0/div) div += 1.0 for i in range(row): self.info_[i] *= rank[i]
{ "content_hash": "0a9b67b564c1037dffffbdb5e9f01e68", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 75, "avg_line_length": 27.764705882352942, "alnum_prop": 0.4905838041431262, "repo_name": "zeld/scikit-optobj", "id": "f40c275987990aeffec93533a105aa70976053aa", "size": "4248", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "optobj/mdc.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "18122" } ], "symlink_target": "" }
def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils configuration for the ``blas`` subpackage.

    Parameters
    ----------
    parent_package : str, optional
        Name of the parent package.
    top_path : str, optional
        Path to the top of the source tree.

    Returns
    -------
    numpy.distutils.misc_util.Configuration
        Configuration registering the ``SConstruct`` build script and the
        ``tests`` data directory.
    """
    # Imported lazily so importing this module does not pull in all of
    # numpy.distutils.  The previous `get_info` import was unused and has
    # been removed.
    from numpy.distutils.misc_util import Configuration

    config = Configuration('blas', parent_package, top_path)
    config.add_sconscript('SConstruct')
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
{ "content_hash": "4d143fea82c348deb9f7c6a6d38a6f71", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 58, "avg_line_length": 31, "alnum_prop": 0.7027649769585254, "repo_name": "teoliphant/scipy", "id": "2df548879738e2f2835fac16c1657415291ad3a5", "size": "457", "binary": false, "copies": "12", "ref": "refs/heads/master", "path": "scipy/lib/blas/setupscons.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "11530901" }, { "name": "C++", "bytes": "7695320" }, { "name": "FORTRAN", "bytes": "5898903" }, { "name": "Matlab", "bytes": "1861" }, { "name": "Objective-C", "bytes": "137083" }, { "name": "Python", "bytes": "5863600" }, { "name": "Shell", "bytes": "1793" } ], "symlink_target": "" }
"""RegisterValidator example.""" import gflags FLAGS = gflags.FLAGS gflags.DEFINE_integer('my_version', 0, 'Version number.') gflags.DEFINE_string('filename', None, 'Input file name', short_name='f') gflags.RegisterValidator('my_version', lambda value: value % 2 == 0, message='--my_version must be divisible by 2') gflags.MarkFlagAsRequired('filename')
{ "content_hash": "a8214a6467c830f4ac438d5cfb837e0d", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 73, "avg_line_length": 31.23076923076923, "alnum_prop": 0.6477832512315271, "repo_name": "gsutil-mirrors/python-gflags", "id": "2bef217d284d0553fabf033109d68c7705036e48", "size": "428", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "examples/validator.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1508" }, { "name": "Python", "bytes": "188600" } ], "symlink_target": "" }
"""Parser for MacOS utmpx files.""" import os from dfdatetime import posix_time as dfdatetime_posix_time from plaso.containers import events from plaso.lib import definitions from plaso.lib import dtfabric_helper from plaso.lib import errors from plaso.lib import specification from plaso.parsers import interface from plaso.parsers import manager class UtmpxMacOSEventData(events.EventData): """MacOS utmpx event data. Attributes: hostname (str): hostname or IP address. offset (int): offset of the utmpx record relative to the start of the file, from which the event data was extracted. pid (int): process identifier (PID). terminal (str): name of the terminal. terminal_identifier (int): inittab identifier. type (int): type of login. username (str): user name. written_time (dfdatetime.DateTimeValues): entry written date and time. """ DATA_TYPE = 'macos:utmpx:entry' def __init__(self): """Initializes event data.""" super(UtmpxMacOSEventData, self).__init__(data_type=self.DATA_TYPE) self.hostname = None self.offset = None self.pid = None self.terminal = None self.terminal_identifier = None self.type = None self.username = None self.written_time = None class UtmpxParser(interface.FileObjectParser, dtfabric_helper.DtFabricHelper): """Parser for Mac OS X 10.5 utmpx files.""" NAME = 'utmpx' DATA_FORMAT = 'Mac OS X 10.5 utmpx file' _DEFINITION_FILE = os.path.join( os.path.dirname(__file__), 'utmp.yaml') _SUPPORTED_TYPES = frozenset(range(0, 12)) _FILE_HEADER_USERNAME = 'utmpx-1.00' _FILE_HEADER_TYPE = 10 def _ReadEntry(self, parser_mediator, file_object, file_offset): """Reads an utmpx entry. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the data relative from the start of the file-like object. Returns: UtmpxMacOSEventData: event data of the utmpx entry read. Raises: ParseError: if the entry cannot be parsed. 
""" entry_map = self._GetDataTypeMap('macosx_utmpx_entry') try: entry, _ = self._ReadStructureFromFileObject( file_object, file_offset, entry_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to parse utmpx entry at offset: 0x{0:08x} with error: ' '{1!s}.').format(file_offset, exception)) if entry.type not in self._SUPPORTED_TYPES: raise errors.ParseError('Unsupported type: {0:d}'.format(entry.type)) encoding = parser_mediator.codepage or 'utf8' try: username = entry.username.split(b'\x00')[0] username = username.decode(encoding).rstrip() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to decode username string') username = None try: terminal = entry.terminal.split(b'\x00')[0] terminal = terminal.decode(encoding).rstrip() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to decode terminal string') terminal = None if terminal == '~': terminal = 'system boot' try: hostname = entry.hostname.split(b'\x00')[0] hostname = hostname.decode(encoding).rstrip() except UnicodeDecodeError: parser_mediator.ProduceExtractionWarning( 'unable to decode hostname string') hostname = None if not hostname: hostname = 'localhost' timestamp = entry.microseconds + ( entry.timestamp * definitions.MICROSECONDS_PER_SECOND) event_data = UtmpxMacOSEventData() event_data.hostname = hostname event_data.pid = entry.pid event_data.offset = file_offset event_data.terminal = terminal or None event_data.terminal_identifier = entry.terminal_identifier event_data.type = entry.type event_data.username = username or None event_data.written_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) return event_data @classmethod def GetFormatSpecification(cls): """Retrieves the format specification. Returns: FormatSpecification: format specification. 
""" format_specification = specification.FormatSpecification(cls.NAME) format_specification.AddNewSignature(b'utmpx-1.00\x00', offset=0) return format_specification def ParseFileObject(self, parser_mediator, file_object): """Parses an UTMPX file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: WrongParser: when the file cannot be parsed. """ file_offset = 0 try: event_data = self._ReadEntry(parser_mediator, file_object, file_offset) except errors.ParseError as exception: raise errors.WrongParser( 'Unable to parse utmpx file header with error: {0!s}'.format( exception)) if event_data.username != self._FILE_HEADER_USERNAME: raise errors.WrongParser( 'Unable to parse utmpx file header with error: unsupported username') if event_data.type != self._FILE_HEADER_TYPE: raise errors.WrongParser( 'Unable to parse utmp file header with error: unsupported type of ' 'login') file_offset = file_object.tell() file_size = file_object.get_size() while file_offset < file_size: if parser_mediator.abort: break try: event_data = self._ReadEntry(parser_mediator, file_object, file_offset) except errors.ParseError: break parser_mediator.ProduceEventData(event_data) file_offset = file_object.tell() manager.ParsersManager.RegisterParser(UtmpxParser)
{ "content_hash": "3337e1e21347f24008d6576ae72fadd9", "timestamp": "", "source": "github", "line_count": 194, "max_line_length": 79, "avg_line_length": 30.804123711340207, "alnum_prop": 0.6825635876840697, "repo_name": "joachimmetz/plaso", "id": "e49b8338f8b288197bae8fe51aef9caa1dd6dcfa", "size": "6000", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "plaso/parsers/utmpx.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "4301" }, { "name": "Makefile", "bytes": "122" }, { "name": "PowerShell", "bytes": "1305" }, { "name": "Python", "bytes": "5345755" }, { "name": "Shell", "bytes": "27279" }, { "name": "YARA", "bytes": "507" } ], "symlink_target": "" }
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.core.exceptions import ObjectDoesNotExist


class Migration(DataMigration):
    """Replace the 'Verified' Status entries with the Item.verified flag."""

    def forwards(self, orm):
        # Items currently carrying the 'Verified' status get the new flag.
        for item in orm.Item.objects.all():
            try:
                item.status.get(name='Verified')
                item.verified = True
                item.save()
            except ObjectDoesNotExist:
                pass

        # Drop the now redundant 'Verified' status, if present.
        try:
            orm.Status.objects.get(name='Verified').delete()
        except:
            pass

    def backwards(self, orm):
        # Recreate the 'Verified' status if it no longer exists.
        try:
            verified_status = orm.Status.objects.get(name='Verified')
        except ObjectDoesNotExist:
            verified_status = orm.Status(
                name='Verified',
                description='This machine has been double-checked for all pertinent information.')
            verified_status.save()

        # Translate the flag back into a status membership.
        for item in orm.Item.objects.all():
            if item.verified:
                item.status.add(verified_status)
                item.verified = False
                item.save()

    models = {
        'LabtrackerCore.group': {
            'Meta': {'object_name': 'Group'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
            'group_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']", 'null': 'True', 'blank': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['LabtrackerCore.Item']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
        },
        'LabtrackerCore.inventorytype': {
            'Meta': {'object_name': 'InventoryType'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2616'}),
            'inv_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'namespace': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
        },
        'LabtrackerCore.item': {
            'Meta': {'object_name': 'Item'},
            'it': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.InventoryType']"}),
            'item_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
        },
        'LabtrackerCore.labuser': {
            'Meta': {'object_name': 'LabUser'},
            'accesses': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
        },
        'Machine.contact': {
            'Meta': {'object_name': 'Contact'},
            'contact_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'mg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Group']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'Machine.group': {
            'Meta': {'object_name': 'Group', '_ormbases': ['LabtrackerCore.Group']},
            'casting_server': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Group']", 'unique': 'True', 'primary_key': 'True'}),
            'gateway': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'is_lab': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'Machine.history': {
            'Meta': {'object_name': 'History'},
            'login_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'machine': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Item']"}),
            'mh_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['Machine.Status']", 'null': 'True', 'symmetrical': 'False'}),
            'session_time': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '2', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['LabtrackerCore.LabUser']"})
        },
        'Machine.item': {
            'Meta': {'object_name': 'Item', '_ormbases': ['LabtrackerCore.Item']},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'core': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['LabtrackerCore.Item']", 'unique': 'True', 'primary_key': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Location']"}),
            'mac1': ('django.db.models.fields.CharField', [], {'max_length': '17'}),
            'mac2': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
            'manu_tag': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'machine_status'", 'symmetrical': 'False', 'to': "orm['Machine.Status']"}),
            'stf_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Type']"}),
            'uw_tag': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'wall_port': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'warranty_date': ('django.db.models.fields.DateField', [], {'null': 'True'})
        },
        'Machine.location': {
            'Meta': {'object_name': 'Location'},
            'building': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '600'}),
            'floor': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
            'ml_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'room': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'})
        },
        'Machine.platform': {
            'Meta': {'object_name': 'Platform'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'platform_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'Machine.status': {
            'Meta': {'unique_together': "(('ms_id', 'name'),)", 'object_name': 'Status'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
            'ms_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
        },
        'Machine.type': {
            'Meta': {'object_name': 'Type'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'mt_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['Machine.Platform']"}),
            'specs': ('django.db.models.fields.TextField', [], {})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['Machine']
{ "content_hash": "6794e3789ad22a02caaad348838fe119", "timestamp": "", "source": "github", "line_count": 178, "max_line_length": 182, "avg_line_length": 67.70224719101124, "alnum_prop": 0.5396232677786076, "repo_name": "abztrakt/labtracker", "id": "7eadbc8033112b3e2e5fc04b4069130d48584b07", "size": "12069", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Machine/migrations/0003_move_verified_status.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "79222" }, { "name": "Python", "bytes": "550215" }, { "name": "Shell", "bytes": "4496" } ], "symlink_target": "" }
import _plotly_utils.basevalidators


class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``parcats.line.colorbar.dtick`` property."""

    def __init__(
        self, plotly_name="dtick", parent_name="parcats.line.colorbar", **kwargs
    ):
        # Defaults apply only when the caller does not override them.
        edit_type = kwargs.pop("edit_type", "colorbars")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        super(DtickValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
{ "content_hash": "b3553a4fb92adac554ee5a70812cd2d6", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 80, "avg_line_length": 35.714285714285715, "alnum_prop": 0.604, "repo_name": "plotly/plotly.py", "id": "b747c42782a9cc98ee291dca99264e3c43199442", "size": "500", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packages/python/plotly/plotly/validators/parcats/line/colorbar/_dtick.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "545" }, { "name": "JavaScript", "bytes": "2074" }, { "name": "PostScript", "bytes": "565328" }, { "name": "Python", "bytes": "31506317" }, { "name": "TypeScript", "bytes": "71337" } ], "symlink_target": "" }
import unittest
import threading
import tempfile
import shutil

from electrum_ltc import constants


# Set this locally to make the test suite run faster.
# If set, unit tests that would normally test functions with multiple implementations,
# will only be run once, using the fastest implementation.
# e.g. libsecp256k1 vs python-ecdsa. pycryptodomex vs pyaes.
FAST_TESTS = False


# some unit tests are modifying globals...
class SequentialTestCase(unittest.TestCase):
    """Test case that serializes execution through a shared lock.

    Tests derived from this class mutate global state and therefore must
    not run concurrently.
    """

    test_lock = threading.Lock()

    def setUp(self):
        super().setUp()
        self.test_lock.acquire()

    def tearDown(self):
        super().tearDown()
        self.test_lock.release()


class ElectrumTestCase(SequentialTestCase):
    """Base class for our unit tests."""

    def setUp(self):
        # BUG FIX: this previously called super().setUpClass() from the
        # instance-level setUp(), which skipped SequentialTestCase.setUp()
        # entirely — the serialization lock was never acquired, defeating
        # the purpose of SequentialTestCase.
        super().setUp()
        # Per-test temporary directory for wallet/config files.
        self.electrum_path = tempfile.mkdtemp()

    def tearDown(self):
        # BUG FIX: this previously called super().tearDownClass(); it must
        # mirror setUp() and release the lock via SequentialTestCase.tearDown().
        super().tearDown()
        shutil.rmtree(self.electrum_path)


class TestCaseForTestnet(ElectrumTestCase):
    """Test case that switches the global chain parameters to testnet."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        constants.set_testnet()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        constants.set_mainnet()
{ "content_hash": "72b5911cd7343be0314bd9695ac6f755", "timestamp": "", "source": "github", "line_count": 52, "max_line_length": 86, "avg_line_length": 23.346153846153847, "alnum_prop": 0.6861614497528831, "repo_name": "vialectrum/vialectrum", "id": "6e26471e7970c5214f9c06794c13c66f7b019325", "size": "1214", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "electrum_ltc/tests/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "GLSL", "bytes": "289" }, { "name": "Java", "bytes": "1574" }, { "name": "Makefile", "bytes": "839" }, { "name": "NSIS", "bytes": "7496" }, { "name": "Python", "bytes": "1895270" }, { "name": "Shell", "bytes": "16219" } ], "symlink_target": "" }
from __future__ import absolute_import, print_function import collections import copy import os import sys from contextlib import contextmanager from distutils.sysconfig import get_python_lib import attr import pip_shims import six from cached_property import cached_property from packaging.markers import Marker from packaging.requirements import Requirement as PackagingRequirement from packaging.specifiers import ( InvalidSpecifier, LegacySpecifier, Specifier, SpecifierSet, ) from packaging.utils import canonicalize_name from six.moves.urllib import parse as urllib_parse from six.moves.urllib.parse import unquote from vistir.compat import FileNotFoundError, Path, lru_cache from vistir.contextmanagers import temp_path from vistir.misc import dedup from vistir.path import ( create_tracked_tempdir, get_converted_relative_path, is_file_url, is_valid_url, mkdir_p, normalize_path, ) from ..environment import MYPY_RUNNING from ..exceptions import RequirementError from ..utils import ( VCS_LIST, add_ssh_scheme_to_git_uri, get_setup_paths, is_installable_dir, is_installable_file, is_vcs, strip_ssh_from_git_uri, ) from .markers import ( normalize_marker_str, ) from .setup_info import ( SetupInfo, _prepare_wheel_building_kwargs, ast_parse_setup_py, get_metadata, parse_setup_cfg, ) from .url import URI from .utils import ( DIRECT_URL_RE, HASH_STRING, build_vcs_uri, convert_direct_url_to_url, create_link, expand_env_variables, extras_to_string, filter_none, format_requirement, get_default_pyproject_backend, get_pyproject, get_version, init_requirement, is_pinned_requirement, make_install_requirement, normalize_name, parse_extras, specs_to_string, split_markers_from_line, split_ref_from_uri, split_vcs_method_from_uri, validate_path, validate_specifiers, validate_vcs, ) if MYPY_RUNNING: from typing import ( Optional, TypeVar, List, Dict, Union, Any, Tuple, Sequence, Set, AnyStr, Text, Generator, FrozenSet, ) from pip_shims.shims import ( Link, InstallRequirement, PackageFinder, 
InstallationCandidate, ) RequirementType = TypeVar( "RequirementType", covariant=True, bound=PackagingRequirement ) F = TypeVar("F", "FileRequirement", "VCSRequirement", covariant=True) from six.moves.urllib.parse import SplitResult from .vcs import VCSRepository from .dependencies import AbstractDependency NON_STRING_ITERABLE = Union[List, Set, Tuple] STRING_TYPE = Union[str, bytes, Text] S = TypeVar("S", bytes, str, Text) BASE_TYPES = Union[bool, STRING_TYPE, Tuple[STRING_TYPE, ...]] CUSTOM_TYPES = Union[VCSRepository, RequirementType, SetupInfo, "Line"] CREATION_ARG_TYPES = Union[BASE_TYPES, Link, CUSTOM_TYPES] PIPFILE_ENTRY_TYPE = Union[STRING_TYPE, bool, Tuple[STRING_TYPE], List[STRING_TYPE]] PIPFILE_TYPE = Union[STRING_TYPE, Dict[STRING_TYPE, PIPFILE_ENTRY_TYPE]] TPIPFILE = Dict[STRING_TYPE, PIPFILE_ENTRY_TYPE] SPECIFIERS_BY_LENGTH = sorted(list(Specifier._operators.keys()), key=len, reverse=True) class Line(object): def __init__(self, line, extras=None): # type: (AnyStr, Optional[Union[List[S], Set[S], Tuple[S, ...]]]) -> None self.editable = False # type: bool if line.startswith("-e "): line = line[len("-e ") :] self.editable = True self.extras = () # type: Tuple[STRING_TYPE, ...] 
if extras is not None: self.extras = tuple(sorted(set(extras))) self.line = line # type: STRING_TYPE self.hashes = [] # type: List[STRING_TYPE] self.markers = None # type: Optional[STRING_TYPE] self.vcs = None # type: Optional[STRING_TYPE] self.path = None # type: Optional[STRING_TYPE] self.relpath = None # type: Optional[STRING_TYPE] self.uri = None # type: Optional[STRING_TYPE] self._link = None # type: Optional[Link] self.is_local = False # type: bool self._name = None # type: Optional[STRING_TYPE] self._specifier = None # type: Optional[STRING_TYPE] self.parsed_marker = None # type: Optional[Marker] self.preferred_scheme = None # type: Optional[STRING_TYPE] self._requirement = None # type: Optional[PackagingRequirement] self._parsed_url = None # type: Optional[URI] self._setup_cfg = None # type: Optional[STRING_TYPE] self._setup_py = None # type: Optional[STRING_TYPE] self._pyproject_toml = None # type: Optional[STRING_TYPE] self._pyproject_requires = None # type: Optional[Tuple[STRING_TYPE, ...]] self._pyproject_backend = None # type: Optional[STRING_TYPE] self._wheel_kwargs = None # type: Optional[Dict[STRING_TYPE, STRING_TYPE]] self._vcsrepo = None # type: Optional[VCSRepository] self._setup_info = None # type: Optional[SetupInfo] self._ref = None # type: Optional[STRING_TYPE] self._ireq = None # type: Optional[InstallRequirement] self._src_root = None # type: Optional[STRING_TYPE] self.dist = None # type: Any super(Line, self).__init__() self.parse() def __hash__(self): return hash( ( self.editable, self.line, self.markers, tuple(self.extras), tuple(self.hashes), self.vcs, self.uri, self.path, self.name, self._requirement, ) ) def __repr__(self): try: return ( "<Line (editable={self.editable}, name={self._name}, path={self.path}, " "uri={self.uri}, extras={self.extras}, markers={self.markers}, vcs={self.vcs}" ", specifier={self._specifier}, pyproject={self._pyproject_toml}, " "pyproject_requires={self._pyproject_requires}, " 
"pyproject_backend={self._pyproject_backend}, ireq={self._ireq})>".format( self=self ) ) except Exception: return "<Line {0}>".format(self.__dict__.values()) def __str__(self): # type: () -> str if self.markers: return "{0}; {1}".format(self.get_line(), self.markers) return self.get_line() def get_line( self, with_prefix=False, with_markers=False, with_hashes=True, as_list=False ): # type: (bool, bool, bool, bool) -> Union[STRING_TYPE, List[STRING_TYPE]] line = self.line extras_str = extras_to_string(self.extras) with_hashes = False if self.editable or self.is_vcs else with_hashes hash_list = ["--hash={0}".format(h) for h in self.hashes] if self.is_named: line = self.name_and_specifier elif self.is_direct_url: line = self.link.url elif extras_str: if self.is_vcs: line = self.link.url if "git+file:/" in line and "git+file:///" not in line: line = line.replace("git+file:/", "git+file:///") elif extras_str not in line: line = "{0}{1}".format(line, extras_str) # XXX: For using markers on vcs or url requirements, they can be used # as normal (i.e. no space between the requirement and the semicolon) # and no additional quoting as long as they are not editable requirements # HOWEVER, for editable requirements, the requirement+marker must be quoted # We do this here for the line-formatted versions, but leave it up to the # `Script.parse()` functionality in pipenv, for instance, to handle that # in a cross-platform manner for the `as_list` approach since that is how # we anticipate this will be used if passing directly to the command line # for pip. 
if with_markers and self.markers: line = "{0}; {1}".format(line, self.markers) if with_prefix and self.editable and not as_list: line = '"{0}"'.format(line) if as_list: result_list = [] if with_prefix and self.editable: result_list.append("-e") result_list.append(line) if with_hashes: result_list.extend(self.hashes) return result_list if with_prefix and self.editable: line = "-e {0}".format(line) if with_hashes and hash_list: line = "{0} {1}".format(line, " ".join(hash_list)) return line @property def name_and_specifier(self): name_str, spec_str = "", "" if self.name: name_str = "{0}".format(self.name.lower()) extras_str = extras_to_string(self.extras) if extras_str: name_str = "{0}{1}".format(name_str, extras_str) if self.specifier: spec_str = "{0}".format(self.specifier) return "{0}{1}".format(name_str, spec_str) @classmethod def split_hashes(cls, line): # type: (S) -> Tuple[S, List[S]] if "--hash" not in line: return line, [] split_line = line.split() line_parts = [] # type: List[S] hashes = [] # type: List[S] for part in split_line: if part.startswith("--hash"): param, _, value = part.partition("=") hashes.append(value) else: line_parts.append(part) line = " ".join(line_parts) return line, hashes @property def line_with_prefix(self): # type: () -> STRING_TYPE return self.get_line(with_prefix=True, with_hashes=False) @property def line_for_ireq(self): # type: () -> STRING_TYPE line = "" # type: STRING_TYPE if self.is_file or self.is_remote_url and not self.is_vcs: scheme = self.preferred_scheme if self.preferred_scheme is not None else "uri" local_line = next( iter( [ os.path.dirname(os.path.abspath(f)) for f in [self.setup_py, self.setup_cfg, self.pyproject_toml] if f is not None ] ), None, ) if local_line and self.extras: local_line = "{0}{1}".format(local_line, extras_to_string(self.extras)) line = local_line if local_line is not None else self.line if scheme == "path": if not line and self.base_path is not None: line = os.path.abspath(self.base_path) else: 
if DIRECT_URL_RE.match(self.line): uri = URI.parse(self.line) line = uri.full_url self._requirement = init_requirement(self.line) line = convert_direct_url_to_url(self.line) else: if self.link: line = self.link.url else: try: uri = URI.parse(line) except ValueError: line = line else: line = uri.base_url self._link = uri.as_link if self.editable: if not line: if self.is_path or self.is_file: if not self.path and self.url is not None: line = pip_shims.shims.url_to_path(self.url) else: line = self.path if self.extras: line = "{0}{1}".format(line, extras_to_string(self.extras)) else: line = self.link.url elif self.is_vcs and not self.editable: line = add_ssh_scheme_to_git_uri(self.line) if not line: line = self.line return line @property def base_path(self): # type: () -> Optional[S] if not self.link and not self.path: self.parse_link() if not self.path: pass path = normalize_path(self.path) if os.path.exists(path) and os.path.isdir(path): path = path elif os.path.exists(path) and os.path.isfile(path): path = os.path.dirname(path) else: path = None return path @property def setup_py(self): # type: () -> Optional[STRING_TYPE] if self._setup_py is None: self.populate_setup_paths() return self._setup_py @property def setup_cfg(self): # type: () -> Optional[STRING_TYPE] if self._setup_cfg is None: self.populate_setup_paths() return self._setup_cfg @property def pyproject_toml(self): # type: () -> Optional[STRING_TYPE] if self._pyproject_toml is None: self.populate_setup_paths() return self._pyproject_toml @property def specifier(self): # type: () -> Optional[STRING_TYPE] options = [self._specifier] for req in (self.ireq, self.requirement): if req is not None and getattr(req, "specifier", None): options.append(req.specifier) specifier = next( iter(spec for spec in options if spec is not None), None ) # type: Optional[Union[Specifier, SpecifierSet]] spec_string = None # type: Optional[STRING_TYPE] if specifier is not None: spec_string = specs_to_string(specifier) elif ( 
            specifier is None
            and not self.is_named
            and (self._setup_info is not None and self._setup_info.version)
        ):
            # No explicit specifier: pin to the version discovered in setup metadata.
            spec_string = "=={0}".format(self._setup_info.version)
        if spec_string:
            self._specifier = spec_string
        return self._specifier

    @specifier.setter
    def specifier(self, spec):
        # type: (str) -> None
        # Bare versions are normalized to an exact pin before caching.
        if not spec.startswith("=="):
            spec = "=={0}".format(spec)
        self._specifier = spec
        self.specifiers = SpecifierSet(spec)

    @property
    def specifiers(self):
        # type: () -> Optional[SpecifierSet]
        # Prefer a SpecifierSet derived from self.specifier when neither the
        # ireq nor the parsed requirement already carries one.
        ireq_needs_specifier = False
        req_needs_specifier = False
        if self.ireq is None or self.ireq.req is None or not self.ireq.req.specifier:
            ireq_needs_specifier = True
        if self.requirement is None or not self.requirement.specifier:
            req_needs_specifier = True
        if any([ireq_needs_specifier, req_needs_specifier]):
            # TODO: Should we include versions for VCS dependencies? IS there a reason not
            # to? For now we are using hashes as the equivalent to pin
            # note: we need versions for direct dependencies at the very least
            if (
                self.is_file
                or self.is_remote_url
                or self.is_path
                or (self.is_vcs and not self.editable)
            ):
                if self.specifier is not None:
                    specifier = self.specifier
                    if not isinstance(specifier, SpecifierSet):
                        specifier = SpecifierSet(specifier)
                    # Propagate back onto ireq/requirement via the setter.
                    self.specifiers = specifier
                    return specifier
        if self.ireq is not None and self.ireq.req is not None:
            return self.ireq.req.specifier
        elif self.requirement is not None:
            return self.requirement.specifier
        return None

    @specifiers.setter
    def specifiers(self, specifiers):
        # type: (Union[Text, str, SpecifierSet]) -> None
        if not isinstance(specifiers, SpecifierSet):
            if isinstance(specifiers, six.string_types):
                specifiers = SpecifierSet(specifiers)
            else:
                raise TypeError("Must pass a string or a SpecifierSet")
        specs = self.get_requirement_specs(specifiers)
        if self.ireq is not None and self._ireq and self._ireq.req is not None:
            self._ireq.req.specifier = specifiers
            self._ireq.req.specs = specs
        if self.requirement is not None:
            self.requirement.specifier = specifiers
            self.requirement.specs = specs

    @classmethod
    def get_requirement_specs(cls, specifierset):
        # type: (SpecifierSet) -> List[Tuple[AnyStr, AnyStr]]
        # Extract the first (operator, version) pair from a SpecifierSet.
        specs = []
        spec = next(iter(specifierset._specs), None)
        if spec:
            specs.append(spec._spec)
        return specs

    @property
    def requirement(self):
        # type: () -> Optional[RequirementType]
        """Lazily parse and cache the packaging requirement for this line."""
        if self._requirement is None:
            self.parse_requirement()
            if self._requirement is None and self._name is not None:
                self._requirement = init_requirement(canonicalize_name(self.name))
                # NOTE(review): precedence reads as
                # `self.is_file or (self.is_remote_url and self._requirement is not None)`
                # — confirm `(is_file or is_remote_url) and _requirement is not None`
                # was not intended.
                if self.is_file or self.is_remote_url and self._requirement is not None:
                    self._requirement.url = self.url
        if (
            self._requirement
            and self._requirement.specifier
            and not self._requirement.specs
        ):
            specs = self.get_requirement_specs(self._requirement.specifier)
            self._requirement.specs = specs
        return self._requirement

    def populate_setup_paths(self):
        # type: () -> None
        """Resolve setup.py / setup.cfg / pyproject.toml paths under base_path."""
        if not self.link and not self.path:
            self.parse_link()
        if not self.path:
            return
        base_path = self.base_path
        if base_path is None:
            return
        setup_paths = get_setup_paths(
            base_path, subdirectory=self.subdirectory
        )  # type: Dict[STRING_TYPE, Optional[STRING_TYPE]]
        self._setup_py = setup_paths.get("setup_py")
        self._setup_cfg = setup_paths.get("setup_cfg")
        self._pyproject_toml = setup_paths.get("pyproject_toml")

    @property
    def pyproject_requires(self):
        # type: () -> Optional[Tuple[STRING_TYPE, ...]]
        """Build requirements declared in pyproject.toml, cached after first read."""
        if self._pyproject_requires is None and self.pyproject_toml is not None:
            if self.path is not None:
                pyproject_requires, pyproject_backend = None, None
                pyproject_results = get_pyproject(self.path)  # type: ignore
                if pyproject_results:
                    pyproject_requires, pyproject_backend = pyproject_results
                if pyproject_requires:
                    self._pyproject_requires = tuple(pyproject_requires)
                self._pyproject_backend = pyproject_backend
        return self._pyproject_requires

    @property
    def pyproject_backend(self):
        # type: () -> Optional[STRING_TYPE]
        """Build backend from pyproject.toml, falling back to setup.cfg defaults."""
        if self._pyproject_requires is None and self.pyproject_toml is not None:
            pyproject_requires = None  # type: Optional[Sequence[STRING_TYPE]]
            pyproject_backend = None  # type: Optional[STRING_TYPE]
            pyproject_results = get_pyproject(self.path)  # type: ignore
            if pyproject_results:
                pyproject_requires, pyproject_backend = pyproject_results
            if not pyproject_backend and self.setup_cfg is not None:
                setup_dict = SetupInfo.get_setup_cfg(self.setup_cfg)
                pyproject_backend = get_default_pyproject_backend()
                pyproject_requires = setup_dict.get(
                    "build_requires", ["setuptools", "wheel"]
                )  # type: ignore
            if pyproject_requires:
                self._pyproject_requires = tuple(pyproject_requires)
            if pyproject_backend:
                self._pyproject_backend = pyproject_backend
        return self._pyproject_backend

    def parse_hashes(self):
        # type: () -> "Line"
        """Parse hashes from *self.line* and set them on the current object.

        :returns: Self
        :rtype: `:class:~Line`
        """
        line, hashes = self.split_hashes(self.line)
        self.hashes = hashes
        self.line = line
        return self

    def parse_extras(self):
        # type: () -> "Line"
        """
        Parse extras from *self.line* and set them on the current object
        :returns: self
        :rtype: :class:`~Line`
        """
        extras = None
        line = "{0}".format(self.line)
        if any([self.is_vcs, self.is_url, "@" in line]):
            try:
                # URL-ish lines: re-render the line via the parsed URI.
                if self.parsed_url.name:
                    self._name = self.parsed_url.name
                if (
                    self.parsed_url.host
                    and self.parsed_url.path
                    and self.parsed_url.scheme
                ):
                    self.line = self.parsed_url.to_string(
                        escape_password=False,
                        direct=False,
                        strip_ssh=self.parsed_url.is_implicit_ssh,
                    )
            except ValueError:
                self.line, extras = pip_shims.shims._strip_extras(self.line)
        else:
            self.line, extras = pip_shims.shims._strip_extras(self.line)
        extras_set = set()  # type: Set[STRING_TYPE]
        if extras is not None:
            extras_set = set(parse_extras(extras))
        if self._name:
            # Extras may also be attached to the cached name.
            self._name, name_extras = pip_shims.shims._strip_extras(self._name)
            if name_extras:
                name_extras = set(parse_extras(name_extras))
                extras_set |= name_extras
        if extras_set is not None:
            self.extras = tuple(sorted(extras_set))
        return self

    def get_url(self):
        # type: () -> STRING_TYPE
        """Sets ``self.name`` if given a **PEP-508** style URL."""
        return self.parsed_url.to_string(
            escape_password=False, direct=False, strip_ref=True
        )

    @property
    def name(self):
        # type: () -> Optional[STRING_TYPE]
        """Project name, resolved lazily from the line, wheel, or setup metadata."""
        if self._name is None:
            self.parse_name()
            if self._name is None and not self.is_named and not self.is_wheel:
                if self.setup_info:
                    self._name = self.setup_info.name
            elif self.is_wheel:
                self._name = self._parse_wheel()
                if not self._name:
                    self._name = self.ireq.name
        return self._name

    @name.setter
    def name(self, name):
        # type: (STRING_TYPE) -> None
        # Keep all derived objects (setup info, requirement, ireq) in sync.
        self._name = name
        if self._setup_info:
            self._setup_info.name = name
        if self.requirement and self._requirement:
            self._requirement.name = name
        if self.ireq and self._ireq and self._ireq.req:
            self._ireq.req.name = name

    @property
    def url(self):
        # type: () -> Optional[STRING_TYPE]
        """The bare URL for this line, or None when it cannot be rendered."""
        try:
            return self.parsed_url.to_string(
                escape_password=False,
                strip_ref=True,
                strip_name=True,
                strip_subdir=True,
                strip_ssh=False,
            )
        except ValueError:
            return None

    @property
    def link(self):
        # type: () -> Link
        if self._link is None:
            self.parse_link()
        return self._link

    @property
    def subdirectory(self):
        # type: () -> Optional[STRING_TYPE]
        if self.link is not None:
            return self.link.subdirectory_fragment
        return ""

    @property
    def is_wheel(self):
        # type: () -> bool
        if self.link is None:
            return False
        return self.link.is_wheel

    @property
    def is_artifact(self):
        # type: () -> bool
        if self.link is None:
            return False
        return not self.link.is_vcs

    @property
    def is_vcs(self):
        # type: () -> bool
        # Installable local files and installable non-vcs urls are handled
        # as files, generally speaking
        try:
            if is_vcs(self.line) or is_vcs(self.get_url()):
                return True
        except ValueError:
            return False
        return False

    @property
    def is_url(self):
        # type: () -> bool
        try:
            url = self.get_url()
        except ValueError:
            return False
        if is_valid_url(url) or is_file_url(url):
            return True
        return False

    @property
    def is_remote_url(self):
        # type: () -> bool
        return self.is_url and self.parsed_url.host is not None

    @property
    def is_path(self):
        # type: () -> bool
        """True when the line points at an installable filesystem path."""
        try:
            line_url = self.get_url()
        except ValueError:
            line_url = None
        if (
            self.path
            and (
                self.path.startswith(".")
                or os.path.isabs(self.path)
                or os.path.exists(self.path)
            )
            and is_installable_file(self.path)
        ):
            return True
        elif (os.path.exists(self.line) and is_installable_file(self.line)) or (
            line_url and os.path.exists(line_url) and is_installable_file(line_url)
        ):
            return True
        return False

    @property
    def is_file_url(self):
        # type: () -> bool
        try:
            url = self.get_url()
        except ValueError:
            return False
        try:
            parsed_url_scheme = self.parsed_url.scheme
        except ValueError:
            return False
        if url and is_file_url(url) or parsed_url_scheme == "file":
            return True
        return False

    @property
    def is_file(self):
        # type: () -> bool
        try:
            url = self.get_url()
        except ValueError:
            return False
        if (
            self.is_path
            or (is_file_url(url) and is_installable_file(url))
            or (
                self._parsed_url
                and self._parsed_url.is_file_url
                and is_installable_file(self._parsed_url.url_without_fragment_or_ref)
            )
        ):
            return True
        return False

    @property
    def is_named(self):
        # type: () -> bool
        # Named requirement == not any flavour of URL/file/VCS requirement.
        return not (
            self.is_file_url
            or self.is_url
            or self.is_file
            or self.is_vcs
            or self.is_direct_url
        )

    @property
    def ref(self):
        # type: () -> Optional[STRING_TYPE]
        if self._ref is None and self.relpath is not None:
            self.relpath, self._ref = split_ref_from_uri(self.relpath)
        return self._ref

    @property
    def ireq(self):
        # type: () -> Optional[pip_shims.InstallRequirement]
        if self._ireq is None:
            self.parse_ireq()
        return self._ireq

    @property
    def is_installable(self):
        # type: () -> bool
        try:
            url = self.get_url()
        except ValueError:
            url = None
        possible_paths = (self.line, url, self.path, self.base_path)
        return any(is_installable_file(p) for p in possible_paths if p is not None)

    @property
    def wheel_kwargs(self):
        # Cached kwargs for wheel building (src/build dirs etc.).
        if not self._wheel_kwargs:
            self._wheel_kwargs = _prepare_wheel_building_kwargs(self.ireq)
        return self._wheel_kwargs

    def get_setup_info(self):
        # type: () -> SetupInfo
        """Build a SetupInfo for this line inside a temporary directory."""
        setup_info = None
        with pip_shims.shims.global_tempdir_manager():
            setup_info = SetupInfo.from_ireq(self.ireq, subdir=self.subdirectory)
            if not setup_info.name:
                setup_info.get_info()
        return setup_info

    @property
    def setup_info(self):
        # type: () -> Optional[SetupInfo]
        if not self._setup_info and not self.is_named and not self.is_wheel:
            # make two attempts at this before failing to allow for stale data
            try:
                self.setup_info = self.get_setup_info()
            except FileNotFoundError:
                try:
                    self.setup_info = self.get_setup_info()
                except FileNotFoundError:
                    raise
        return self._setup_info

    @setup_info.setter
    def setup_info(self, setup_info):
        # type: (SetupInfo) -> None
        self._setup_info = setup_info
        if setup_info.version:
            self.specifier = setup_info.version
        if setup_info.name and not self.name:
            self.name = setup_info.name

    def _get_vcsrepo(self):
        # type: () -> Optional[VCSRepository]
        """Create (and usually obtain/checkout) the VCS repository for this line."""
        from .vcs import VCSRepository

        checkout_directory = self.wheel_kwargs["src_dir"]  # type: ignore
        if self.name is not None:
            checkout_directory = os.path.join(
                checkout_directory, self.name
            )  # type: ignore
        vcsrepo = VCSRepository(
            url=self.link.url,
            name=self.name,
            ref=self.ref if self.ref else None,
            checkout_directory=checkout_directory,
            vcs_type=self.vcs,
            subdirectory=self.subdirectory,
        )
        # Editable local file:// repos are used in place; everything else is fetched.
        if not (self.link.scheme.startswith("file") and self.editable):
            vcsrepo.obtain()
        return vcsrepo

    @property
    def vcsrepo(self):
        # type: () -> Optional[VCSRepository]
        if self._vcsrepo is None and self.is_vcs:
            self._vcsrepo = self._get_vcsrepo()
        return self._vcsrepo

    @property
    def parsed_url(self):
        # type: () -> URI
        if self._parsed_url is None:
            self._parsed_url = URI.parse(self.line)
        return self._parsed_url

    @property
    def is_direct_url(self):
        # type: () -> bool
        try:
            return self.is_url and self.parsed_url.is_direct_url
        except ValueError:
            # Fall back to a regex check when the line cannot be URI-parsed.
            return self.is_url and bool(DIRECT_URL_RE.match(self.line))

    @cached_property
    def metadata(self):
        # type: () -> Dict[Any, Any]
        if self.is_local and self.path and is_installable_dir(self.path):
            return get_metadata(self.path)
return {} @cached_property def parsed_setup_cfg(self): # type: () -> Dict[Any, Any] if not ( self.is_local and self.path and is_installable_dir(self.path) and self.setup_cfg ): return {} return self.setup_info.parse_setup_cfg() @cached_property def parsed_setup_py(self): # type: () -> Dict[Any, Any] if self.is_local and self.path and is_installable_dir(self.path): if self.setup_py: return ast_parse_setup_py(self.setup_py) return {} @vcsrepo.setter def vcsrepo(self, repo): # type (VCSRepository) -> None self._vcsrepo = repo ireq = self.ireq wheel_kwargs = self.wheel_kwargs.copy() wheel_kwargs["src_dir"] = repo.checkout_directory with pip_shims.shims.global_tempdir_manager(), temp_path(): ireq.ensure_has_source_dir(wheel_kwargs["src_dir"]) sys.path = [repo.checkout_directory, "", ".", get_python_lib(plat_specific=0)] setupinfo = SetupInfo.create( repo.checkout_directory, ireq=ireq, subdirectory=self.subdirectory, kwargs=wheel_kwargs, ) self._setup_info = setupinfo self._setup_info.reload() def get_ireq(self): # type: () -> InstallRequirement line = self.line_for_ireq if self.editable: ireq = pip_shims.shims.install_req_from_editable(line) else: ireq = pip_shims.shims.install_req_from_line(line) if self.is_named: ireq = pip_shims.shims.install_req_from_line(self.line) if self.is_file or self.is_remote_url: ireq.link = pip_shims.shims.Link(expand_env_variables(self.link.url)) if self.extras and not ireq.extras: ireq.extras = set(self.extras) if self.parsed_marker is not None and not ireq.markers: ireq.markers = self.parsed_marker if not ireq.req and self._requirement is not None: ireq.req = copy.deepcopy(self._requirement) return ireq def parse_ireq(self): # type: () -> None if self._ireq is None: self._ireq = self.get_ireq() if self._ireq is not None: if self.requirement is not None and self._ireq.req is None: self._ireq.req = self.requirement def _parse_wheel(self): # type: () -> Optional[STRING_TYPE] if not self.is_wheel: pass from pip_shims.shims import Wheel 
_wheel = Wheel(self.link.filename) name = _wheel.name version = _wheel.version self._specifier = "=={0}".format(version) return name def _parse_name_from_link(self): # type: () -> Optional[STRING_TYPE] if self.link is None: return None if getattr(self.link, "egg_fragment", None): return self.link.egg_fragment elif self.is_wheel: return self._parse_wheel() return None def _parse_name_from_line(self): # type: () -> Optional[STRING_TYPE] if not self.is_named: pass try: self._requirement = init_requirement(self.line) except Exception: raise RequirementError( "Failed parsing requirement from {0!r}".format(self.line) ) name = self._requirement.name if not self._specifier and self._requirement and self._requirement.specifier: self._specifier = specs_to_string(self._requirement.specifier) if self._requirement.extras and not self.extras: self.extras = self._requirement.extras if not name: name = self.line specifier_match = next( iter(spec for spec in SPECIFIERS_BY_LENGTH if spec in self.line), None ) specifier = None # type: Optional[STRING_TYPE] if specifier_match: specifier = "{0!s}".format(specifier_match) if specifier is not None and specifier in name: version = None # type: Optional[STRING_TYPE] name, specifier, version = name.partition(specifier) self._specifier = "{0}{1}".format(specifier, version) return name def _parse_name_from_path(self): # type: () -> Optional[S] if self.path and self.is_local and is_installable_dir(self.path): metadata = get_metadata(self.path) if metadata: name = metadata.get("name", "") if name and name != "wheel": return name parsed_setup_cfg = self.parsed_setup_cfg if parsed_setup_cfg: name = parsed_setup_cfg.get("name", "") if name: return name parsed_setup_py = self.parsed_setup_py if parsed_setup_py: name = parsed_setup_py.get("name", "") if name and isinstance(name, six.string_types): return name return None def parse_name(self): # type: () -> "Line" if self._name is None: name = None if self.link is not None and 
self.line_is_installable: name = self._parse_name_from_link() if name is None and ( (self.is_remote_url or self.is_artifact or self.is_vcs) and self._parsed_url ): if self._parsed_url.fragment: _, _, name = self._parsed_url.fragment.partition("egg=") if "&" in name: # subdirectory fragments might also be in here name, _, _ = name.partition("&") if name is None and self.is_named: name = self._parse_name_from_line() elif name is None and self.is_file or self.is_remote_url or self.is_path: if self.is_local: name = self._parse_name_from_path() if name is not None: name, extras = pip_shims.shims._strip_extras(name) if extras is not None and not self.extras: self.extras = tuple(sorted(set(parse_extras(extras)))) self._name = name return self def _parse_requirement_from_vcs(self): # type: () -> Optional[PackagingRequirement] url = self.url if self.url else self.link.url if url: url = unquote(url) if ( url and self.uri != url and "git+ssh://" in url and (self.uri is not None and "git+git@" in self.uri) and self._requirement is not None ): self._requirement.line = self.uri self._requirement.url = self.url vcs_uri = build_vcs_uri( # type: ignore vcs=self.vcs, uri=self.url, ref=self.ref, subdirectory=self.subdirectory, extras=self.extras, name=self.name, ) if vcs_uri: self._requirement.link = create_link(vcs_uri) elif self.link: self._requirement.link = self.link # else: # req.link = self.link if self.ref and self._requirement is not None: self._requirement.revision = self.ref if self._vcsrepo is not None: with pip_shims.shims.global_tempdir_manager(): self._requirement.revision = self._vcsrepo.get_commit_hash() return self._requirement def parse_requirement(self): # type: () -> "Line" if self._name is None: self.parse_name() if not any([self._name, self.is_vcs, self.is_named]): if self.setup_info and self.setup_info.name: self._name = self.setup_info.name name, extras, url = self.requirement_info if name: self._requirement = init_requirement(name) # type: 
PackagingRequirement if extras: self._requirement.extras = set(extras) if url: self._requirement.url = url if self.is_direct_url: url = self.link.url if self.link: self._requirement.link = self.link self._requirement.editable = self.editable if self.path and self.link and self.link.scheme.startswith("file"): self._requirement.local_file = True self._requirement.path = self.path if self.is_vcs: self._requirement.vcs = self.vcs self._requirement.line = self.link.url self._parse_requirement_from_vcs() else: self._requirement.line = self.line if self.parsed_marker is not None: self._requirement.marker = self.parsed_marker if self.specifiers: self._requirement.specifier = self.specifiers specs = [] spec = next(iter(s for s in self.specifiers._specs), None) if spec: specs.append(spec._spec) self._requirement.spec = spec else: if self.is_vcs: raise ValueError( "pipenv requires an #egg fragment for version controlled " "dependencies. Please install remote dependency " "in the form {0}#egg=<package-name>.".format(url) ) return self def parse_link(self): # type: () -> "Line" parsed_url = None # type: Optional[URI] if ( not is_valid_url(self.line) and is_installable_file(os.path.abspath(self.line)) and ( self.line.startswith("./") or (os.path.exists(self.line) or os.path.isabs(self.line)) ) ): url = pip_shims.shims.path_to_url(os.path.abspath(self.line)) self._parsed_url = parsed_url = URI.parse(url) elif any( [ is_valid_url(self.line), is_vcs(self.line), is_file_url(self.line), self.is_direct_url, ] ): parsed_url = self.parsed_url if parsed_url is None or ( parsed_url.is_file_url and not parsed_url.is_installable ): return None if parsed_url.is_vcs: self.vcs, _ = parsed_url.scheme.split("+") if parsed_url.is_file_url: self.is_local = True parsed_link = parsed_url.as_link self._ref = parsed_url.ref self.uri = parsed_url.bare_url if parsed_url.name: self._name = parsed_url.name if parsed_url.extras: self.extras = tuple(sorted(set(parsed_url.extras))) self._link = parsed_link 
        # Cross-check with FileRequirement's line parser and record path details.
        vcs, prefer, relpath, path, uri, link = FileRequirement.get_link_from_line(
            self.line
        )
        ref = None
        if link is not None and "@" in unquote(link.path) and uri is not None:
            uri, _, ref = unquote(uri).rpartition("@")
        if relpath is not None and "@" in relpath:
            relpath, _, ref = relpath.rpartition("@")
        if path is not None and "@" in path:
            path, _ = split_ref_from_uri(path)
        link_url = link.url_without_fragment
        if "@" in link_url:
            link_url, _ = split_ref_from_uri(link_url)
        self.preferred_scheme = prefer
        self.relpath = relpath
        self.path = path
        # self.uri = uri
        if prefer in ("path", "relpath") or uri.startswith("file"):
            self.is_local = True
        # NOTE(review): precedence reads as
        # `parsed_url.is_vcs or (parsed_url.is_direct_url and parsed_link)` —
        # confirm the grouping matches the intent.
        if parsed_url.is_vcs or parsed_url.is_direct_url and parsed_link:
            self._link = parsed_link
        else:
            self._link = link
        return self

    def parse_markers(self):
        # type: () -> None
        # Parse the stored marker string via a throwaway PEP-508 requirement.
        if self.markers:
            marker_str = self.markers.replace('"', "'")
            markers = PackagingRequirement("fakepkg; {0}".format(marker_str)).marker
            self.parsed_marker = markers

    @property
    def requirement_info(self):
        # type: () -> Tuple[Optional[S], Tuple[Optional[S], ...], Optional[S]]
        """
        Generates a 3-tuple of the requisite *name*, *extras* and *url* to generate a
        :class:`~packaging.requirements.Requirement` out of.

        :return: A Tuple of an optional name, a Tuple of extras, and an optional URL.
        :rtype: Tuple[Optional[S], Tuple[Optional[S], ...], Optional[S]]
        """
        # Direct URLs can be converted to packaging requirements directly, but
        # only if they are `file://` (with only two slashes)
        name = None  # type: Optional[S]
        extras = ()  # type: Tuple[Optional[S], ...]
url = None # type: Optional[STRING_TYPE] # if self.is_direct_url: if self._name: name = canonicalize_name(self._name) if self.is_file or self.is_url or self.is_path or self.is_file_url or self.is_vcs: url = "" if self.is_vcs: url = self.url if self.url else self.uri if self.is_direct_url: url = self.link.url_without_fragment else: if self.link: url = self.link.url_without_fragment elif self.url: url = self.url if self.ref: url = "{0}@{1}".format(url, self.ref) else: url = self.uri if self.link and name is None: self._name = self.link.egg_fragment if self._name: name = canonicalize_name(self._name) return name, extras, url # type: ignore @property def line_is_installable(self): # type: () -> bool """This is a safeguard against decoy requirements when a user installs a package whose name coincides with the name of a folder in the cwd, e.g. install *alembic* when there is a folder called *alembic* in the working directory. In this case we first need to check that the given requirement is a valid URL, VCS requirement, or installable filesystem path before deciding to treat it as a file requirement over a named requirement. 
""" line = self.line direct_url_match = DIRECT_URL_RE.match(line) if direct_url_match: match_dict = direct_url_match.groupdict() auth = "" username = match_dict.get("username", None) password = match_dict.get("password", None) port = match_dict.get("port", None) path = match_dict.get("path", None) ref = match_dict.get("ref", None) if username is not None: auth = "{0}".format(username) if password: auth = "{0}:{1}".format(auth, password) if auth else password line = match_dict.get("host", "") if auth: line = "{auth}@{line}".format(auth=auth, line=line) if port: line = "{line}:{port}".format(line=line, port=port) if path: line = "{line}{pathsep}{path}".format( line=line, pathsep=match_dict["pathsep"], path=path ) if ref: line = "{line}@{ref}".format(line=line, ref=ref) line = "{scheme}{line}".format(scheme=match_dict["scheme"], line=line) if is_file_url(line): link = create_link(line) line = link.url_without_fragment line, _ = split_ref_from_uri(line) if ( is_vcs(line) or (not is_file_url(line) and is_valid_url(line)) or (is_file_url(line) and is_installable_file(line)) or is_installable_file(line) ): return True return False def parse(self): # type: () -> None self.line = self.line.strip() if self.line.startswith('"'): self.line = self.line.strip('"') self.line, self.markers = split_markers_from_line(self.parse_hashes().line) if self.markers: self.markers = self.markers.replace('"', "'") self.parse_extras() self.line = self.line.strip('"').strip("'").strip() if self.line.startswith("git+file:/") and not self.line.startswith( "git+file:///" ): self.line = self.line.replace("git+file:/", "git+file:///") self.parse_markers() if self.is_file_url: if self.line_is_installable: self.populate_setup_paths() else: raise RequirementError( "Supplied requirement is not installable: {0!r}".format(self.line) ) elif self.is_named and self._name is None: self.parse_name() self.parse_link() # self.parse_requirement() # self.parse_ireq() @attr.s(slots=True, hash=True) class 
NamedRequirement(object): name = attr.ib() # type: STRING_TYPE version = attr.ib() # type: Optional[STRING_TYPE] req = attr.ib() # type: PackagingRequirement extras = attr.ib(default=attr.Factory(list)) # type: Tuple[STRING_TYPE, ...] editable = attr.ib(default=False) # type: bool _parsed_line = attr.ib(default=None) # type: Optional[Line] @req.default def get_requirement(self): # type: () -> RequirementType req = init_requirement( "{0}{1}".format(canonicalize_name(self.name), self.version) ) return req @property def parsed_line(self): # type: () -> Optional[Line] if self._parsed_line is None: self._parsed_line = Line(self.line_part) return self._parsed_line @classmethod def from_line(cls, line, parsed_line=None): # type: (AnyStr, Optional[Line]) -> NamedRequirement req = init_requirement(line) specifiers = None # type: Optional[STRING_TYPE] if req.specifier: specifiers = specs_to_string(req.specifier) req.line = line name = getattr(req, "name", None) if not name: name = getattr(req, "project_name", None) req.name = name if not name: name = getattr(req, "key", line) req.name = name creation_kwargs = { "name": name, "version": specifiers, "req": req, "parsed_line": parsed_line, "extras": None, } extras = None # type: Optional[Tuple[STRING_TYPE, ...]] if req.extras: extras = tuple(req.extras) creation_kwargs["extras"] = extras return cls(**creation_kwargs) @classmethod def from_pipfile(cls, name, pipfile): # type: (S, TPIPFILE) -> NamedRequirement creation_args = {} # type: TPIPFILE if hasattr(pipfile, "keys"): attr_fields = [field.name for field in attr.fields(cls)] creation_args = { k: v for k, v in pipfile.items() if k in attr_fields } # type: ignore creation_args["name"] = name version = get_version(pipfile) # type: Optional[STRING_TYPE] extras = creation_args.get("extras", None) creation_args["version"] = version # type: ignore req = init_requirement("{0}{1}".format(name, version)) if req and extras and req.extras and isinstance(req.extras, tuple): if 
isinstance(extras, six.string_types): req.extras = (extras) + tuple(["{0}".format(xtra) for xtra in req.extras]) elif isinstance(extras, (tuple, list)): req.extras += tuple(extras) creation_args["req"] = req return cls(**creation_args) # type: ignore @property def line_part(self): # type: () -> STRING_TYPE # FIXME: This should actually be canonicalized but for now we have to # simply lowercase it and replace underscores, since full canonicalization # also replaces dots and that doesn't actually work when querying the index return normalize_name(self.name) @property def pipfile_part(self): # type: () -> Dict[STRING_TYPE, Any] pipfile_dict = attr.asdict(self, filter=filter_none).copy() # type: ignore if "version" not in pipfile_dict: pipfile_dict["version"] = "*" if "_parsed_line" in pipfile_dict: pipfile_dict.pop("_parsed_line") name = pipfile_dict.pop("name") return {name: pipfile_dict} LinkInfo = collections.namedtuple( "LinkInfo", ["vcs_type", "prefer", "relpath", "path", "uri", "link"] ) @attr.s(slots=True, eq=True, order=True, hash=True) class FileRequirement(object): """File requirements for tar.gz installable files or wheels or setup.py containing directories.""" #: Path to the relevant `setup.py` location setup_path = attr.ib(default=None, eq=True, order=True) # type: Optional[STRING_TYPE] #: path to hit - without any of the VCS prefixes (like git+ / http+ / etc) path = attr.ib(default=None, eq=True, order=True) # type: Optional[STRING_TYPE] #: Whether the package is editable editable = attr.ib(default=False, eq=True, order=True) # type: bool #: Extras if applicable extras = attr.ib( default=attr.Factory(tuple), eq=True, order=True ) # type: Tuple[STRING_TYPE, ...] 
_uri_scheme = attr.ib( default=None, eq=True, order=True ) # type: Optional[STRING_TYPE] #: URI of the package uri = attr.ib(eq=True, order=True) # type: Optional[STRING_TYPE] #: Link object representing the package to clone link = attr.ib(eq=True, order=True) # type: Optional[Link] #: PyProject Requirements pyproject_requires = attr.ib( factory=tuple, eq=True, order=True ) # type: Optional[Tuple[STRING_TYPE, ...]] #: PyProject Build System pyproject_backend = attr.ib( default=None, eq=True, order=True ) # type: Optional[STRING_TYPE] #: PyProject Path pyproject_path = attr.ib( default=None, eq=True, order=True ) # type: Optional[STRING_TYPE] subdirectory = attr.ib(default=None) # type: Optional[STRING_TYPE] #: Setup metadata e.g. dependencies _setup_info = attr.ib(default=None, eq=True, order=True) # type: Optional[SetupInfo] _has_hashed_name = attr.ib(default=False, eq=True, order=True) # type: bool _parsed_line = attr.ib( default=None, eq=False, order=False, hash=True ) # type: Optional[Line] #: Package name name = attr.ib(eq=True, order=True) # type: Optional[STRING_TYPE] #: A :class:`~pkg_resources.Requirement` instance req = attr.ib(eq=True, order=True) # type: Optional[PackagingRequirement] @classmethod def get_link_from_line(cls, line): # type: (STRING_TYPE) -> LinkInfo """Parse link information from given requirement line. Return a 6-tuple: - `vcs_type` indicates the VCS to use (e.g. "git"), or None. - `prefer` is either "file", "path" or "uri", indicating how the information should be used in later stages. - `relpath` is the relative path to use when recording the dependency, instead of the absolute path/URI used to perform installation. This can be None (to prefer the absolute path or URI). - `path` is the absolute file path to the package. This will always use forward slashes. Can be None if the line is a remote URI. - `uri` is the absolute URI to the package. Can be None if the line is not a URI. 
        - `link` is an instance of :class:`pip._internal.index.Link`,
            representing a URI parse result based on the value of `uri`.

        This function is provided to deal with edge cases concerning URIs
        without a valid netloc. Those URIs are problematic to a straight
        ``urlsplit` call because they cannot be reliably reconstructed with
        ``urlunsplit`` due to a bug in the standard library:

        >>> from urllib.parse import urlsplit, urlunsplit
        >>> urlunsplit(urlsplit('git+file:///this/breaks'))
        'git+file:/this/breaks'
        >>> urlunsplit(urlsplit('file:///this/works'))
        'file:///this/works'

        See `https://bugs.python.org/issue23505#msg277350`.
        """
        # Git allows `git@github.com...` lines that are not really URIs.
        # Add "ssh://" so we can parse correctly, and restore afterwards.
        fixed_line = add_ssh_scheme_to_git_uri(line)  # type: STRING_TYPE
        added_ssh_scheme = fixed_line != line  # type: bool

        # We can assume a lot of things if this is a local filesystem path.
        if "://" not in fixed_line:
            p = Path(fixed_line).absolute()  # type: Path
            path = p.as_posix()  # type: Optional[STRING_TYPE]
            uri = p.as_uri()  # type: STRING_TYPE
            link = create_link(uri)  # type: Link
            relpath = None  # type: Optional[STRING_TYPE]
            try:
                relpath = get_converted_relative_path(path)
            except ValueError:
                relpath = None
            return LinkInfo(None, "path", relpath, path, uri, link)

        # This is an URI. We'll need to perform some elaborated parsing.
        parsed_url = urllib_parse.urlsplit(fixed_line)  # type: SplitResult
        original_url = parsed_url._replace()  # type: SplitResult

        # Split the VCS part out if needed.
        original_scheme = parsed_url.scheme  # type: STRING_TYPE
        vcs_type = None  # type: Optional[STRING_TYPE]
        if "+" in original_scheme:
            scheme = None  # type: Optional[STRING_TYPE]
            vcs_type, _, scheme = original_scheme.partition("+")
            parsed_url = parsed_url._replace(scheme=scheme)  # type: ignore
            prefer = "uri"  # type: STRING_TYPE
        else:
            vcs_type = None
            prefer = "file"

        if parsed_url.scheme == "file" and parsed_url.path:
            # This is a "file://" URI. Use url_to_path and path_to_url to
            # ensure the path is absolute. Also we need to build relpath.
            path = Path(
                pip_shims.shims.url_to_path(urllib_parse.urlunsplit(parsed_url))
            ).as_posix()
            try:
                relpath = get_converted_relative_path(path)
            except ValueError:
                relpath = None
            uri = pip_shims.shims.path_to_url(path)
        else:
            # This is a remote URI. Simply use it.
            path = None
            relpath = None
            # Cut the fragment, but otherwise this is fixed_line.
            uri = urllib_parse.urlunsplit(
                parsed_url._replace(scheme=original_scheme, fragment="")  # type: ignore
            )

        if added_ssh_scheme:
            original_uri = urllib_parse.urlunsplit(
                original_url._replace(scheme=original_scheme, fragment="")  # type: ignore
            )
            uri = strip_ssh_from_git_uri(original_uri)

        # Re-attach VCS prefix to build a Link.
        link = create_link(
            urllib_parse.urlunsplit(
                parsed_url._replace(scheme=original_scheme)
            )  # type: ignore
        )

        return LinkInfo(vcs_type, prefer, relpath, path, uri, link)

    @property
    def setup_py_dir(self):
        # type: () -> Optional[STRING_TYPE]
        # Directory containing setup.py, when a setup path is known.
        if self.setup_path:
            return os.path.dirname(os.path.abspath(self.setup_path))
        return None

    @property
    def dependencies(self):
        # type: () -> Tuple[Dict[S, PackagingRequirement], List[Union[S, PackagingRequirement]], List[S]]
        # Collect (runtime deps, setup_requires, build_requires) from setup
        # metadata and pyproject.toml, de-duplicating the latter two.
        build_deps = []  # type: List[Union[S, PackagingRequirement]]
        setup_deps = []  # type: List[S]
        deps = {}  # type: Dict[S, PackagingRequirement]
        if self.setup_info:
            setup_info = self.setup_info.as_dict()
            deps.update(setup_info.get("requires", {}))
            setup_deps.extend(setup_info.get("setup_requires", []))
            build_deps.extend(setup_info.get("build_requires", []))
            if self.extras and self.setup_info.extras:
                for dep in self.extras:
                    if dep not in self.setup_info.extras:
                        continue
                    extras_list = self.setup_info.extras.get(dep, [])  # type: ignore
                    for req_instance in extras_list:  # type: ignore
                        deps[req_instance.key] = req_instance
        if self.pyproject_requires:
            build_deps.extend(list(self.pyproject_requires))
        setup_deps = list(set(setup_deps))
        build_deps = list(set(build_deps))
        return deps, setup_deps, build_deps

    def __attrs_post_init__(self):
        # type: () -> None
        # Backfill name, req, and the parsed line's ireq.req from whichever
        # source became available first.
        if self.name is None and self.parsed_line:
            if self.parsed_line.setup_info:
                self._setup_info = self.parsed_line.setup_info
                if self.parsed_line.setup_info.name:
                    self.name = self.parsed_line.setup_info.name
        if self.req is None and (
            self._parsed_line is not None and self._parsed_line.requirement is not None
        ):
            self.req = self._parsed_line.requirement
        if (
            self._parsed_line
            and self._parsed_line.ireq
            and not self._parsed_line.ireq.req
        ):
            if self.req is not None and self._parsed_line._ireq is not None:
                self._parsed_line._ireq.req = self.req

    @property
    def setup_info(self):
        # type: () -> Optional[SetupInfo]
        # Resolve setup metadata from the parsed line, the ireq, or the link.
        if self._setup_info is None and self.parsed_line:
            if self.parsed_line and self._parsed_line and self.parsed_line.setup_info:
                if (
                    self._parsed_line._setup_info
                    and not self._parsed_line._setup_info.name
                ):
                    with pip_shims.shims.global_tempdir_manager():
                        self._parsed_line._setup_info.get_info()
                self._setup_info = self.parsed_line._setup_info
            elif self.parsed_line and (
                self.parsed_line.ireq and not self.parsed_line.is_wheel
            ):
                with pip_shims.shims.global_tempdir_manager():
                    self._setup_info = SetupInfo.from_ireq(
                        self.parsed_line.ireq, subdir=self.subdirectory
                    )
            else:
                if self.link and not self.link.is_wheel:
                    self._setup_info = Line(self.line_part).setup_info
                    with pip_shims.shims.global_tempdir_manager():
                        self._setup_info.get_info()
        return self._setup_info

    @setup_info.setter
    def setup_info(self, setup_info):
        # type: (SetupInfo) -> None
        self._setup_info = setup_info
        if self._parsed_line:
            self._parsed_line._setup_info = setup_info

    @uri.default
    def get_uri(self):
        # type: () -> STRING_TYPE
        # Default URI: local path > requirement URL > link URL > "".
        if self.path and not self.uri:
            self._uri_scheme = "path"
            return pip_shims.shims.path_to_url(os.path.abspath(self.path))
        elif (
            getattr(self, "req", None)
            and self.req is not None
            and getattr(self.req, "url")
        ):
            return self.req.url
        elif self.link is not None:
            return self.link.url_without_fragment
        return ""
@name.default def get_name(self): # type: () -> STRING_TYPE if self.parsed_line and self.parsed_line.name: return self.parsed_line.name elif self.link and self.link.egg_fragment: return self.link.egg_fragment elif self.setup_info and self.setup_info.name: return self.setup_info.name @link.default def get_link(self): # type: () -> pip_shims.shims.Link target = "{0}".format(self.uri) if hasattr(self, "name") and not self._has_hashed_name: target = "{0}#egg={1}".format(target, self.name) link = create_link(target) return link @req.default def get_requirement(self): # type: () -> RequirementType if self.name is None: if self._parsed_line is not None and self._parsed_line.name is not None: self.name = self._parsed_line.name else: raise ValueError( "Failed to generate a requirement: missing name for {0!r}".format( self ) ) if self._parsed_line: try: # initialize specifiers to make sure we capture them self._parsed_line.specifiers except Exception: pass req = copy.deepcopy(self._parsed_line.requirement) if req: return req @property def parsed_line(self): # type: () -> Optional[Line] if self._parsed_line is None: self._parsed_line = Line(self.line_part) return self._parsed_line @property def is_local(self): # type: () -> bool uri = getattr(self, "uri", None) if uri is None: if getattr(self, "path", None) and self.path is not None: uri = pip_shims.shims.path_to_url(os.path.abspath(self.path)) elif ( getattr(self, "req", None) and self.req is not None and (getattr(self.req, "url") and self.req.url is not None) ): uri = self.req.url if uri and is_file_url(uri): return True return False @property def is_remote_artifact(self): # type: () -> bool if self.link is None: return False return ( self._parsed_line and not self._parsed_line.is_local and (self._parsed_line.is_artifact or self._parsed_line.is_wheel) and not self.editable ) @property def is_direct_url(self): # type: () -> bool if self._parsed_line is not None and self._parsed_line.is_direct_url: return True return 
self.is_remote_artifact @property def formatted_path(self): # type: () -> Optional[STRING_TYPE] if self.path: path = self.path if not isinstance(path, Path): path = Path(path) return path.as_posix() return None @classmethod def from_line(cls, line, editable=None, extras=None, parsed_line=None): # type: (AnyStr, Optional[bool], Optional[Tuple[AnyStr, ...]], Optional[Line]) -> F parsed_line = Line(line) file_req_from_parsed_line(parsed_line) @classmethod def from_pipfile(cls, name, pipfile): # type: (STRING_TYPE, Dict[STRING_TYPE, Union[Tuple[STRING_TYPE, ...], STRING_TYPE, bool]]) -> F # Parse the values out. After this dance we should have two variables: # path - Local filesystem path. # uri - Absolute URI that is parsable with urlsplit. # One of these will be a string; the other would be None. uri = pipfile.get("uri") fil = pipfile.get("file") path = pipfile.get("path") if path and isinstance(path, six.string_types): if isinstance(path, Path) and not path.is_absolute(): path = get_converted_relative_path(path.as_posix()) elif not os.path.isabs(path): path = get_converted_relative_path(path) if path and uri: raise ValueError("do not specify both 'path' and 'uri'") if path and fil: raise ValueError("do not specify both 'path' and 'file'") uri = uri or fil # Decide that scheme to use. # 'path' - local filesystem path. # 'file' - A file:// URI (possibly with VCS prefix). # 'uri' - Any other URI. 
if path: uri_scheme = "path" else: # URI is not currently a valid key in pipfile entries # see https://github.com/pypa/pipfile/issues/110 uri_scheme = "file" if not uri: uri = pip_shims.shims.path_to_url(path) link_info = None # type: Optional[LinkInfo] if uri and isinstance(uri, six.string_types): link_info = cls.get_link_from_line(uri) else: raise ValueError( "Failed parsing requirement from pipfile: {0!r}".format(pipfile) ) link = None # type: Optional[Link] if link_info: link = link_info.link if link.url_without_fragment: uri = link.url_without_fragment extras = () # type: Optional[Tuple[STRING_TYPE, ...]] if "extras" in pipfile: extras = tuple(pipfile["extras"]) # type: ignore editable = pipfile["editable"] if "editable" in pipfile else False arg_dict = { "name": name, "path": path, "uri": uri, "editable": editable, "link": link, "uri_scheme": uri_scheme, "extras": extras if extras else None, } line = "" # type: STRING_TYPE extras_string = "" if not extras else extras_to_string(extras) if editable and uri_scheme == "path": line = "{0}{1}".format(path, extras_string) else: if name: line_name = "{0}{1}".format(name, extras_string) line = "{0}#egg={1}".format(link.url_without_fragment, line_name) else: if link: line = link.url elif uri and isinstance(uri, six.string_types): line = uri else: raise ValueError( "Failed parsing requirement from pipfile: {0!r}".format(pipfile) ) line = "{0}{1}".format(line, extras_string) if "subdirectory" in pipfile: arg_dict["subdirectory"] = pipfile["subdirectory"] line = "{0}&subdirectory={1}".format(line, pipfile["subdirectory"]) if editable: line = "-e {0}".format(line) arg_dict["parsed_line"] = Line(line) arg_dict["setup_info"] = arg_dict["parsed_line"].setup_info return cls(**arg_dict) # type: ignore @property def line_part(self): # type: () -> STRING_TYPE link_url = None # type: Optional[STRING_TYPE] seed = None # type: Optional[STRING_TYPE] if self.link is not None: link_url = self.link.url_without_fragment is_vcs = 
getattr(self.link, "is_vcs", not self.link.is_artifact) if self._uri_scheme and self._uri_scheme == "path": # We may need any one of these for passing to pip seed = self.path or link_url or self.uri elif (self._uri_scheme and self._uri_scheme == "file") or ( (self.link.is_wheel or not is_vcs) and self.link.url ): seed = link_url or self.uri # add egg fragments to remote artifacts (valid urls only) if not self._has_hashed_name and self.is_remote_artifact and seed is not None: seed += "#egg={0}".format(self.name) editable = "-e " if self.editable else "" if seed is None: raise ValueError("Could not calculate url for {0!r}".format(self)) return "{0}{1}".format(editable, seed) @property def pipfile_part(self): # type: () -> Dict[AnyStr, Dict[AnyStr, Any]] excludes = [ "_base_line", "_has_hashed_name", "setup_path", "pyproject_path", "_uri_scheme", "pyproject_requires", "pyproject_backend", "_setup_info", "_parsed_line", ] filter_func = lambda k, v: bool(v) is True and k.name not in excludes # noqa pipfile_dict = attr.asdict(self, filter=filter_func).copy() # type: Dict name = pipfile_dict.pop("name", None) if name is None: if self.name: name = self.name elif self.parsed_line and self.parsed_line.name: name = self.name = self.parsed_line.name elif self.setup_info and self.setup_info.name: name = self.name = self.setup_info.name if "_uri_scheme" in pipfile_dict: pipfile_dict.pop("_uri_scheme") # For local paths and remote installable artifacts (zipfiles, etc) collision_keys = {"file", "uri", "path"} collision_order = ["file", "uri", "path"] # type: List[STRING_TYPE] collisions = [] # type: List[STRING_TYPE] key_match = next(iter(k for k in collision_order if k in pipfile_dict.keys())) is_vcs = None if self.link is not None: is_vcs = getattr(self.link, "is_vcs", not self.link.is_artifact) if self._uri_scheme: dict_key = self._uri_scheme target_key = dict_key if dict_key in pipfile_dict else key_match if target_key is not None: winning_value = pipfile_dict.pop(target_key) 
collisions = [k for k in collision_keys if k in pipfile_dict] for key in collisions: pipfile_dict.pop(key) pipfile_dict[dict_key] = winning_value elif ( self.is_remote_artifact or (is_vcs is not None and not is_vcs) and (self._uri_scheme and self._uri_scheme == "file") ): dict_key = "file" # Look for uri first because file is a uri format and this is designed # to make sure we add file keys to the pipfile as a replacement of uri if key_match is not None: winning_value = pipfile_dict.pop(key_match) key_to_remove = (k for k in collision_keys if k in pipfile_dict) for key in key_to_remove: pipfile_dict.pop(key) pipfile_dict[dict_key] = winning_value else: collisions = [key for key in collision_order if key in pipfile_dict.keys()] if len(collisions) > 1: for k in collisions[1:]: pipfile_dict.pop(k) return {name: pipfile_dict} @attr.s(slots=True, hash=True) class VCSRequirement(FileRequirement): #: Whether the repository is editable editable = attr.ib(default=None) # type: Optional[bool] #: URI for the repository uri = attr.ib(default=None) # type: Optional[STRING_TYPE] #: path to the repository, if it's local path = attr.ib( default=None, validator=attr.validators.optional(validate_path) ) # type: Optional[STRING_TYPE] #: vcs type, i.e. 
git/hg/svn vcs = attr.ib( validator=attr.validators.optional(validate_vcs), default=None ) # type: Optional[STRING_TYPE] #: vcs reference name (branch / commit / tag) ref = attr.ib(default=None) # type: Optional[STRING_TYPE] #: Subdirectory to use for installation if applicable _repo = attr.ib(default=None) # type: Optional[VCSRepository] _base_line = attr.ib(default=None) # type: Optional[STRING_TYPE] name = attr.ib() # type: STRING_TYPE link = attr.ib() # type: Optional[pip_shims.shims.Link] req = attr.ib() # type: Optional[RequirementType] def __attrs_post_init__(self): # type: () -> None if not self.uri: if self.path: self.uri = pip_shims.shims.path_to_url(self.path) if self.uri is not None: split = urllib_parse.urlsplit(self.uri) scheme, rest = split[0], split[1:] vcs_type = "" if "+" in scheme: vcs_type, scheme = scheme.split("+", 1) vcs_type = "{0}+".format(vcs_type) new_uri = urllib_parse.urlunsplit((scheme,) + rest[:-1] + ("",)) new_uri = "{0}{1}".format(vcs_type, new_uri) self.uri = new_uri @property def url(self): # type: () -> STRING_TYPE if self.link and self.link.url: return self.link.url elif self.uri: return self.uri raise ValueError("No valid url found for requirement {0!r}".format(self)) @link.default def get_link(self): # type: () -> pip_shims.shims.Link uri = self.uri if self.uri else pip_shims.shims.path_to_url(self.path) vcs_uri = build_vcs_uri( self.vcs, add_ssh_scheme_to_git_uri(uri), name=self.name, ref=self.ref, subdirectory=self.subdirectory, extras=self.extras, ) return self.get_link_from_line(vcs_uri).link @name.default def get_name(self): # type: () -> STRING_TYPE if self.link and self.link.egg_fragment: return self.link.egg_fragment if self.req and self.req.name: return self.req.name return super(VCSRequirement, self).get_name() @property def vcs_uri(self): # type: () -> Optional[STRING_TYPE] uri = self.uri if uri and not any(uri.startswith("{0}+".format(vcs)) for vcs in VCS_LIST): if self.vcs: uri = "{0}+{1}".format(self.vcs, uri) 
return uri @property def setup_info(self): if self._parsed_line and self._parsed_line.setup_info: if not self._parsed_line.setup_info.name: with pip_shims.shims.global_tempdir_manager(): self._parsed_line._setup_info.get_info() return self._parsed_line.setup_info subdir = self.subdirectory or self.parsed_line.subdirectory if self._repo: with pip_shims.shims.global_tempdir_manager(): self._setup_info = SetupInfo.from_ireq( Line(self._repo.checkout_directory).ireq, subdir=subdir ) self._setup_info.get_info() return self._setup_info ireq = self.parsed_line.ireq with pip_shims.shims.global_tempdir_manager(): self._setup_info = SetupInfo.from_ireq(ireq, subdir=subdir) return self._setup_info @setup_info.setter def setup_info(self, setup_info): self._setup_info = setup_info if self._parsed_line: self._parsed_line.setup_info = setup_info @req.default def get_requirement(self): # type: () -> PackagingRequirement name = None # type: Optional[STRING_TYPE] if self.name: name = self.name elif self.link and self.link.egg_fragment: name = self.link.egg_fragment url = None if self.uri: url = self.uri elif self.link is not None: url = self.link.url_without_fragment if not name: raise ValueError( "pipenv requires an #egg fragment for version controlled " "dependencies. 
Please install remote dependency " "in the form {0}#egg=<package-name>.".format(url) ) req = init_requirement(canonicalize_name(self.name)) req.editable = self.editable if not getattr(req, "url", None): if url is not None: url = add_ssh_scheme_to_git_uri(url) elif self.uri is not None: link = self.get_link_from_line(self.uri).link if link: url = link.url_without_fragment if ( url and url.startswith("git+file:/") and not url.startswith("git+file:///") ): url = url.replace("git+file:/", "git+file:///") if url: req.url = url line = url if url else self.vcs_uri if self.editable: line = "-e {0}".format(line) req.line = line if self.ref: req.revision = self.ref if self.extras: req.extras = self.extras req.vcs = self.vcs if self.path and self.link and self.link.scheme.startswith("file"): req.local_file = True req.path = self.path req.link = self.link if ( self.link and self.link.url_without_fragment and self.uri and self.uri != unquote(self.link.url_without_fragment) and "git+ssh://" in self.link.url and "git+git@" in self.uri ): req.line = self.uri url = self.link.url_without_fragment if ( url and url.startswith("git+file:/") and not url.startswith("git+file:///") ): url = url.replace("git+file:/", "git+file:///") req.url = url return req @property def repo(self): # type: () -> VCSRepository if self._repo is None: if self._parsed_line and self._parsed_line.vcsrepo: self._repo = self._parsed_line.vcsrepo else: self._repo = self.get_vcs_repo() if self._parsed_line: self._parsed_line.vcsrepo = self._repo return self._repo def get_checkout_dir(self, src_dir=None): # type: (Optional[S]) -> STRING_TYPE src_dir = os.environ.get("PIP_SRC", None) if not src_dir else src_dir checkout_dir = None if self.is_local: path = self.path if not path: path = pip_shims.shims.url_to_path(self.uri) if path and os.path.exists(path): checkout_dir = os.path.abspath(path) return checkout_dir if src_dir is not None: checkout_dir = os.path.join(os.path.abspath(src_dir), self.name) mkdir_p(src_dir) 
return checkout_dir return os.path.join(create_tracked_tempdir(prefix="requirementslib"), self.name) def get_vcs_repo(self, src_dir=None, checkout_dir=None): # type: (Optional[STRING_TYPE], STRING_TYPE) -> VCSRepository from .vcs import VCSRepository if checkout_dir is None: checkout_dir = self.get_checkout_dir(src_dir=src_dir) vcsrepo = VCSRepository( url=expand_env_variables(self.url), name=self.name, ref=self.ref if self.ref else None, checkout_directory=checkout_dir, vcs_type=self.vcs, subdirectory=self.subdirectory, ) if not self.is_local: vcsrepo.obtain() pyproject_info = None if self.subdirectory: self.setup_path = os.path.join(checkout_dir, self.subdirectory, "setup.py") self.pyproject_path = os.path.join( checkout_dir, self.subdirectory, "pyproject.toml" ) pyproject_info = get_pyproject(os.path.join(checkout_dir, self.subdirectory)) else: self.setup_path = os.path.join(checkout_dir, "setup.py") self.pyproject_path = os.path.join(checkout_dir, "pyproject.toml") pyproject_info = get_pyproject(checkout_dir) if pyproject_info is not None: pyproject_requires, pyproject_backend = pyproject_info self.pyproject_requires = tuple(pyproject_requires) self.pyproject_backend = pyproject_backend return vcsrepo def get_commit_hash(self): # type: () -> STRING_TYPE with pip_shims.shims.global_tempdir_manager(): hash_ = self.repo.get_commit_hash() return hash_ def update_repo(self, src_dir=None, ref=None): # type: (Optional[STRING_TYPE], Optional[STRING_TYPE]) -> STRING_TYPE if ref: self.ref = ref repo_hash = None if not self.is_local and self.ref is not None: self.repo.checkout_ref(self.ref) repo_hash = self.get_commit_hash() if self.req: self.req.revision = repo_hash return repo_hash @contextmanager def locked_vcs_repo(self, src_dir=None): # type: (Optional[AnyStr]) -> Generator[VCSRepository, None, None] if not src_dir: src_dir = create_tracked_tempdir(prefix="requirementslib-", suffix="-src") vcsrepo = self.get_vcs_repo(src_dir=src_dir) if not self.req: if 
self.parsed_line is not None: self.req = self.parsed_line.requirement else: self.req = self.get_requirement() with pip_shims.shims.global_tempdir_manager(): revision = self.req.revision = vcsrepo.get_commit_hash() # Remove potential ref in the end of uri after ref is parsed if self.link and "@" in self.link.show_url and self.uri and "@" in self.uri: uri, ref = split_ref_from_uri(self.uri) checkout = revision if checkout and ref and ref in checkout: self.uri = uri orig_repo = self._repo self._repo = vcsrepo if self._parsed_line: self._parsed_line.vcsrepo = vcsrepo if self._setup_info: self._setup_info = attr.evolve( self._setup_info, requirements=(), _extras_requirements=(), build_requires=(), setup_requires=(), version=None, metadata=None, ) if self.parsed_line and self._parsed_line: self._parsed_line.vcsrepo = vcsrepo if self.req and not self.editable: self.req.specifier = SpecifierSet("=={0}".format(self.setup_info.version)) try: yield self._repo except Exception: self._repo = orig_repo raise @classmethod def from_pipfile(cls, name, pipfile): # type: (STRING_TYPE, Dict[S, Union[Tuple[S, ...], S, bool]]) -> F creation_args = {} # type: Dict[STRING_TYPE, CREATION_ARG_TYPES] pipfile_keys = [ k for k in ( "ref", "vcs", "subdirectory", "path", "editable", "file", "uri", "extras", ) + VCS_LIST if k in pipfile ] # extras = None # type: Optional[Tuple[STRING_TYPE, ...]] for key in pipfile_keys: if key == "extras" and key in pipfile: extras = pipfile[key] if isinstance(extras, (list, tuple)): pipfile[key] = tuple(sorted({extra.lower() for extra in extras})) else: pipfile[key] = extras if key in VCS_LIST and key in pipfile_keys: creation_args["vcs"] = key target = pipfile[key] if isinstance(target, six.string_types): drive, path = os.path.splitdrive(target) if ( not drive and not os.path.exists(target) and ( is_valid_url(target) or is_file_url(target) or target.startswith("git@") ) ): creation_args["uri"] = target else: creation_args["path"] = target if 
os.path.isabs(target): creation_args["uri"] = pip_shims.shims.path_to_url(target) elif key in pipfile_keys: creation_args[key] = pipfile[key] creation_args["name"] = name cls_inst = cls(**creation_args) # type: ignore return cls_inst @classmethod def from_line(cls, line, editable=None, extras=None, parsed_line=None): # type: (AnyStr, Optional[bool], Optional[Tuple[AnyStr, ...]], Optional[Line]) -> F parsed_line = Line(line) return vcs_req_from_parsed_line(parsed_line) @property def line_part(self): # type: () -> STRING_TYPE """requirements.txt compatible line part sans-extras.""" base = "" # type: STRING_TYPE if self.is_local: base_link = self.link if not self.link: base_link = self.get_link() if base_link and base_link.egg_fragment: final_format = "{{0}}#egg={0}".format(base_link.egg_fragment) else: final_format = "{0}" base = final_format.format(self.vcs_uri) elif self._parsed_line is not None and ( self._parsed_line.is_direct_url and self._parsed_line.line_with_prefix ): return self._parsed_line.line_with_prefix elif getattr(self, "_base_line", None) and ( isinstance(self._base_line, six.string_types) ): base = self._base_line else: base = getattr(self, "link", self.get_link()).url if base and self.extras and extras_to_string(self.extras) not in base: if self.subdirectory: base = "{0}".format(self.get_link().url) else: base = "{0}{1}".format(base, extras_to_string(sorted(self.extras))) if "git+file:/" in base and "git+file:///" not in base: base = base.replace("git+file:/", "git+file:///") if self.editable and not base.startswith("-e "): base = "-e {0}".format(base) return base @staticmethod def _choose_vcs_source(pipfile): # type: (Dict[S, Union[S, Any]]) -> Dict[S, Union[S, Any]] src_keys = [k for k in pipfile.keys() if k in ["path", "uri", "file"]] vcs_type = "" # type: Optional[STRING_TYPE] alt_type = "" # type: Optional[STRING_TYPE] vcs_value = "" # type: STRING_TYPE if src_keys: chosen_key = next(iter(src_keys)) vcs_type = pipfile.pop("vcs") if chosen_key 
in pipfile: vcs_value = pipfile[chosen_key] alt_type, pipfile_url = split_vcs_method_from_uri(vcs_value) if vcs_type is None: vcs_type = alt_type if vcs_type and pipfile_url: pipfile[vcs_type] = pipfile_url for removed in src_keys: pipfile.pop(removed) return pipfile @property def pipfile_part(self): # type: () -> Dict[S, Dict[S, Union[List[S], S, bool, RequirementType, pip_shims.shims.Link]]] excludes = [ "_repo", "_base_line", "setup_path", "_has_hashed_name", "pyproject_path", "pyproject_requires", "pyproject_backend", "_setup_info", "_parsed_line", "_uri_scheme", ] filter_func = lambda k, v: bool(v) is True and k.name not in excludes # noqa pipfile_dict = attr.asdict(self, filter=filter_func).copy() name = pipfile_dict.pop("name", None) if name is None: if self.name: name = self.name elif self.parsed_line and self.parsed_line.name: name = self.name = self.parsed_line.name elif self.setup_info and self.setup_info.name: name = self.name = self.setup_info.name if "vcs" in pipfile_dict: pipfile_dict = self._choose_vcs_source(pipfile_dict) name, _ = pip_shims.shims._strip_extras(name) return {name: pipfile_dict} # type: ignore @attr.s(eq=True, order=True, hash=True) class Requirement(object): _name = attr.ib(eq=True, order=True) # type: STRING_TYPE vcs = attr.ib( default=None, validator=attr.validators.optional(validate_vcs), eq=True, order=True, ) # type: Optional[STRING_TYPE] req = attr.ib( default=None, eq=True, order=True ) # type: Optional[Union[VCSRequirement, FileRequirement, NamedRequirement]] markers = attr.ib(default=None, eq=True, order=True) # type: Optional[STRING_TYPE] _specifiers = attr.ib( validator=attr.validators.optional(validate_specifiers), eq=True, order=True ) # type: Optional[STRING_TYPE] index = attr.ib(default=None, eq=True, order=True) # type: Optional[STRING_TYPE] editable = attr.ib(default=None, eq=True, order=True) # type: Optional[bool] hashes = attr.ib( factory=frozenset, converter=frozenset, eq=True, order=True ) # type: 
FrozenSet[STRING_TYPE] extras = attr.ib(factory=tuple, eq=True, order=True) # type: Tuple[STRING_TYPE, ...] abstract_dep = attr.ib( default=None, eq=False, order=False ) # type: Optional[AbstractDependency] _line_instance = attr.ib(default=None, eq=False, order=False) # type: Optional[Line] _ireq = attr.ib( default=None, eq=False, order=False ) # type: Optional[pip_shims.InstallRequirement] def __hash__(self): return hash(self.as_line()) @_name.default def get_name(self): # type: () -> Optional[STRING_TYPE] if self.req is not None: return self.req.name return None @property def name(self): # type: () -> Optional[STRING_TYPE] if self._name is not None: return self._name name = None if self.req and self.req.name: name = self.req.name elif self.req and self.is_file_or_url and self.req.setup_info: name = self.req.setup_info.name self._name = name return name @property def requirement(self): # type: () -> Optional[PackagingRequirement] if self.req: return self.req.req return None def add_hashes(self, hashes): # type: (Union[S, List[S], Set[S], Tuple[S, ...]]) -> Requirement new_hashes = set() # type: Set[STRING_TYPE] if self.hashes is not None: new_hashes |= set(self.hashes) if isinstance(hashes, six.string_types): new_hashes.add(hashes) else: new_hashes |= set(hashes) return attr.evolve(self, hashes=tuple(new_hashes)) def get_hashes_as_pip(self, as_list=False): # type: (bool) -> Union[STRING_TYPE, List[STRING_TYPE]] hashes = "" # type: Union[STRING_TYPE, List[STRING_TYPE]] if as_list: hashes = [] if self.hashes: hashes = [HASH_STRING.format(h) for h in self.hashes] else: hashes = "" if self.hashes: hashes = "".join([HASH_STRING.format(h) for h in self.hashes]) return hashes @property def hashes_as_pip(self): # type: () -> STRING_TYPE hashes = self.get_hashes_as_pip() assert isinstance(hashes, six.string_types) return hashes @property def markers_as_pip(self): # type: () -> S if self.markers: return " ; {0}".format(self.markers).replace('"', "'") return "" @property def 
extras_as_pip(self): # type: () -> STRING_TYPE if self.extras: return "[{0}]".format( ",".join(sorted([extra.lower() for extra in self.extras])) # type: ignore ) return "" @cached_property def commit_hash(self): # type: () -> Optional[S] if self.req is None or not isinstance(self.req, VCSRequirement): return None commit_hash = None if self.req is not None: with self.req.locked_vcs_repo() as repo: commit_hash = repo.get_commit_hash() return commit_hash @_specifiers.default def get_specifiers(self): # type: () -> S if self.req and self.req.req and self.req.req.specifier: return specs_to_string(self.req.req.specifier) return "" def update_name_from_path(self, path): metadata = get_metadata(path) name = self.name if metadata is not None: metadata_name = metadata.get("name") if metadata_name and metadata_name != "wheel": name = metadata_name if name is not None: if self.req.name is None: self.req.name = name if self.req.req and self.req.req.name is None: self.req.req.name = name if self._line_instance._name is None: self._line_instance.name = name if self.req._parsed_line._name is None: self.req._parsed_line.name = name if self.req._setup_info and self.req._setup_info.name is None: self.req._setup_info.name = name def get_line_instance(self): # type: () -> Line line_parts = [] if self.req: if self.req.line_part.startswith("-e "): line_parts.extend(self.req.line_part.split(" ", 1)) else: line_parts.append(self.req.line_part) if not self.is_vcs and not self.vcs and self.extras_as_pip: line_parts.append(self.extras_as_pip) if self._specifiers and not (self.is_file_or_url or self.is_vcs): line_parts.append(self._specifiers) if self.markers: line_parts.append("; {0}".format(self.markers.replace('"', "'"))) if self.hashes_as_pip and not (self.editable or self.vcs or self.is_vcs): line_parts.append(self.hashes_as_pip) if self.editable: if line_parts[0] == "-e": line = "".join(line_parts[1:]) else: line = "".join(line_parts) if self.markers: line = '"{0}"'.format(line) line = 
"-e {0}".format(line) else: line = "".join(line_parts) return Line(line) @property def line_instance(self): # type: () -> Optional[Line] if self._line_instance is None: self.line_instance = self.get_line_instance() return self._line_instance @line_instance.setter def line_instance(self, line_instance): # type: (Line) -> None if self.req: self.req._parsed_line = line_instance self._line_instance = line_instance @property def specifiers(self): # type: () -> Optional[STRING_TYPE] if self._specifiers: return self._specifiers else: specs = self.get_specifiers() if specs: self._specifiers = specs return specs if not self._specifiers and ( self.req is not None and isinstance(self.req, NamedRequirement) and self.req.version ): self._specifiers = self.req.version elif ( not self.editable and self.req and (not isinstance(self.req, NamedRequirement) and self.req.setup_info) ): if ( self.line_instance and self.line_instance.setup_info and self.line_instance.setup_info.version ): self._specifiers = "=={0}".format(self.req.setup_info.version) elif not self._specifiers: if self.req and self.req.parsed_line and self.req.parsed_line.specifiers: self._specifiers = specs_to_string(self.req.parsed_line.specifiers) elif self.line_instance and self.line_instance.specifiers: self._specifiers = specs_to_string(self.line_instance.specifiers) elif self.is_file_or_url or self.is_vcs: try: setupinfo_dict = self.run_requires() except Exception: setupinfo_dict = None if setupinfo_dict is not None: self._specifiers = "=={0}".format(setupinfo_dict.get("version")) if self._specifiers: specset = SpecifierSet(self._specifiers) if self.line_instance and not self.line_instance.specifiers: self.line_instance.specifiers = specset if self.req: if self.req._parsed_line and not self.req._parsed_line.specifiers: self.req._parsed_line.specifiers = specset elif not self.req._parsed_line and self.line_instance: self.req._parsed_line = self.line_instance if self.req and self.req.req and not 
self.req.req.specifier: self.req.req.specifier = specset return self._specifiers @property def is_vcs(self): # type: () -> bool return isinstance(self.req, VCSRequirement) @property def build_backend(self): # type: () -> Optional[STRING_TYPE] if self.req is not None and ( not isinstance(self.req, NamedRequirement) and self.req.is_local ): with pip_shims.shims.global_tempdir_manager(): setup_info = self.run_requires() build_backend = setup_info.get("build_backend") return build_backend return "setuptools.build_meta" @property def uses_pep517(self): # type: () -> bool if self.build_backend: return True return False @property def is_file_or_url(self): # type: () -> bool return isinstance(self.req, FileRequirement) @property def is_named(self): # type: () -> bool return isinstance(self.req, NamedRequirement) @property def is_wheel(self): # type: () -> bool if ( self.req and not isinstance(self.req, NamedRequirement) and (self.req.link is not None and self.req.link.is_wheel) ): return True return False @property def normalized_name(self): # type: () -> S return canonicalize_name(self.name) def copy(self): return attr.evolve(self) @classmethod @lru_cache() def from_line(cls, line): # type: (AnyStr) -> Requirement if isinstance(line, pip_shims.shims.InstallRequirement): line = format_requirement(line) parsed_line = Line(line) r = ( None ) # type: Optional[Union[VCSRequirement, FileRequirement, NamedRequirement]] if ( (parsed_line.is_file and parsed_line.is_installable) or parsed_line.is_remote_url ) and not parsed_line.is_vcs: r = file_req_from_parsed_line(parsed_line) elif parsed_line.is_vcs: r = vcs_req_from_parsed_line(parsed_line) elif line == "." and not is_installable_file(line): raise RequirementError( "Error parsing requirement %s -- are you sure it is installable?" 
% line ) else: r = named_req_from_parsed_line(parsed_line) req_markers = None if parsed_line.markers: req_markers = PackagingRequirement("fakepkg; {0}".format(parsed_line.markers)) if r is not None and r.req is not None: r.req.marker = getattr(req_markers, "marker", None) if req_markers else None args = {} # type: Dict[STRING_TYPE, CREATION_ARG_TYPES] args = { "name": r.name, "vcs": parsed_line.vcs, "req": r, "markers": parsed_line.markers, "editable": parsed_line.editable, "line_instance": parsed_line, } if parsed_line.extras: extras = () # type: Tuple[STRING_TYPE, ...] extras = tuple(sorted(dedup([extra.lower() for extra in parsed_line.extras]))) args["extras"] = extras if r is not None: r.extras = extras elif r is not None and r.extras is not None: args["extras"] = tuple( sorted(dedup([extra.lower() for extra in r.extras])) ) # type: ignore if r.req is not None: r.req.extras = args["extras"] if parsed_line.hashes: args["hashes"] = tuple(parsed_line.hashes) # type: ignore cls_inst = cls(**args) # type: ignore return cls_inst @classmethod def from_ireq(cls, ireq): return cls.from_line(format_requirement(ireq)) @classmethod def from_metadata(cls, name, version, extras, markers): return cls.from_ireq( make_install_requirement(name, version, extras=extras, markers=markers) ) @classmethod def from_pipfile(cls, name, pipfile): from .markers import PipenvMarkers _pipfile = {} if hasattr(pipfile, "keys"): _pipfile = dict(pipfile).copy() _pipfile["version"] = get_version(pipfile) vcs = next(iter([vcs for vcs in VCS_LIST if vcs in _pipfile]), None) if vcs: _pipfile["vcs"] = vcs r = VCSRequirement.from_pipfile(name, pipfile) elif any(key in _pipfile for key in ["path", "file", "uri"]): r = FileRequirement.from_pipfile(name, pipfile) else: r = NamedRequirement.from_pipfile(name, pipfile) markers = PipenvMarkers.from_pipfile(name, _pipfile) req_markers = None if markers: markers = str(markers) req_markers = PackagingRequirement("fakepkg; {0}".format(markers)) if r.req is not 
None: r.req.marker = req_markers.marker extras = _pipfile.get("extras") if r.req: if r.req.specifier: r.req.specifier = SpecifierSet(_pipfile["version"]) r.req.extras = ( tuple(sorted(dedup([extra.lower() for extra in extras]))) if extras else () ) args = { "name": r.name, "vcs": vcs, "req": r, "markers": markers, "extras": tuple(_pipfile.get("extras", ())), "editable": _pipfile.get("editable", False), "index": _pipfile.get("index"), } if any(key in _pipfile for key in ["hash", "hashes"]): args["hashes"] = _pipfile.get("hashes", [pipfile.get("hash")]) cls_inst = cls(**args) return cls_inst def as_line( self, sources=None, include_hashes=True, include_extras=True, include_markers=True, as_list=False, ): """Format this requirement as a line in requirements.txt. If ``sources`` provided, it should be an sequence of mappings, containing all possible sources to be used for this requirement. If ``sources`` is omitted or falsy, no index information will be included in the requirement line. """ assert self.line_instance is not None parts = self.line_instance.get_line( with_prefix=True, with_hashes=include_hashes, with_markers=include_markers, as_list=as_list, ) if sources and self.requirement and not (self.line_instance.is_local or self.vcs): from ..utils import prepare_pip_source_args if self.index: sources = [s for s in sources if s.get("name") == self.index] source_list = prepare_pip_source_args(sources) if as_list: parts.extend(sources) else: index_string = " ".join(source_list) parts = "{0} {1}".format(parts, index_string) return parts def get_markers(self): # type: () -> Marker markers = self.markers if markers: fake_pkg = PackagingRequirement("fakepkg; {0}".format(markers)) markers = fake_pkg.marker return markers def get_specifier(self): # type: () -> Union[SpecifierSet, LegacySpecifier] try: return Specifier(self.specifiers) except InvalidSpecifier: return LegacySpecifier(self.specifiers) def get_version(self): return 
pip_shims.shims.parse_version(self.get_specifier().version) def get_requirement(self): req_line = self.req.req.line if req_line.startswith("-e "): _, req_line = req_line.split(" ", 1) req = init_requirement(self.name) req.line = req_line req.specifier = SpecifierSet(self.specifiers if self.specifiers else "") if self.is_vcs or self.is_file_or_url: req.url = getattr(self.req.req, "url", self.req.link.url_without_fragment) req.marker = self.get_markers() req.extras = set(self.extras) if self.extras else set() return req @property def constraint_line(self): return self.as_line() @property def is_direct_url(self): return ( self.is_file_or_url and self.req.is_direct_url or (self.line_instance.is_direct_url or self.req.parsed_line.is_direct_url) ) def as_pipfile(self): good_keys = ( "hashes", "extras", "markers", "editable", "version", "index", ) + VCS_LIST req_dict = { k: v for k, v in attr.asdict(self, recurse=False, filter=filter_none).items() if k in good_keys } name = self.name if "markers" in req_dict and req_dict["markers"]: req_dict["markers"] = req_dict["markers"].replace('"', "'") if not self.req.name: name_carriers = (self.req, self, self.line_instance, self.req.parsed_line) name_options = [ getattr(carrier, "name", None) for carrier in name_carriers if carrier is not None ] req_name = next(iter(n for n in name_options if n is not None), None) self.req.name = req_name req_name, dict_from_subreq = self.req.pipfile_part.popitem() base_dict = { k: v for k, v in dict_from_subreq.items() if k not in ["req", "link", "_setup_info"] } base_dict.update(req_dict) conflicting_keys = ("file", "path", "uri") if "file" in base_dict and any(k in base_dict for k in conflicting_keys[1:]): conflicts = [k for k in (conflicting_keys[1:],) if k in base_dict] for k in conflicts: base_dict.pop(k) if "hashes" in base_dict: _hashes = base_dict.pop("hashes") hashes = [] for _hash in _hashes: try: hashes.append(_hash.as_line()) except AttributeError: hashes.append(_hash) 
base_dict["hashes"] = sorted(hashes) if "extras" in base_dict: base_dict["extras"] = list(base_dict["extras"]) if len(base_dict.keys()) == 1 and "version" in base_dict: base_dict = base_dict.get("version") return {name: base_dict} def as_ireq(self): if self.line_instance and self.line_instance.ireq: return self.line_instance.ireq elif getattr(self.req, "_parsed_line", None) and self.req._parsed_line.ireq: return self.req._parsed_line.ireq kwargs = {"include_hashes": False} if (self.is_file_or_url and self.req.is_local) or self.is_vcs: kwargs["include_markers"] = False ireq_line = self.as_line(**kwargs) ireq = Line(ireq_line).ireq if not getattr(ireq, "req", None): ireq.req = self.req.req if (self.is_file_or_url and self.req.is_local) or self.is_vcs: if getattr(ireq, "req", None) and getattr(ireq.req, "marker", None): ireq.req.marker = None else: ireq.req.extras = self.req.req.extras if not ((self.is_file_or_url and self.req.is_local) or self.is_vcs): ireq.req.marker = self.req.req.marker return ireq @property def pipfile_entry(self): return self.as_pipfile().copy().popitem() @property def ireq(self): return self.as_ireq() def get_dependencies(self, sources=None): """Retrieve the dependencies of the current requirement. Retrieves dependencies of the current requirement. This only works on pinned requirements. :param sources: Pipfile-formatted sources, defaults to None :param sources: list[dict], optional :return: A set of requirement strings of the dependencies of this requirement. :rtype: set(str) """ from .dependencies import get_dependencies if not sources: sources = [ {"name": "pypi", "url": "https://pypi.org/simple", "verify_ssl": True} ] return get_dependencies(self.as_ireq(), sources=sources) def get_abstract_dependencies(self, sources=None): """Retrieve the abstract dependencies of this requirement. Returns the abstract dependencies of the current requirement in order to resolve. 
:param sources: A list of sources (pipfile format), defaults to None :param sources: list, optional :return: A list of abstract (unpinned) dependencies :rtype: list[ :class:`~requirementslib.models.dependency.AbstractDependency` ] """ from .dependencies import ( AbstractDependency, get_dependencies, get_abstract_dependencies, ) if not self.abstract_dep: parent = getattr(self, "parent", None) self.abstract_dep = AbstractDependency.from_requirement(self, parent=parent) if not sources: sources = [ {"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True} ] if is_pinned_requirement(self.ireq): deps = self.get_dependencies() else: ireq = sorted(self.find_all_matches(), key=lambda k: k.version) deps = get_dependencies(ireq.pop(), sources=sources) return get_abstract_dependencies(deps, sources=sources, parent=self.abstract_dep) def find_all_matches(self, sources=None, finder=None): # type: (Optional[List[Dict[S, Union[S, bool]]]], Optional[PackageFinder]) -> List[InstallationCandidate] """Find all matching candidates for the current requirement. Consults a finder to find all matching candidates. 
:param sources: Pipfile-formatted sources, defaults to None :param sources: list[dict], optional :param PackageFinder finder: A **PackageFinder** instance from pip's repository implementation :return: A list of Installation Candidates :rtype: list[ :class:`~pip._internal.index.InstallationCandidate` ] """ from .dependencies import get_finder, find_all_matches if not finder: _, finder = get_finder(sources=sources) return find_all_matches(finder, self.as_ireq()) def run_requires(self, sources=None, finder=None): if self.req and self.req.setup_info is not None: info_dict = self.req.setup_info.as_dict() elif self.line_instance and self.line_instance.setup_info is not None: info_dict = self.line_instance.setup_info.as_dict() else: if not finder: from .dependencies import get_finder finder = get_finder(sources=sources) with pip_shims.shims.global_tempdir_manager(): info = SetupInfo.from_requirement(self, finder=finder) if info is None: return {} info_dict = info.get_info() if self.req and not self.req.setup_info: self.req._setup_info = info if self.req._has_hashed_name and info_dict.get("name"): self.req.name = self.name = info_dict["name"] if self.req.req.name != info_dict["name"]: self.req.req.name = info_dict["name"] return info_dict def merge_markers(self, markers): # type: (Union[AnyStr, Marker]) -> None if not markers: return self if not isinstance(markers, Marker): markers = Marker(markers) _markers = [] # type: List[Marker] ireq = self.as_ireq() if ireq and ireq.markers: ireq_marker = ireq.markers _markers.append(str(ireq_marker)) _markers.append(str(markers)) marker_str = " and ".join([normalize_marker_str(m) for m in _markers if m]) new_marker = Marker(marker_str) line = copy.deepcopy(self._line_instance) line.markers = marker_str line.parsed_marker = new_marker if getattr(line, "_requirement", None) is not None: line._requirement.marker = new_marker if getattr(line, "_ireq", None) is not None and line._ireq.req: line._ireq.req.marker = new_marker new_ireq = 
getattr(self, "ireq", None) if new_ireq and new_ireq.req: new_ireq.req.marker = new_marker req = self.req if req.req: req_requirement = req.req req_requirement.marker = new_marker req = attr.evolve(req, req=req_requirement, parsed_line=line) return attr.evolve( self, markers=str(new_marker), ireq=new_ireq, req=req, line_instance=line ) def file_req_from_parsed_line(parsed_line): # type: (Line) -> FileRequirement path = parsed_line.relpath if parsed_line.relpath else parsed_line.path pyproject_requires = None # type: Optional[Tuple[STRING_TYPE, ...]] if parsed_line.pyproject_requires is not None: pyproject_requires = tuple(parsed_line.pyproject_requires) pyproject_path = ( Path(parsed_line.pyproject_toml) if parsed_line.pyproject_toml else None ) req_dict = { "setup_path": parsed_line.setup_py, "path": path, "editable": parsed_line.editable, "extras": parsed_line.extras, "uri_scheme": parsed_line.preferred_scheme, "link": parsed_line.link, "uri": parsed_line.uri, "pyproject_requires": pyproject_requires, "pyproject_backend": parsed_line.pyproject_backend, "pyproject_path": pyproject_path, "parsed_line": parsed_line, "req": parsed_line.requirement, } if parsed_line.name is not None: req_dict["name"] = parsed_line.name return FileRequirement(**req_dict) # type: ignore def vcs_req_from_parsed_line(parsed_line): # type: (Line) -> VCSRequirement line = "{0}".format(parsed_line.line) if parsed_line.editable: line = "-e {0}".format(line) if parsed_line.url is not None: link = create_link( build_vcs_uri( vcs=parsed_line.vcs, uri=parsed_line.url, name=parsed_line.name, ref=parsed_line.ref, subdirectory=parsed_line.subdirectory, extras=list(parsed_line.extras), ) ) else: link = parsed_line.link pyproject_requires = () # type: Optional[Tuple[STRING_TYPE, ...]] if parsed_line.pyproject_requires is not None: pyproject_requires = tuple(parsed_line.pyproject_requires) vcs_dict = { "setup_path": parsed_line.setup_py, "path": parsed_line.path, "editable": parsed_line.editable, 
"vcs": parsed_line.vcs, "ref": parsed_line.ref, "subdirectory": parsed_line.subdirectory, "extras": parsed_line.extras, "uri_scheme": parsed_line.preferred_scheme, "link": link, "uri": parsed_line.uri, "pyproject_requires": pyproject_requires, "pyproject_backend": parsed_line.pyproject_backend, "pyproject_path": Path(parsed_line.pyproject_toml) if parsed_line.pyproject_toml else None, "parsed_line": parsed_line, "req": parsed_line.requirement, "base_line": line, } if parsed_line.name: vcs_dict["name"] = parsed_line.name return VCSRequirement(**vcs_dict) # type: ignore def named_req_from_parsed_line(parsed_line): # type: (Line) -> NamedRequirement if parsed_line.name is not None: return NamedRequirement( name=parsed_line.name, version=parsed_line.specifier, req=parsed_line.requirement, extras=parsed_line.extras, editable=parsed_line.editable, parsed_line=parsed_line, ) return NamedRequirement.from_line(parsed_line.line) if __name__ == "__main__": line = Line("vistir@ git+https://github.com/sarugaku/vistir.git@master") print(line)
{ "content_hash": "0d9a6d702be10064d0459abe79f93a20", "timestamp": "", "source": "github", "line_count": 3158, "max_line_length": 113, "avg_line_length": 38.027232425585815, "alnum_prop": 0.5503122658006495, "repo_name": "kennethreitz/pipenv", "id": "6831a7c8305213711d9051fb666b339c2a841c58", "size": "120115", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pipenv/vendor/requirementslib/models/requirements.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "202" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Python", "bytes": "2588085" }, { "name": "Roff", "bytes": "40754" } ], "symlink_target": "" }
import argparse
import gzip  # noqa: F401 -- kept for compatibility; pandas handles gzip itself
import json
import os
import sys
from pathlib import Path

import pandas as pd
from tqdm import tqdm
import pydicom

parser = argparse.ArgumentParser(description='Extract meta-data from DICOMs')
parser.add_argument('--data', '-d', default='./files',
                    help='path to DICOM format images')
parser.add_argument('--out', '-o', default='dicom-metadata.csv.gz',
                    help=('name out dataframe output, '
                          '(default: dicom-metadata.csv.gz), '
                          'note: this is a compressed format.'))
parser.add_argument('--json', '-j', default=None,
                    help=('name of the output json file, '
                          '(default: <output-stem>.json)'))
parser.add_argument('--number', '-n', type=int, default=None,
                    help=('limit the number of DICOMs to process '
                          ' (default: None).'))


def recurse(ds):
    """Flatten a DICOM dataset (or sequence item) into a dict.

    Keys are the 32-bit combined (group << 16 | element) tag values.
    Elements whose value is longer than 100 items (usually image data)
    are recorded as None so their presence is still noted.

    :param ds: an iterable of pydicom data elements (dataset or item)
    :return: dict mapping combined tag -> value (or None for huge values)
    """
    tmp_dict = dict()
    for elem in ds:
        if elem.VR == 'SQ':
            # do not include look up tables - huge and not human readable
            if 'LUT' not in elem.name:
                # NOTE(review): the dicts built by these recursive calls are
                # discarded, so nested sequence values never reach tmp_dict;
                # only the top-level caller below keeps recursion results.
                # Preserved as-is -- confirm this is intentional.
                [recurse(item) for item in elem]
        else:
            # index by a single long combining group and element
            e = elem.tag.group << 16 | elem.tag.element
            # Save element value to the dictionary
            # *unless* it is huge - these are usually images
            if hasattr(elem.value, '__len__') and len(elem.value) > 100:
                tmp_dict[e] = None
            elif type(elem.value) is pydicom.multival.MultiValue:
                # MultiValue does not serialize to JSON; convert to list
                tmp_dict[e] = list(elem.value)
            else:
                tmp_dict[e] = elem.value
    return tmp_dict


if __name__ == "__main__":
    args = parser.parse_args()
    base_path = Path(args.data)
    out_filename = args.out

    # Derive the json filename from the csv output stem when not given.
    if args.json is not None:
        json_filename = args.json
    else:
        json_filename = out_filename
        if json_filename.endswith('.gz'):
            json_filename = json_filename[0:-3]
        if json_filename.endswith('.csv'):
            json_filename = json_filename[0:-4]
        json_filename += '.json'

    # Get the list of all dicoms under the given path.
    # Expected layout: <base>/<hash>/<patient>/<study>/<dicom-file>
    files = list()
    for h in os.listdir(base_path):
        for pt in os.listdir(base_path / h):
            for st in os.listdir(base_path / f'{h}{os.sep}{pt}'):
                dcm_path = f'{base_path}{os.sep}{h}{os.sep}{pt}{os.sep}{st}'
                dcms = os.listdir(dcm_path)
                files.extend([f'{dcm_path}{os.sep}{d}' for d in dcms])
    files.sort()

    N = len(files)
    print(f'Found {N} files.')
    if args.number is not None:
        if args.number < N:
            # limit number of dicoms
            print(f'Limiting parsing to {args.number} of {N} DICOMs.')
            N = args.number
    if N == 0:
        print('No files to process. Exiting.')
        sys.exit()

    dicom_tabular_data = list()
    with open(json_filename, 'w') as fp:
        # initialize the array in the json file
        fp.write('[\n')
        for i in tqdm(range(N)):
            if i > 0:
                fp.write(',\n')
            dicom_full_path = files[i]
            # FIX: the dicom key is the filename without extension; use
            # os.path.basename rather than split('/') -- paths above are
            # built with os.sep, so splitting on '/' breaks on Windows.
            fn = os.path.basename(dicom_full_path).split('.')[0]
            # prepare the json output as a dictionary with this dicom fn as key
            fp.write('{')
            fp.write(f'"{fn}": ')
            # load header info from the dicom (skip the pixel data)
            with open(dicom_full_path, 'rb') as dcm_fp:
                plan = pydicom.dcmread(dcm_fp, stop_before_pixels=True)
            field_dict = dict()
            dicom_json = dict()
            # go through each element
            for elem in plan:
                # index the dictionary using a long value of group, element
                e = (elem.tag.group << 16) | elem.tag.element
                # sequence data goes into JSON
                if elem.VR == 'SQ':
                    # store number of items in the structured/flat data
                    field_dict[e] = len(elem.value)
                    # make a dict for the sequence, which will go into json;
                    # don't store look up tables because they're huge and
                    # not human readable
                    if 'LUT' not in elem.name:
                        dicom_json[e] = [recurse(item) for item in elem]
                else:
                    # three "real" data-types: number, string, or list
                    field_dict[e] = elem.value
            field_dict['dicom'] = fn
            dicom_tabular_data.append(field_dict)
            # convert dictionary to json and write it out
            js = json.dumps(dicom_json)
            fp.write(js)
            # finish the dicom dictionary
            fp.write('}')
        # end of array in json file
        fp.write('\n]')

    # combine list of dictionaries into a dataframe,
    # indexed by the dicom filename
    df = pd.DataFrame(dicom_tabular_data)
    df.set_index('dicom', inplace=True)
    # write to file, compressed when the target name asks for it
    if out_filename.endswith('.gz'):
        df.to_csv(out_filename, sep=',', compression='gzip')
    else:
        df.to_csv(out_filename, sep=',')
{ "content_hash": "daaf58e9d13e9a2bd0e9f0f2cad4ba80", "timestamp": "", "source": "github", "line_count": 168, "max_line_length": 80, "avg_line_length": 33.76190476190476, "alnum_prop": 0.5202750352609309, "repo_name": "MIT-LCP/mimic-code", "id": "4feece53bb705d9d34cee58d19e0e701db7991b3", "size": "5987", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "mimic-iv-cxr/dcm/export_metadata.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "15422" }, { "name": "Dockerfile", "bytes": "850" }, { "name": "HTML", "bytes": "2465758" }, { "name": "Jupyter Notebook", "bytes": "4820852" }, { "name": "Makefile", "bytes": "15380" }, { "name": "PLpgSQL", "bytes": "38881" }, { "name": "Python", "bytes": "74332" }, { "name": "Shell", "bytes": "70360" }, { "name": "TeX", "bytes": "54148" } ], "symlink_target": "" }
"""End to end tests for lib.flows.general.registry.""" import os from grr.endtoend_tests import base from grr.lib import aff4 from grr.lib import data_store from grr.lib import flow_utils from grr.lib import rdfvalue from grr.lib import utils from grr.lib.flows.console import debugging class TestFindWindowsRegistry(base.ClientTestBase): """Test that user listing from the registry works. We basically list the registry and then run Find on the same place, we expect a single ProfileImagePath value for each user. TODO(user): this is excluded from automated tests for now because it needs to run two flows and defines its own runTest to do so. We should support this but it requires more work. """ platforms = ["Windows"] reg_path = ("/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/" "CurrentVersion/ProfileList/") output_path = "analysis/find/test" def runTest(self): """Launch our flows.""" for flow, args in [ ("ListDirectory", {"pathspec": rdfvalue.PathSpec( pathtype=rdfvalue.PathSpec.PathType.REGISTRY, path=self.reg_path)}), ("FindFiles", {"findspec": rdfvalue.FindSpec( pathspec=rdfvalue.PathSpec( path=self.reg_path, pathtype=rdfvalue.PathSpec.PathType.REGISTRY), path_regex="ProfileImagePath"), "output": self.output_path})]: if self.local_worker: self.session_id = debugging.StartFlowAndWorker( self.client_id, flow, **args) else: self.session_id = flow_utils.StartFlowAndWait( self.client_id, flow_name=flow, token=self.token, **args) self.CheckFlow() def CheckFlow(self): """Check that all profiles listed have an ProfileImagePath.""" urn = self.client_id.Add("registry").Add(self.reg_path) fd = aff4.FACTORY.Open(urn, mode="r", token=self.token) user_accounts = sorted([x.urn for x in fd.OpenChildren() if x.urn.Basename().startswith("S-")]) urn = self.client_id.Add(self.output_path) fd = aff4.FACTORY.Open(urn, token=self.token) hits = sorted([x.aff4path for x in fd]) self.assertGreater(len(hits), 1) self.assertEqual(len(hits), len(user_accounts)) for x, y in 
zip(user_accounts, hits): self.assertEqual(x.Add("ProfileImagePath"), y) class TestClientRegistry(base.AutomatedTest): """Tests if listing registry keys works on Windows.""" platforms = ["Windows"] flow = "ListDirectory" args = {"pathspec": rdfvalue.PathSpec( path="HKEY_LOCAL_MACHINE", pathtype=rdfvalue.PathSpec.PathType.REGISTRY)} output_path = "/registry/HKEY_LOCAL_MACHINE" def CheckFlow(self): urn = self.client_id.Add(self.output_path) fd = aff4.FACTORY.Open(urn, mode="r", token=self.token) children = list(fd.OpenChildren()) self.assertTrue("SYSTEM" in [os.path.basename(utils.SmartUnicode(child.urn)) for child in children]) def tearDown(self): urn = self.client_id.Add(self.output_path) data_store.DB.DeleteSubject(str(urn.Add("SYSTEM")), token=self.token) data_store.DB.DeleteSubject(str(urn), token=self.token)
{ "content_hash": "f454d92bff37d3b3ee800df9e907be60", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 80, "avg_line_length": 35.26373626373626, "alnum_prop": 0.6653162979121221, "repo_name": "bgalehouse/grr", "id": "c5e8d18c6a530e67c4b874788152d8b920155b5f", "size": "3231", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "endtoend_tests/registry.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "227" }, { "name": "Batchfile", "bytes": "14993" }, { "name": "C", "bytes": "9062" }, { "name": "C++", "bytes": "55149" }, { "name": "CSS", "bytes": "12047" }, { "name": "Groff", "bytes": "444" }, { "name": "HTML", "bytes": "48624" }, { "name": "JavaScript", "bytes": "230351" }, { "name": "Makefile", "bytes": "5863" }, { "name": "Protocol Buffer", "bytes": "181723" }, { "name": "Python", "bytes": "4855590" }, { "name": "Ruby", "bytes": "4931" }, { "name": "Shell", "bytes": "45459" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Create the SingleEventPage model, a concrete subclass of EventPage."""

    dependencies = [
        ('tests', '0006_image_file_size'),
    ]

    operations = [
        migrations.CreateModel(
            name='SingleEventPage',
            fields=[
                # Multi-table-inheritance link back to the parent EventPage row.
                ('eventpage_ptr', models.OneToOneField(
                    to='tests.EventPage',
                    parent_link=True,
                    primary_key=True,
                    serialize=False,
                    auto_created=True,
                )),
                ('excerpt', models.TextField(
                    blank=True,
                    null=True,
                    max_length=255,
                    help_text='Short text to describe what is this action about',
                )),
            ],
            # Inherit model behaviour from the existing EventPage class.
            bases=('tests.eventpage',),
        ),
    ]
{ "content_hash": "da952f47f3f3eb978bcc78e753fb9c56", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 85, "avg_line_length": 27.63157894736842, "alnum_prop": 0.3904761904761905, "repo_name": "serzans/wagtail", "id": "26279e569ad92d48e3ac9f6210ef7b7cb642e260", "size": "1074", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "wagtail/tests/testapp/migrations/0007_singleeventpage.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "152982" }, { "name": "HTML", "bytes": "251781" }, { "name": "JavaScript", "bytes": "92398" }, { "name": "Makefile", "bytes": "548" }, { "name": "Python", "bytes": "1670621" }, { "name": "Shell", "bytes": "7388" } ], "symlink_target": "" }
import os

import pytest

from .utils import *

FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "../../samples/x-dns-peering")


@pytest.fixture(scope="module")
def resources(recursive_plan_runner):
    """Plan the x-dns-peering sample once per module and return its resources."""
    _, planned = recursive_plan_runner(
        FIXTURES_DIR,
        tf_var_file=os.path.join(FIXTURES_DIR, "x-demo.tfvars"),
        project_id="testonly",
        project_create="true",
    )
    return planned


def test_resource_count(resources):
    "Test total number of resources created."
    assert len(resources) == 42


def test_apigee_instance(resources):
    "Test Apigee Instance Resource"
    assert_instance(resources, "europe-west1", "10.0.0.0/22")


def test_apigee_instance_attachment(resources):
    "Test Apigee Instance Attachments."
    assert_instance_attachment(resources, ["test1", "test2"])


def test_envgroup_attachment(resources):
    "Test Apigee Envgroup Attachments."
    assert_envgroup_attachment(resources, ["test1", "test2"])


def test_envgroup(resources):
    "Test env group."
    assert_envgroup_name(resources, "test")


def test_envgroup_hostnames(resources):
    "Test env group."
    assert_envgroup_hostnames(resources, ["test-api.internal", "test.api.example.com"])


def test_dns_entries(resources):
    "Test the necessary DNS entries"
    # Exactly two record sets should be planned, and both must be A records
    # for the expected internal names.
    record_sets = [
        res["values"] for res in resources if res["type"] == "google_dns_record_set"
    ]
    assert len(record_sets) == 2
    a_record_names = [rs["name"] for rs in record_sets if rs["type"] == "A"]
    assert set(a_record_names) == {"test-api.internal.", "demo.internal."}
{ "content_hash": "d4a60e5562d047b3f4f84582a41377f8", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 87, "avg_line_length": 28.464285714285715, "alnum_prop": 0.6693851944792973, "repo_name": "apigee/terraform-modules", "id": "3a11d8c347d359c1ad75aa0508472e10550bc356", "size": "2171", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/samples/test_dns_peering.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HCL", "bytes": "55853" }, { "name": "Python", "bytes": "30605" }, { "name": "Shell", "bytes": "3415" } ], "symlink_target": "" }
""" Implementation of "clone" + "promote" functionality. Cloning creates a copy of a trove on a related branch, with the only link back to the original branch being through the "clonedFrom" link. """ # NOTE FOR READING THE CODE: creating the copy is easy. It's determining # whether or not the clone is necessary that is complicated. To that end # we have: # # The chooser: The chooser contains the algorithm for determining whether # a particular trove should be cloned or not, and where it # should be cloned. # # The leafMap: keeps track of the relevant current state of the repository - # what troves are at the leaves, and where they were cloned # from. # # The cloneMap: keeps track of the relationship between troves we might clone # and where they would be cloned to. # # The cloneJob: keeps track of the actual clones we're going to perform # as well as the clones we would perform but aren't because # they have already been cloned. # # I've been thinking about combining the cloneMap and leafMap. 
class CloneJob(object):
    """Tracks the set of troves to be cloned and their chosen target
    versions, along with troves that turned out to be cloned already."""

    def __init__(self, options):
        # trove tuple -> target version (None until a target is picked)
        self.cloneJob = {}
        # trove tuple -> True, for troves whose clone already exists
        self.preCloned = {}
        self.options = options

    def add(self, troveTup):
        """Queue troveTup for cloning; no target version chosen yet."""
        self.target(troveTup, None)

    def alreadyCloned(self, troveTup):
        """Drop troveTup from the pending job; remember it was precloned."""
        if troveTup in self.cloneJob:
            del self.cloneJob[troveTup]
        self.preCloned[troveTup] = True

    def target(self, troveTup, targetVersion):
        """Record the version troveTup will be cloned to."""
        self.cloneJob[troveTup] = targetVersion

    def iterTargetList(self):
        """Iterate over (troveTup, targetVersion) pairs still to clone."""
        return self.cloneJob.iteritems()

    def getTrovesToClone(self):
        """Return the trove tuples still queued for cloning."""
        return self.cloneJob.keys()

    def getPreclonedTroves(self):
        """Return the trove tuples found to be already cloned."""
        return self.preCloned.keys()

    def isEmpty(self):
        """True when no clones remain to be performed."""
        return len(self.cloneJob) == 0
    def _createCloneChangeSet(self, chooser, cloneOptions):
        """Drive a full clone: plan the job, rewrite troves, build the
        changeset.

        Returns a (cloned, changeSet) tuple. cloned is False (with a None
        changeset) when there is nothing to clone or when trove rewriting
        failed (see _buildTroves returning None).
        """
        callback = cloneOptions.callback
        troveCache = TroveCache(self.repos, callback)
        cloneJob, cloneMap, leafMap = self._createCloneJob(cloneOptions,
                                                           chooser, troveCache)
        if cloneJob.isEmpty():
            log.warning('Nothing to clone!')
            return False, None
        newTroveList = self._buildTroves(chooser, cloneMap, cloneJob, leafMap,
                                         troveCache, callback)
        if newTroveList is None:
            # _buildTroves signals failure (e.g. no changelog message) with
            # None rather than raising
            return False, None
        _logMe('new troves calculated')
        if cloneOptions.infoOnly:
            # build an absolute changeset. it's faster and easier.
            cs = changeset.ChangeSet()
            for oldVersion, newTrove in newTroveList:
                cs.newTrove(newTrove.diff(None, absolute = True)[0])
            callback.done()
            return True, cs
        finalCs = self._buildChangeSet(troveCache, newTroveList, callback)
        callback.prefix = ''
        callback.done()
        return True, finalCs
# # Note that what the diff is relative to is not the same as the # fromVersion in the finalTroveList. fromVersion is the version # we're cloning/shadowing from. The diff is always relative to # something in the target repository. We call the version the diff # is relative to the oldVersion. searchDict = {} for (fromVersion, finalTrove) in finalTroveList: if not _sameHost(fromVersion, finalTrove.getVersion()): name, version, flavor = finalTrove.getNameVersionFlavor() label = version.trailingLabel() searchDict.setdefault(name, {}) searchDict[name].setdefault(label, []) searchDict[name][label].append(flavor) matches = self.repos.getTroveLeavesByLabel(searchDict) oldTrovesNeeded = [] for (fromVersion, finalTrove) in finalTroveList: name, version, flavor = finalTrove.getNameVersionFlavor() if _sameHost(fromVersion, finalTrove.getVersion()): oldTrovesNeeded.append((name, fromVersion, flavor)) else: match = None versionD = matches.get(name, {}) for matchVersion, flavorList in versionD.iteritems(): if (matchVersion.trailingLabel() == version.trailingLabel() and flavor in flavorList): match = matchVersion if match is None: # keep oldTrovesNeeded parallel to finalTroveList oldTrovesNeeded.append(None) else: oldTrovesNeeded.append((name, match, flavor)) oldTroves = troveCache.getTroves( [ x for x in oldTrovesNeeded if x is not None ], withFiles=True) # we periodically write file contents to disk and merge in a new # changeset to save RAM. promotes can get large. # # Now to try and explain getting file streams and contents. If there # are few contents changed, we're better off using getFileVersions, but # if lots changes, we're better off just grabbing the whole bloody # changeset. If more than 1/3rd of the files changed, let's grab the # changeset. Note that this percent is completely arbitrary. We also # want to consolidate getFileVersions() and createChangeSet() calls. 
# Once we've found 5000 files to add to the current change set, we'll # add those, write the change set, merge it, and start again. Got all # that? finalCs = changeset.ReadOnlyChangeSet() cs = changeset.ChangeSet() fileCount = 0 jobList = [] jobFilesNeeded = [] individualFilesNeeded = [] # make sure we write out the final changeset lastTrove = finalTroveList[-1][1] for current, (oldTroveInfo, (fromVersion, finalTrove)) in \ enumerate(itertools.izip(oldTrovesNeeded, finalTroveList)): if oldTroveInfo is not None: oldTrove = oldTroves.pop(0) assert(_sameHost(oldTrove.getVersion(), finalTrove.getVersion())) else: oldTrove = None # We can't trust filesNeeded diff returns here because it only # tells us about files whose fileId's have changed, but we need # to know about files whose versions changed as well (as those # may have moved servers). New files and changed files are # of interest (remember we're not necessarily diffing against # the fromVersion, so there could be new files). trvCs = finalTrove.diff(oldTrove, absolute = oldTrove is not None)[0] cs.newTrove(trvCs) # this is in the cache already, so there isn't any reason to # worry about optimizing the number of calls fromTrove = troveCache.getTrove((finalTrove.getName(), fromVersion, finalTrove.getFlavor())) filesNeeded = [] for pathId, path, newFileId, finalFileVersion in \ trvCs.getNewFileList(): # oldFileId is None because this is a new file fromFileVersion = fromTrove.getFile(pathId)[2] filesNeeded.append((pathId, newFileId, None, fromFileVersion)) for pathId, path, newFileId, finalFileVersion in \ trvCs.getChangedFileList(): if not finalFileVersion: # the file was renamed but its the same file. 
continue fromFileVersion = fromTrove.getFile(pathId)[2] if _sameHost(fromFileVersion, finalFileVersion): # The server already has this file on it; no reason to # commit it again continue oldFileId = oldTrove.getFile(pathId)[1] filesNeeded.append((pathId, newFileId, oldFileId, fromFileVersion)) if ((len(filesNeeded) * CHANGESET_MULTIPLE) >= finalTrove.fileCount()): # get the whole change set for this. jobList.append((finalTrove.getName(), (None, None), (fromVersion, finalTrove.getFlavor()), True)) # it's important that this have (pathId, newFileId) first # to ensure we're walking the changeset in the right order # after we sort it jobFilesNeeded += filesNeeded else: individualFilesNeeded += filesNeeded fileCount += len(filesNeeded) if finalTrove != lastTrove and fileCount < MAX_CLONE_FILES: continue callback.buildingChangeset(current + 1, len(finalTroveList)) fileChangeSet = self.repos.createChangeSet(jobList, withFiles = True, withFileContents = True, recurse = False, callback = callback) jobFilesNeeded = sorted(set(jobFilesNeeded)) # fileId, pathId of the last file we saw. we don't need to # include the same file contents twice (nor can we get them # twice from fileChangeSet lastContents = (None, None) # walk the filesNeeded for the files we're getting from changesets for (pathId, newFileId, oldFileId, fromFileVersion) in \ jobFilesNeeded: # we could diff here, but why bother? we don't have anything # to diff against anyway filecs = fileChangeSet.getFileChange(None, newFileId) cs.addFile(oldFileId, newFileId, filecs) # A word on ptr types. This blindly copies them, assuming # that we'll copy the file which actually includes the # contents as well. If that assumption is wrong, then those # file contents are already in the repository so we don't # need them anyway. That leaves a changeset with broken # ptr links, but it commits just fine. 
    def _createCloneJob(self, cloneOptions, chooser, troveCache):
        """Plan the clone without performing it.

        Determines which troves need cloning, targets source and binary
        versions on the destination branches, and rechecks troves that
        appear to be cloned already.

        Returns a (cloneJob, cloneMap, leafMap) triple; raises
        CloneIncomplete if some referenced troves cannot be satisfied on
        the target branches.
        """
        cloneJob = CloneJob(cloneOptions)
        cloneMap = CloneMap()
        chooser.setCloneMap(cloneMap)
        cloneOptions.callback.determiningCloneTroves()
        if cloneOptions.cloneOnlyByDefaultTroves:
            # restrict the clone to troves that are byDefault True in the
            # primary troves (see _setByDefaultMap)
            self._setByDefaultMap(chooser, troveCache)
        _logMe('determining troves to clone')
        self._determineTrovesToClone(chooser, cloneMap, cloneJob,
                                     troveCache, cloneOptions.callback)
        cloneOptions.callback.determiningTargets()
        _logMe('get existing leaves')
        leafMap = self._getExistingLeaves(cloneMap, troveCache, cloneOptions)
        _logMe('target sources')
        self._targetSources(chooser, cloneMap, cloneJob, leafMap,
                            troveCache, cloneOptions.callback)
        _logMe('target binaries')
        self._targetBinaries(chooser, cloneMap, cloneJob, leafMap,
                             troveCache, cloneOptions.callback)
        # some clones may rewrite the child troves (if cloneOnlyByDefaultTroves
        # is True). We need to make sure that any precloned aren't having
        # the list of child troves changed.
        _logMe('recheck preclones')
        self._recheckPreClones(cloneJob, cloneMap, troveCache, chooser,
                               leafMap)
        troveTups = cloneJob.getTrovesToClone()
        unmetNeeds = self._checkNeedsFulfilled(troveTups, chooser, cloneMap,
                                               leafMap, troveCache,
                                               cloneOptions.callback)
        if unmetNeeds:
            _logMe('could not clone')
            raise CloneIncomplete(unmetNeeds)
        _logMe('Got clone job')
        return cloneJob, cloneMap, leafMap
packages = set() components = {} for troveTup in troveList: #do not promote single component without parent troveName = troveTup[0] if not trove.troveIsComponent(troveName): packages.add(troveTup) elif not trove.troveIsSourceComponent(troveName): package = (troveName.split(':', 1)[0], troveTup[1], troveTup[2]) components.setdefault(package, []).append(troveName) missingPackages = set(components) - packages if not missingPackages: return componentNames = [] for packageTup in missingPackages: for componentName in components[packageTup]: componentNames.append(componentName) raise errors.CvcError('Cannot promote/clone components: %s. Please specify package names instead.' % (','.join(repr(x) for x in sorted(componentNames)),)) def _determineTrovesToClone(self, chooser, cloneMap, cloneJob, troveCache, callback): self._checkCloneListSanity(chooser.getPrimaryTroveList()) trvs = troveCache.getTroves(chooser.getPrimaryTroveList()) for trv in trvs: cloneMap.updateChildMap(trv) seen = set() toClone = chooser.getPrimaryTroveList() total = 0 current = 0 sourceByPackage = {} while toClone: total += len(toClone) needed = [] callback.determiningCloneTroves(current, total) for info in toClone: if (trove.troveIsPackage(info[0]) and chooser.shouldPotentiallyClone(info) is False): if (chooser.options.cloneOnlyByDefaultTroves and chooser.isByDefault(info)): needed.append(info) seen.add(info) else: current += 1 continue elif info in seen: current += 1 else: needed.append(info) seen.add(info) srcsNeeded = [ (n,v,f) for n,v,f in needed if (not trove.troveIsComponent(n) and (n,v,f) not in sourceByPackage) ] srcList = troveCache.getTroveInfo( trove._TROVEINFO_TAG_SOURCENAME, srcsNeeded) sourceByPackage.update( (x,y()) for x, y in itertools.izip(srcsNeeded, srcList) ) newToClone = [] for troveTup in needed: current += 1 callback.determiningCloneTroves(current, total) if troveTup[0].endswith(':source'): sourceName = None elif trove.troveIsComponent(troveTup[0]): try: sourceName = 
    def _getExistingLeaves(self, cloneMap, troveCache, cloneOptions):
        """
        Gets the needed information about the current repository state
        to find out what clones may have already been performed
        (and should have their clonedFrom fields checked to be sure)
        """
        leafMap = LeafMap(cloneOptions)
        # query the target branches for any trove that shares a name and
        # flavor with something we might clone there
        query = []
        for sourceTup, targetBranch in cloneMap.iterSourceTargetBranches():
            query.append((sourceTup[0], targetBranch, sourceTup[2]))
        for binTup, targetBranch in cloneMap.iterBinaryTargetBranches():
            query.append((binTup[0], targetBranch, binTup[2]))
        result = self.repos.findTroves(None, query,
                                       defaultFlavor = deps.parseFlavor(''),
                                       getLeaves=False, allowMissing=True,
                                       troveTypes=trovesource.TROVE_QUERY_ALL)
        if not result:
            return leafMap
        leafMap.addLeafResults(result)
        possiblePreClones = []
        for queryItem, tupList in result.iteritems():
            # keep only results whose flavor matches the query exactly
            tupList = [ x for x in tupList if x[2] == queryItem[2] ]
            if not tupList:
                continue
            # only the latest version on the branch can be a preclone
            latest = sorted(tupList)[-1]
            if cloneMap.couldBePreClone(latest):
                possiblePreClones.append(latest)
        if not possiblePreClones:
            return leafMap
        # fetch clonedFrom info so isAlreadyCloned can verify candidates later
        leafMap.addClonedFromInfo(troveCache, possiblePreClones)
        return leafMap
raise CloneError( "Cannot find cloned source for %s=%s" \ % (sourceTup[0], sourceTup[1])) else: newVersion = leafMap.hasAncestor(sourceTup, targetBranch, self.repos) if newVersion: cloneMap.target(sourceTup, newVersion) cloneJob.alreadyCloned(sourceTup) else: # The source trove is not available to clone and either # this is not an uphill trove or the source is not # available on the uphill label. raise CloneError( "Cannot find required source %s on branch %s." \ % (sourceTup[0], targetBranch)) def _targetBinaries(self, chooser, cloneMap, cloneJob, leafMap, troveCache, callback): allBinaries = list(itertools.chain(*[x[1] for x in cloneMap.getBinaryTrovesBySource()])) _logMe("Getting clonedFromInfo for binaries") leafMap.addClonedFromInfo(troveCache, allBinaries) _logMe("Actually targeting binaries") versionsToGet = [] total = len(list(itertools.chain(*[x[0] for x in cloneMap.getBinaryTrovesBySource()]))) current = 0 for sourceTup, binaryList in cloneMap.getBinaryTrovesBySource(): if not binaryList: continue targetSourceVersion = cloneMap.getTargetVersion(sourceTup) if targetSourceVersion is None: raise errors.InternalConaryError( "Cannot find cloned source for %s=%s" \ % (sourceTup[0], sourceTup[1])) targetBranch = targetSourceVersion.branch() byVersion = {} for binaryTup in binaryList: current += 1 callback.targetBinaries(current, total) byFlavor = byVersion.setdefault(binaryTup[1].getSourceVersion(), {}) byFlavor.setdefault(binaryTup[2], []).append(binaryTup) cloneSource = False for byFlavor in byVersion.itervalues(): finalNewVersion = None for flavor, binaryList in byFlavor.iteritems(): # Binary list is a list of binaries all created from the # same cook command. 
newVersion = leafMap.isAlreadyCloned(binaryList, targetBranch) if (newVersion and (not finalNewVersion or finalNewVersion == newVersion)): finalNewVersion = newVersion else: finalNewVersion = None break if finalNewVersion: for binaryTup in itertools.chain(*byFlavor.itervalues()): cloneMap.target(binaryTup, finalNewVersion) cloneJob.alreadyCloned(binaryTup) else: binaryList = list(itertools.chain(*byFlavor.itervalues())) versionsToGet.append((targetSourceVersion, binaryList)) cloneSource = True if not cloneSource: # all binaries for this version were marked as already cloned # which means we don't need to retarget this source # component either. if not chooser._matchesPrimaryTrove(sourceTup, None): cloneJob.alreadyCloned(sourceTup) if not versionsToGet: return _logMe("getting new version for %s binaries" % (len(versionsToGet))) callback.targetBinaries() newVersions = leafMap.createBinaryVersions(self.repos, versionsToGet) for newVersion, versionInfo in itertools.izip(newVersions, versionsToGet): binaryList = versionInfo[1] for binaryTup in binaryList: cloneMap.target(binaryTup, newVersion) cloneJob.target(binaryTup, newVersion) def _checkNeedsFulfilled(self, troveTups, chooser, cloneMap, leafMap, troveCache, callback): query = {} neededInfoTroveTups = {} callback.checkNeedsFulfilled() total = len(troveTups) current = 0 _logMe("Checking needs are fulfilled for %s troves" % (len(troveTups))) troveCache.getTroves(troveTups, withFiles=False) for troveTup in troveTups: current += 1 callback.checkNeedsFulfilled(current, total) trv = troveCache.getTrove(troveTup, withFiles=False) for mark, src in _iterAllVersions(trv): if (chooser.troveInfoNeedsRewrite(mark[0], src) and not cloneMap.hasRewrite(src)): if mark[0] == V_LOADED: # Loaded troves are recorded with the flavor which # was used to load the recipe, the flavor to use # to get the trove from the repo is empty neededInfoTroveTups.setdefault( (src[0], src[1], deps.ThawFlavor('')), []).append(mark) else: 
neededInfoTroveTups.setdefault(src, []).append(mark) _logMe("Checking clonedFrom info for %s needed troves" % (len(neededInfoTroveTups))) leafMap.addClonedFromInfo(troveCache, neededInfoTroveTups) total = len(neededInfoTroveTups) current = 0 for troveTup in neededInfoTroveTups: callback.checkNeedsFulfilled(current, total) current += 1 targetBranch = chooser.getTargetBranch(troveTup[1]) if leafMap.isAlreadyCloned(troveTup, targetBranch): continue marks = neededInfoTroveTups[troveTup] queryItem = troveTup[0], targetBranch, troveTup[2] if queryItem not in query: query[queryItem] = troveTup, marks query[queryItem][1].extend(marks) results = self.repos.findTroves(None, query, None, bestFlavor=True, allowMissing=True) leafMap.addLeafResults(results) matches = [] for queryItem, tupList in results.iteritems(): sourceTup = query[queryItem][0] upstreamVersion = sourceTup[1].trailingRevision().getVersion() for troveTup in tupList: if (troveTup[1].trailingRevision().getVersion() == upstreamVersion and sourceTup[2] == troveTup[2]): matches.append(troveTup) _logMe("Checking clonedFrom info for %s matching nodes" % (len(matches))) leafMap.addClonedFromInfo(troveCache, matches) total = len(query) current = 0 for queryItem, (sourceTup, markList) in query.items(): current += 1 callback.checkNeedsFulfilled(current, total) newVersion = leafMap.isAlreadyCloned(sourceTup, queryItem[1]) if not newVersion: newVersion = leafMap.hasAncestor(sourceTup, queryItem[1], self.repos) if newVersion: cloneMap.target(sourceTup, newVersion) del query[queryItem] unmetNeeds = query.values() unmetNeeds = chooser.filterUnmetTroveInfoItems(unmetNeeds) return unmetNeeds def _recheckPreClones(self, cloneJob, cloneMap, troveCache, chooser, leafMap): # We only child for missing trove references, not build reqs for # reclones. Otherwise you could have to reclone when minor details # about the entironment have changed. 
troveTups = cloneJob.getPreclonedTroves() # match up as many needed targets for these clone as possible. _logMe("Rechecking %s preclones" % len(troveTups)) needed = [] fetch = [] hasList = [] for troveTup in troveTups: _logMe("Rechecking %s" % (troveTup,)) if not trove.troveIsCollection(troveTup[0]): # this is only interested in missing references for included # troves. only collections have those continue newVersion = cloneMap.getTargetVersion(troveTup) clonedTup = (troveTup[0], newVersion, troveTup[2]) needed += [ (troveTup, clonedTup) ] fetch += [ clonedTup ] hasList.append(clonedTup) hasList += [ (x[0], clonedTup[1], clonedTup[2]) for x in cloneMap.getChildren(troveTup) ] groupsNeeded = [ x[0] for x in needed if trove.troveIsGroup(x[0][0]) ] groupsNeeded += [ x[1] for x in needed if trove.troveIsGroup(x[0][0]) ] groupTroves = troveCache.getTroves(groupsNeeded) groupTroves = dict( itertools.izip(groupsNeeded, groupTroves) ) hasTroves = troveCache.hasTroves(hasList) toReclone = [] for (troveTup, clonedTup) in needed: if trove.troveIsGroup(troveTup[0]): trvChildren = list( groupTroves[troveTup].iterTroveList(strongRefs = True, weakRefs = True) ) else: trvChildren = cloneMap.getChildren(troveTup) assert(trvChildren) if trove.troveIsGroup(troveTup[0]): clonedChildren = list( groupTroves[clonedTup].iterTroveList(strongRefs = True, weakRefs = True) ) else: clonedChildren = [] for x in cloneMap.getChildren(troveTup): childTup = (x[0], clonedTup[1], clonedTup[2]) if hasTroves[childTup]: clonedChildren.append(childTup) if self._shouldReclone(trvChildren, clonedChildren, chooser, cloneMap): toReclone.append(troveTup) trovesBySource = cloneMap.getTrovesWithSameSource(toReclone) _logMe("Recloning %s troves" % len(trovesBySource)) for binaryList in trovesBySource: sourceVersion = cloneMap.getSourceVersion(binaryList[0]) targetSourceVersion = cloneMap.getTargetVersion(sourceVersion) newVersion = leafMap.createBinaryVersion(self.repos, binaryList, targetSourceVersion) for 
binaryTup in binaryList: cloneMap.target(binaryTup, newVersion) cloneJob.target(binaryTup, newVersion) def _shouldReclone(self, origTroveChildren, clonedTroveChildren, chooser, cloneMap): childTroves = {} clonedChildTroves = {} for src in origTroveChildren: if chooser.troveInfoNeedsRewrite(V_REFTRV, src): targetBranch = chooser.getTargetBranch(src[1]) childTroves[src[0], targetBranch, src[2]] = True elif chooser.troveInfoNeedsErase(V_REFTRV, src): continue else: childTroves[src[0], src[1].branch(), src[2]] = True for src in clonedTroveChildren: clonedChildTroves[src[0], src[1].branch(), src[2]] = True if childTroves == clonedChildTroves: return False return True def _buildTroves(self, chooser, cloneMap, cloneJob, leafMap, troveCache, callback): # fill the trove cache with a single repository call allTroveList = [] for troveTup, newVersion in cloneJob.iterTargetList(): allTroveList.append(troveTup) targetBranch = newVersion.branch() leafVersion = leafMap.getLeafVersion(troveTup[0], targetBranch, troveTup[2]) if leafVersion: allTroveList.append((troveTup[0], leafVersion, troveTup[2])) # this getTroves populates troveCache.hasTroves simultaneously has = troveCache.hasTroves(allTroveList) toFetch = [ x for x, y in itertools.izip(allTroveList, has) if y ] troveCache.getTroves(toFetch, withFiles=True) #del allTroveList, has current = 0 finalTroves = [] total = len(list(cloneJob.iterTargetList())) for troveTup, newVersion in cloneJob.iterTargetList(): current += 1 callback.rewriteTrove(current, total) trv = troveCache.getTrove(troveTup, withFiles=True) oldVersion = trv.getVersion() newTrv = self._rewriteTrove(trv, newVersion, chooser, cloneMap, cloneJob, leafMap, troveCache) if not newTrv: return None # make sure we haven't deleted all the child troves from # a group. This could happen, for example, if a group # contains all byDefault False components. 
if trove.troveIsCollection(troveTup[0]): if not list(newTrv.iterTroveList(strongRefs=True)): raise CloneError("Clone would result in empty collection " "%s=%s[%s]" % (troveTup)) sigKeyId = selectSignatureKey(self.cfg, newTrv.getVersion().trailingLabel()) if sigKeyId is not None: newTrv.addDigitalSignature(sigKeyId) else: # if no sigKeyId, just add sha1s newTrv.computeDigests() finalTroves.append((oldVersion, newTrv)) return finalTroves def _rewriteTrove(self, trv, newVersion, chooser, cloneMap, cloneJob, leafMap, troveCache): # make a copy so we don't corrupt the copy in the trove cache trv = trv.copy() troveName, troveVersion, troveFlavor = trv.getNameVersionFlavor() troveBranch = troveVersion.branch() targetBranch = newVersion.branch() needsNewVersions = [] assert(troveVersion.trailingRevision().getVersion() == newVersion.trailingRevision().getVersion()) if cloneJob.options.trackClone: # cloned from tracks exactly where we cloned from trv.troveInfo.clonedFrom.set(troveVersion) # cloned from list lists all places we've cloned from, # with the most recent clone at the end trv.troveInfo.clonedFromList.append(troveVersion) # clone the labelPath labelPath = list(trv.getLabelPath()) labelPathMap = [(x, cloneMap.getCloneTargetLabelsForLabel(x)) for x in labelPath] labelPath = _computeLabelPath(trv.getName(), labelPathMap) if labelPath: trv.setLabelPath(labelPath) trv.changeVersion(newVersion) trv.copyMetadata(trv) # flatten metadata for mark, src in _iterAllVersions(trv): if chooser.troveInfoNeedsRewrite(mark[0], src): newVersion = cloneMap.getTargetVersion(src) if newVersion is None: continue _updateVersion(trv, mark, newVersion) elif chooser.troveInfoNeedsErase(mark[0], src): _updateVersion(trv, mark, None) if trove.troveIsFileSet(trv.getName()): needsRewriteFn = chooser.filesetFileNeedsRewrite else: needsRewriteFn = chooser.fileNeedsRewrite for (pathId, path, fileId, version) in trv.iterFileList( members = True, capsules = True): if needsRewriteFn(troveBranch, 
targetBranch, version): needsNewVersions.append((pathId, path, fileId)) # need to be reversioned if needsNewVersions: leafVersion = leafMap.getLeafVersion(troveName, targetBranch, troveFlavor) if leafVersion and troveCache.hasTrove(troveName, leafVersion, troveFlavor): oldTrv = troveCache.getTrove((troveName, leafVersion, troveFlavor), withFiles = True) # pathId, fileId -> fileVersion map fileMap = dict(((x[0], x[2]), x[3]) for x in oldTrv.iterFileList(members = True, capsules = True)) else: fileMap = {} for (pathId, path, fileId) in needsNewVersions: ver = fileMap.get((pathId, fileId), newVersion) trv.updateFile(pathId, path, ver, fileId) # CNY-1900: for any redirects where the redirect target exists on # both sides of the promote map, and the redirect target on the # target of the promote map is cloned from the redirect target on # the source side of the promote map, then re-write the redirect # to follow the promote map redirects = [x for x in trv.redirects.iter()] for redirect in redirects: sourceLabel = redirect.branch().label() targetLabel = chooser.getTargetLabel(sourceLabel) if targetLabel: redirTargetTups = [ (redirect.name(), redirect.branch().asString(), redirect.flavor()), (redirect.name(), targetLabel.asString(), redirect.flavor()) ] result = troveCache.repos.findTroves( None, redirTargetTups, None, allowMissing=True) sourceRedirTarget = result[redirTargetTups[0]] targetRedirTarget = result[redirTargetTups[1]] if sourceRedirTarget and targetRedirTarget: redirTargetTroves = troveCache.getTroves( sourceRedirTarget + targetRedirTarget, withFiles=False) if (redirTargetTroves[1].troveInfo.clonedFrom() == redirTargetTroves[0].getVersion()): trv.redirects.remove(redirect) trv.redirects.add( redirTargetTroves[1].getName(), redirTargetTroves[1].getVersion().branch(), redirTargetTroves[1].getFlavor()) infoOnly = cloneJob.options.infoOnly if trv.getName().endswith(':source') and not infoOnly: try: cl = cloneJob.options.callback.getCloneChangeLog(trv) except 
Exception, e: log.error(str(e)) return None if cl is None: log.error("no change log message was given" " for %s." % trv.getName()) return None trv.changeChangeLog(cl) # reset the signatures, because all the versions have now # changed, thus invalidating the old sha1 hash trv.troveInfo.sigs.reset() if not infoOnly: # not computing signatures will # make sure this doesn't get committed trv.computeDigests() return trv def _iterAllVersions(trv, rewriteTroveInfo=True): # return all versions which need rewriting except for file versions # and the version of the trove itself. file versions are handled # separately since we can clone even if the files don't already # exist on the target branch (we just add them), and trove versions # are always rewritten even when cloning to the same branch # (while other versions are not) if rewriteTroveInfo: for troveTuple in \ [ x for x in trv.troveInfo.loadedTroves.iter() ]: yield ((V_LOADED, troveTuple), (troveTuple.name(), troveTuple.version(), troveTuple.flavor())) for troveTuple in \ [ x for x in trv.troveInfo.buildReqs.iter() ]: yield ((V_BREQ, troveTuple), (troveTuple.name(), troveTuple.version(), troveTuple.flavor())) for troveInfo in [ x for x in trv.iterTroveList(strongRefs=True, weakRefs=True) ]: yield ((V_REFTRV, troveInfo), troveInfo) def _updateVersion(trv, mark, newVersion): """ Update version for some piece of troveInfo. If newVersion is None, just erase this version. 
""" kind = mark[0] if kind == V_LOADED: trv.troveInfo.loadedTroves.remove(mark[1]) if newVersion: trv.troveInfo.loadedTroves.add(mark[1].name(), newVersion, mark[1].flavor()) elif kind == V_BREQ: trv.troveInfo.buildReqs.remove(mark[1]) if newVersion: trv.troveInfo.buildReqs.add(mark[1].name(), newVersion, mark[1].flavor()) elif kind == V_REFTRV: (name, oldVersion, flavor) = mark[1] isStrong = trv.isStrongReference(name, oldVersion, flavor) byDefault = trv.includeTroveByDefault(name, oldVersion, flavor) trv.delTrove(name, oldVersion, flavor, False, weakRef = not isStrong) if newVersion: if not trv.hasTrove(name, newVersion, flavor): trv.addTrove(name, newVersion, flavor, byDefault = byDefault, weakRef = not isStrong) else: # it's possible that this trove already exists in this group # this could happen if the trove has previously been cloned # and the group contains a reference to the cloned and # uncloned versions. Afterwards there will just be one # reference. if not isStrong: return # delete a weak reference if it exists, there should only # be one reference to this package in this group. 
trv.delTrove(name, newVersion, flavor, missingOkay = True, weakRef = True) trv.addTrove(name, newVersion, flavor, byDefault = byDefault, presentOkay = True, weakRef = not isStrong) else: assert(0) def _computeLabelPath(name, labelPathMap): newLabelPath = [] for label, newLabels in labelPathMap: if len(newLabels) > 1: raise CloneError("Multiple clone targets for label %s" " - cannot build labelPath for %s" % (label, name)) elif newLabels: newLabel = newLabels.pop() else: newLabel = label if newLabel in newLabelPath: # don't allow duplicates continue newLabelPath.append(newLabel) return newLabelPath class CloneOptions(object): def __init__(self, fullRecurse=True, cloneSources=True, trackClone=True, callback=None, message=DEFAULT_MESSAGE, cloneOnlyByDefaultTroves=False, updateBuildInfo=True, infoOnly=False, bumpGroupVersions=False, enforceFullBuildInfoCloning=False, excludeGroups=False): self.fullRecurse = fullRecurse self.cloneSources = cloneSources self.trackClone = trackClone if callback is None: callback = callbacks.CloneCallback() self.callback = callback self.message = message self.cloneOnlyByDefaultTroves = cloneOnlyByDefaultTroves self.updateBuildInfo = updateBuildInfo self.infoOnly = infoOnly self.bumpGroupVersions = bumpGroupVersions self.enforceFullBuildInfoCloning = enforceFullBuildInfoCloning self.excludeGroups = excludeGroups class TroveCache(object): def __init__(self, repos, callback): self._hasTroves = {} self.troves = {True : {}, False : {}} self.repos = repos self.callback = callback def hasTrove(self, name, version, flavor): return self.hasTroves([(name, version, flavor)])[name, version, flavor] def hasTroves(self, troveTups): needed = [ x for x in troveTups if x not in self._hasTroves ] if needed: self._hasTroves.update(self.repos.hasTroves(needed)) return dict((x, self._hasTroves[x]) for x in troveTups) def _get(self, troveTups, withFiles): cs = self.repos.createChangeSet( [ (x[0], (None, None), (x[1], x[2]), True) for x in troveTups], withFiles 
= withFiles, withFileContents = False, recurse = False) for x in troveTups: self.troves[withFiles][x] = cs.getNewTroveVersion(*x) if trove.troveIsCollection(x[0]): self.troves[not withFiles][x] = cs.getNewTroveVersion(*x) def getTroves(self, troveTups, withFiles=True): theDict = self.troves[withFiles] needed = [ x for x in troveTups if x not in theDict ] if needed: _logMe('getting %s troves from repos' % len(needed)) self._get(troveTups, withFiles) # this prevents future hasTroves calls from calling the server self._hasTroves.update((x, True) for x in troveTups) return [ trove.Trove(theDict[x], skipIntegrityChecks = (not withFiles)) for x in troveTups ] def getTrove(self, troveTup, withFiles=True): return self.getTroves([troveTup], withFiles=withFiles)[0] def getTroveInfo(self, *args): return self.repos.getTroveInfo(*args) class CloneChooser(object): def __init__(self, targetMap, primaryTroveList, cloneOptions): # make sure there are no zeroed timeStamps - branches may be # user-supplied string newMap = {} for key, value in targetMap.iteritems(): if isinstance(key, versions.Branch): key = key.copy() key.resetTimeStamps() if isinstance(value, versions.Branch): value = value.copy() value.resetTimeStamps() newMap[key] = value self.primaryTroveList = primaryTroveList self.targetMap = newMap self.byDefaultMap = None self.referencedByClonedMap = {} self.referencedByUnclonedMap = {} self.options = cloneOptions def getPrimaryTroveList(self): return self.primaryTroveList def getTargetLabel(self, label): return self.targetMap.get(label, None) def setByDefaultMap(self, map): self.byDefaultMap = map def setCloneMap(self, cloneMap): self.cloneMap = cloneMap def addSource(self, troveTup, sourceName): if self.byDefaultMap is None: return noFlavor = deps.parseFlavor('') version = troveTup[1] sourceVersion = version.getSourceVersion(False) sourceTup = (sourceName, sourceVersion, noFlavor) self.byDefaultMap[sourceTup] = True def isByDefault(self, troveTup): if self.byDefaultMap is 
None: return True return troveTup in self.byDefaultMap def addReferenceByCloned(self, troveTup): self.referencedByClonedMap[troveTup] = True def addReferenceByUncloned(self, troveTup): self.referencedByUnclonedMap[troveTup] = True def isReferencedByCloned(self, troveTup): return troveTup in self.referencedByClonedMap def isExcluded(self, troveTup): # excludeGroups excludes groups *and their components*, so we # don't use troveIsGroup() here return (self.options.excludeGroups and troveTup[0].startswith('group-') and not troveTup[0].endswith(':source')) def shouldPotentiallyClone(self, troveTup): """ returns True if you definitely should clone this trove returns False if you definitely should not clone this trove returns None if it's undecided. """ name, version, flavor = troveTup if self.byDefaultMap is not None: if troveTup not in self.byDefaultMap: return False if troveTup in self.referencedByUnclonedMap: # don't clone anything that's referenced by other packages # that are not being cloned. 
return False if (version.branch() not in self.targetMap and version.trailingLabel() not in self.targetMap and None not in self.targetMap): return False if name.endswith(':source'): if self.options.cloneSources: return True elif self.options.fullRecurse: return True def shouldClone(self, troveTup, sourceName=None): shouldClone = self.shouldPotentiallyClone(troveTup) if shouldClone is not None: return shouldClone return self._matchesPrimaryTrove(troveTup, sourceName) def _matchesPrimaryTrove(self, troveTup, sourceName): name, version, flavor = troveTup if name.endswith(':source'): return (name, version, flavor) in self.primaryTroveList assert(sourceName) sourcePackage = sourceName.split(':')[0] parentPackage = (sourcePackage, version, flavor) if parentPackage not in self.primaryTroveList: return False return True def getTargetBranch(self, version): sourceLabel = version.trailingLabel() sourceBranch = version.branch() target = self.targetMap.get(sourceBranch, None) if target is None: target = self.targetMap.get(sourceLabel, None) if target is None: target = self.targetMap.get(None, None) if target is None: return None if isinstance(target, versions.Label): return sourceBranch.createSibling(target) elif isinstance(target, versions.Branch): return target assert(0) def troveInfoNeedsRewrite(self, kind, troveTup): targetBranch = self.getTargetBranch(troveTup[1]) if not targetBranch: return False if self.byDefaultMap is not None and troveTup not in self.byDefaultMap: return False if kind == V_REFTRV: # only rewrite trove info if we're cloning that trove. # otherwise, assume it's correct. 
return troveTup in self.cloneMap.targetMap if targetBranch == troveTup[1].branch(): # this means that we're merely pushing this trove to tip # on same branch return False return self.options.updateBuildInfo def filesetFileNeedsRewrite(self, troveBranch, targetBranch, fileVersion): targetMap = self.targetMap return (fileVersion.branch() in targetMap or fileVersion.trailingLabel() in targetMap or None in targetMap) def fileNeedsRewrite(self, troveBranch, targetBranch, fileVersion): if fileVersion.depth() == targetBranch.depth(): # if the file is on /A and we're cloning to /C, then that needs # to be rewritten. If we're on /C already, no rewriting necessary return fileVersion.branch() != targetBranch # if the fileVersion is at some level that's deeper than # the target branch - say, the file is on /A//B and the clone # is being made to /A, then the file must be rewritten. # If, instead, the file on /A and the clone is being made to # /A//B, then the file is ok. return fileVersion.depth() > targetBranch.depth() def troveInfoNeedsErase(self, kind, troveTup): if kind != V_REFTRV: # we only erase trove references - all other types # just let remain with their old, uncloned values. # This could change. 
return False return (self.byDefaultMap is not None and troveTup not in self.referencedByUnclonedMap and troveTup not in self.byDefaultMap) def filterUnmetTroveInfoItems(self, unmetTroveInfoItems): if self.options.enforceFullBuildInfoCloning: return unmetTroveInfoItems return [ (mark,troveTup) for (mark,troveTup) in unmetTroveInfoItems if mark[0] == V_REFTRV ] class CloneMap(object): def __init__(self): self.targetMap = {} self.trovesByTargetBranch = {} self.trovesBySource = {} self.sourcesByTrove = {} self.childMap = {} def addTrove(self, troveTup, targetBranch, sourceName=None): name, version, flavor = troveTup if (name, targetBranch, flavor) in self.trovesByTargetBranch: if self.trovesByTargetBranch[name, targetBranch, flavor] == version: return otherVersion = self.trovesByTargetBranch[name, targetBranch, flavor] if not flavor.isEmpty(): troveSpec = '%s[%s]' % (name, flavor) else: troveSpec = name versions = [ str(otherVersion), str(version) ] versions.sort() raise CloneError("Cannot clone multiple versions of %s" " to branch %s at the same time. 
Attempted to" " clone versions %s and %s" % (troveSpec, targetBranch, versions[0], versions[1])) self.trovesByTargetBranch[name, targetBranch, flavor] = version if name.endswith(':source'): self.trovesBySource.setdefault((name, version, flavor), []) return noFlavor = deps.parseFlavor('') sourceVersion = version.getSourceVersion(False) sourceTup = (sourceName, sourceVersion, noFlavor) self.addTrove(sourceTup, targetBranch) self.trovesBySource[sourceTup].append(troveTup) self.sourcesByTrove[troveTup] = sourceTup def iterSourceTargetBranches(self): for (name, targetBranch, flavor), version \ in self.trovesByTargetBranch.iteritems(): if name.endswith(':source'): yield (name, version, flavor), targetBranch def iterBinaryTargetBranches(self): for (name, targetBranch, flavor), version \ in self.trovesByTargetBranch.iteritems(): if not name.endswith(':source'): yield (name, version, flavor), targetBranch def getBinaryTrovesBySource(self): return self.trovesBySource.items() def getTrovesWithSameSource(self, troveTupleList): bySource = {} for troveTup in troveTupleList: sourceTup = self.sourcesByTrove[troveTup] bySource[sourceTup] = self.trovesBySource[sourceTup] return bySource.values() def getSourceVersion(self, troveTup): return self.sourcesByTrove[troveTup] def target(self, troveTup, targetVersion): oldBranch = troveTup[1].branch() targetBranch = targetVersion.branch() while targetBranch.depth() < oldBranch.depth(): oldBranch = oldBranch.parentBranch() if not (targetBranch == oldBranch or targetBranch.isSibling(oldBranch)): raise CloneError("clone only supports cloning troves to sibling " "branches, parents, and siblings of parent" " branches") self.targetMap[troveTup] = targetVersion def getTargetVersion(self, troveTup): return self.targetMap.get(troveTup, None) def couldBePreClone(self, troveTup): info = (troveTup[0], troveTup[1].branch(), troveTup[2]) if info in self.trovesByTargetBranch: return True return False def hasRewrite(self, troveTup): return troveTup in 
self.targetMap def getCloneTargetLabelsForLabel(self, label): matches = set() for troveTup, newVersion in self.targetMap.iteritems(): if troveTup[1].trailingLabel() == label: matches.add(newVersion.trailingLabel()) return matches def updateChildMap(self, trv): l = list(trv.iterTroveList(strongRefs=True, weakRefs=True)) l.sort() self.childMap[trv.getNameVersionFlavor()] = set(l) for child in l: if trove.troveIsPackage(child[0]): if child not in self.childMap: self.childMap[child] = set() elif trove.troveIsComponent(child[0]): pkg = child[0].split(":")[0] self.childMap[(pkg, child[1], child[2])].add(child) def getChildren(self, trvTuple): return self.childMap.get(trvTuple, set()) class LeafMap(object): def __init__(self, options): self.clonedFrom = {} self.branchMap = {} self.options = options def _addTrove(self, troveTup, clonedFrom=None): name, version, flavor = troveTup if clonedFrom is None: clonedFrom = set([troveTup[1]]) self.clonedFrom[troveTup] = clonedFrom def _getClonedFrom(self, troveTup): if troveTup in self.clonedFrom: return self.clonedFrom[troveTup] return set([troveTup[1]]) def addLeafResults(self, branchMap): self.branchMap.update(branchMap) def getLeafVersion(self, name, targetBranch, flavor): if (name, targetBranch, flavor) not in self.branchMap: return None troveList = [ x for x in self.branchMap[name, targetBranch, flavor] if x[2] == flavor ] if troveList: return sorted(troveList)[-1][1] return None @staticmethod def hasAncestor(troveTup, targetBranch, repos): newVersion = troveTup[1] if newVersion.branch() == targetBranch: # even if we're an unmodified shadow - if we're cloning to our # own branch we want to use other tests to determine if # the clone is necessary. 
return False while (newVersion.isShadow() and not newVersion.isModifiedShadow() and newVersion.branch() != targetBranch): newVersion = newVersion.parentVersion() if (newVersion.branch() == targetBranch and repos.hasTrove(troveTup[0], newVersion, troveTup[2])): return newVersion return False def isAlreadyCloned(self, troveTupleList, targetBranch): if not isinstance(troveTupleList, list): troveTupleList = [troveTupleList] finalTargetVersion = None for troveTup in troveTupleList: myClonedFrom = self._getClonedFrom(troveTup) name, version, flavor = troveTup targetVersion = self.getLeafVersion(name, targetBranch, flavor) if not targetVersion: return False targetTup = name, targetVersion, flavor targetClonedFrom = self._getClonedFrom(targetTup) if not myClonedFrom & targetClonedFrom: # either the version we're thinking about cloning is # in the cloned from field or maybe we're both cloned # from the same place. return False if targetVersion != finalTargetVersion: if finalTargetVersion: # conflict on clone version. return False finalTargetVersion = targetVersion return finalTargetVersion def createSourceVersion(self, sourceTup, targetBranch): name, version, flavor = sourceTup targetBranchVersionList = [x[1] for x in self.branchMap.get((name, targetBranch, flavor), [])] revision = version.trailingRevision().copy() return nextversion.nextSourceVersion(targetBranch, revision, targetBranchVersionList) def createBinaryVersion(self, repos, binaryList, sourceVersion): # We should be able to avoid the repos calls made in here... # but it may not be worth it. return self.createBinaryVersions(repos, [(sourceVersion, binaryList)])[0] def createBinaryVersions(self, repos, sourceBinaryList): # takes a (sourceVersion, troveTupList) -> # (sourceVersion, pkgNames, flavorList) list. 
troveList = [(x[0], # sourceVersion set([y[0] for y in x[1]]), # all names set([y[2] for y in x[1]])) # all flavors for x in sourceBinaryList] bumpList = {True: [], False: []} for idx, item in enumerate(troveList): nameList = item[1] if (self.options.bumpGroupVersions and trove.troveIsGroup(iter(nameList).next())): bumpList[True].append((idx, item)) else: bumpList[False].append((idx, item)) allVersions = [None] * len(troveList) for bumpVersions, troveList in bumpList.items(): indexes = [ x[0] for x in troveList ] troveList = [ x[1] for x in troveList ] newVersions = nextversion.nextVersions(repos, None, troveList, alwaysBumpCount=bumpVersions) for idx, newVersion in itertools.izip(indexes, newVersions): allVersions[idx] = newVersion return allVersions def addClonedFromInfo(self, troveCache, tupList): """ Recurse through clonedFrom information for the given tupList so that we can know all the troves in the cloned history for these troves. """ # Note - this is a bit inefficient. Without knowing what trove # we're going to compare these troves against in the "clonedFrom" # field, we could be doing lots of extra work. However, this way # is very generic. clonedFromInfo = dict((x, set([x[1]])) for x in tupList) trovesByHost = {} # sort by host so that if a particular repository is down # we can continue to look at the rest of the clonedFrom info. 
for troveTup in sorted(tupList): if troveTup[1].isInLocalNamespace(): continue host = troveTup[1].trailingLabel().getHost() l = trovesByHost.setdefault(host, []) if (troveTup[0].split(":")[0], troveTup[1], troveTup[2]) not in l: l.append(troveTup) results = dict() for host, troveTups in trovesByHost.items(): try: infoList = troveCache.getTroveInfo( trove._TROVEINFO_TAG_CLONEDFROMLIST, troveTups) except errors.ConaryError, msg: log.debug('warning: Could not access host %s: %s' % (host, msg)) # handle old CLONEDFROM adequately if CLONEDFROMLIST doesn't # exist missingList = [ i for i, x in enumerate(infoList) if x is None ] cfList = [] try: cfList = troveCache.getTroveInfo( trove._TROVEINFO_TAG_CLONEDFROM, [ troveTups[x] for x in missingList ]) except errors.ConaryError, msg: log.debug('warning: Could not access host %s: %s' % (host, msg)) for i, clonedFrom in itertools.izip(missingList, cfList): if clonedFrom: infoList[i] = [ clonedFrom() ] else: infoList[i] = None results.update(itertools.izip(troveTups, infoList)) for troveTup in tupList: if troveTup[1].isInLocalNamespace(): continue if troveTup not in results and trove.troveIsComponent(troveTup[0]): name = troveTup[0].split(":")[0] else: name = troveTup[0] clonedFromList = results[(name, troveTup[1], troveTup[2])] if clonedFromList: # Looks weird, but switches from a version stream to # a version object for clonedFrom in clonedFromList: clonedFromInfo[troveTup].add(clonedFrom) for troveTup, clonedFrom in clonedFromInfo.iteritems(): self._addTrove(troveTup, clonedFrom) class CloneError(errors.ClientError): pass class CloneIncomplete(CloneError): def __str__(self): l = [] loadRecipes = [] buildReqs = [] refTroves = [] for src, markList in self.needs: for mark in markList: what = "%s=%s[%s]" % (src[0], src[1], src[2]) if mark[0] == V_LOADED: loadRecipes.append(what) elif mark[0] == V_BREQ: buildReqs.append(what) elif mark[0] == V_REFTRV: refTroves.append(what) l.extend(["build requirement: %s" % x for x in 
sorted(set(buildReqs))]) l.extend(["loadRecipe: %s" % x for x in sorted(set(loadRecipes))]) l.extend(["referenced trove: %s" % x for x in sorted(set(refTroves))]) return "Clone cannot be completed because some troves are not " + \ "available on the target branch.\n\t" + \ "\n\t".join(l) def __init__(self, needs): CloneError.__init__(self) self.needs = needs #start = time.time() def _logMe(msg): return # Dead code start = 0 secs = int(time.time() - start) mins = secs / 60 secs = secs % 60 if mins: timeStr = '%s mins, %s secs' % (mins, secs) else: timeStr = '%s secs' % (secs) print '\n%s (%s): %s' % (time.strftime('%X'), timeStr, msg)
{ "content_hash": "6dd6df35d9771616c1dfb721a4eb150d", "timestamp": "", "source": "github", "line_count": 1725, "max_line_length": 163, "avg_line_length": 44.01623188405797, "alnum_prop": 0.5612949109682858, "repo_name": "fedora-conary/conary", "id": "a1716eb14771451ff36e4b177ec6c1bae5afed46", "size": "76515", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "conary/conaryclient/clone.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "481681" }, { "name": "C++", "bytes": "8244" }, { "name": "CSS", "bytes": "3920" }, { "name": "Erlang", "bytes": "477" }, { "name": "Perl", "bytes": "45629" }, { "name": "Python", "bytes": "10586616" }, { "name": "Shell", "bytes": "4657" }, { "name": "Standard ML", "bytes": "2756" } ], "symlink_target": "" }
from fabric.api import env, local

# Fabric task targets: every task in this file runs on the local machine.
env.hosts = ['localhost']


def npm_install():
    """Correctly runs npm install"""
    # npm only reads dependencies from a file named ``package.json``, but this
    # project keeps its node dependencies in ``node.json`` — copy it into
    # place for the install, then clean up so the repo stays untouched.
    local('cp node.json package.json')
    local('npm install')
    local('rm package.json')


def test():
    """Run tests with coverage"""
    # --cover-inclusive also measures modules that are merely imported by the
    # tests, so untested files in the ``standup`` package still show up in
    # the coverage report.
    local('nosetests --with-coverage --cover-package=standup '
          '--cover-inclusive')
{ "content_hash": "909530c161e382a838b35e55b63fd80f", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 62, "avg_line_length": 20.88235294117647, "alnum_prop": 0.6338028169014085, "repo_name": "rlr/standup", "id": "7cd23c5d488fb6645aa621eda37ae36c6056dd84", "size": "355", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "fabfile.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "33470" }, { "name": "JavaScript", "bytes": "2960" }, { "name": "Python", "bytes": "149693" }, { "name": "Shell", "bytes": "427" } ], "symlink_target": "" }
import unittest
from conans.test.utils.tools import TestClient
from conans.test.utils.python_test_files import py_hello_conan_files
import platform


class PythonDiamondTest(unittest.TestCase):
    """Integration test for a diamond-shaped dependency graph of pure-Python
    "Hello" packages:

        Hello4 -> Hello3 -> {Hello1, Hello2} -> Hello0

    Verifies that each node's build output propagates through ``install`` /
    ``build`` and that the generated activation script exposes every
    transitive package to ``python main.py``.
    """

    def setUp(self):
        # Fresh isolated client (own cache/storage) for every test run.
        self.client = TestClient()

    def _export_upload(self, name, version=None, deps=None):
        """Generate a "Hello" package with the given deps and export it to the
        local cache under the ``lasote/stable`` user/channel."""
        files = py_hello_conan_files(name, version, deps)
        self.client.save(files, clean_first=True)
        self.client.run("export lasote/stable")

    def reuse_test(self):
        # Build the diamond: Hello1 and Hello2 both depend on Hello0, and
        # Hello3 depends on both of them.
        self._export_upload("Hello0", "0.1")
        self._export_upload("Hello1", "0.1", ["Hello0/0.1@lasote/stable"])
        self._export_upload("Hello2", "0.1", ["Hello0/0.1@lasote/stable"])
        self._export_upload("Hello3", "0.1", ["Hello1/0.1@lasote/stable",
                                              "Hello2/0.1@lasote/stable"])

        # Hello4 is the local "consumer" project sitting on top of the diamond;
        # it is saved into the working folder rather than exported.
        files3 = py_hello_conan_files("Hello4", "0.1", ["Hello3/0.1@lasote/stable"])
        self.client.save(files3, clean_first=True)
        self.client.run("install .")
        # Each dependency's build step must have echoed its own requirements.
        self.assertIn("Hello1/0.1@lasote/stable: Build stuff Hello0",
                      self.client.user_io.out)
        self.assertIn("Hello2/0.1@lasote/stable: Build stuff Hello0",
                      self.client.user_io.out)
        # Hello3 must see its full transitive closure, in order — the output
        # is flattened to one line so the sequence check spans line breaks.
        self.assertIn(" ".join(["Hello3/0.1@lasote/stable: Build stuff Hello1",
                                "Hello3/0.1@lasote/stable: Build stuff Hello0",
                                "Hello3/0.1@lasote/stable: Build stuff Hello2",
                                "Hello3/0.1@lasote/stable: Build stuff Hello0"]),
                      " ".join(str(self.client.user_io.out).splitlines()))
        # The consumer project itself builds only on "conan build", not on
        # "conan install".
        self.assertNotIn("Project: Build stuff Hello3", self.client.user_io.out)
        self.client.run("build")
        self.assertIn("Project: Build stuff Hello3", self.client.user_io.out)

        # Run the project inside the generated virtual environment so the
        # Python modules of every transitive dependency are importable.
        if platform.system() == "Windows":
            command = "activate && python main.py"
        else:
            command = 'bash -c "source activate.sh && python main.py"'
        self.client.runner(command, cwd=self.client.current_folder)
        # main.py greets depth-first through the diamond, so Hello0 is
        # reached twice (once via Hello1, once via Hello2).
        self.assertEqual(['Hello Hello4', 'Hello Hello3', 'Hello Hello1',
                          'Hello Hello0', 'Hello Hello2', 'Hello Hello0'],
                         str(self.client.user_io.out).splitlines()[-6:])
{ "content_hash": "505cc5d14c2d1ccbf7ad2de5829201cd", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 94, "avg_line_length": 47.208333333333336, "alnum_prop": 0.5962047661076787, "repo_name": "mropert/conan", "id": "120639ae477bb13e42bcfa52a31c2810eabe0fa1", "size": "2266", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "conans/test/integration/python_diamond_test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "653" }, { "name": "Python", "bytes": "1898890" }, { "name": "Shell", "bytes": "1342" } ], "symlink_target": "" }
from .cipher_block_chaining import CBC_decrypt from .counter import CTR_decrypt from .electronic_codebook import ECB_decrypt
{ "content_hash": "c4d36dad14c1695a7f335f800982b877", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 46, "avg_line_length": 41.666666666666664, "alnum_prop": 0.832, "repo_name": "mamikonyana/cryptotools", "id": "c2b8fd937ba5d3f854910b7d41e35be089eb9ac5", "size": "125", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cryptotools/cipher/mode/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6059" } ], "symlink_target": "" }
from __future__ import absolute_import, division, print_function, unicode_literals from h2o.estimators.estimator_base import H2OEstimator from h2o.exceptions import H2OValueError from h2o.frame import H2OFrame from h2o.utils.typechecks import assert_is_type, Enum, numeric class H2ORandomForestEstimator(H2OEstimator): """ Distributed Random Forest """ algo = "drf" def __init__(self, **kwargs): super(H2ORandomForestEstimator, self).__init__() self._parms = {} names_list = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "score_each_iteration", "score_tree_interval", "fold_assignment", "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "offset_column", "weights_column", "balance_classes", "class_sampling_factors", "max_after_balance_size", "max_confusion_matrix_size", "max_hit_ratio_k", "ntrees", "max_depth", "min_rows", "nbins", "nbins_top_level", "nbins_cats", "r2_stopping", "stopping_rounds", "stopping_metric", "stopping_tolerance", "max_runtime_secs", "seed", "build_tree_one_node", "mtries", "sample_rate", "sample_rate_per_class", "binomial_double_trees", "checkpoint", "col_sample_rate_change_per_level", "col_sample_rate_per_tree", "min_split_improvement", "histogram_type", "categorical_encoding", "calibrate_model", "calibration_frame", "distribution", "custom_metric_func"} if "Lambda" in kwargs: kwargs["lambda_"] = kwargs.pop("Lambda") for pname, pvalue in kwargs.items(): if pname == 'model_id': self._id = pvalue self._parms["model_id"] = pvalue elif pname in names_list: # Using setattr(...) will invoke type-checking of the arguments setattr(self, pname, pvalue) else: raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue)) @property def training_frame(self): """ Id of the training data frame. Type: ``H2OFrame``. 
""" return self._parms.get("training_frame") @training_frame.setter def training_frame(self, training_frame): assert_is_type(training_frame, None, H2OFrame) self._parms["training_frame"] = training_frame @property def validation_frame(self): """ Id of the validation data frame. Type: ``H2OFrame``. """ return self._parms.get("validation_frame") @validation_frame.setter def validation_frame(self, validation_frame): assert_is_type(validation_frame, None, H2OFrame) self._parms["validation_frame"] = validation_frame @property def nfolds(self): """ Number of folds for K-fold cross-validation (0 to disable or >= 2). Type: ``int`` (default: ``0``). """ return self._parms.get("nfolds") @nfolds.setter def nfolds(self, nfolds): assert_is_type(nfolds, None, int) self._parms["nfolds"] = nfolds @property def keep_cross_validation_predictions(self): """ Whether to keep the predictions of the cross-validation models. Type: ``bool`` (default: ``False``). """ return self._parms.get("keep_cross_validation_predictions") @keep_cross_validation_predictions.setter def keep_cross_validation_predictions(self, keep_cross_validation_predictions): assert_is_type(keep_cross_validation_predictions, None, bool) self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions @property def keep_cross_validation_fold_assignment(self): """ Whether to keep the cross-validation fold assignment. Type: ``bool`` (default: ``False``). """ return self._parms.get("keep_cross_validation_fold_assignment") @keep_cross_validation_fold_assignment.setter def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment): assert_is_type(keep_cross_validation_fold_assignment, None, bool) self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment @property def score_each_iteration(self): """ Whether to score during each iteration of model training. Type: ``bool`` (default: ``False``). 
""" return self._parms.get("score_each_iteration") @score_each_iteration.setter def score_each_iteration(self, score_each_iteration): assert_is_type(score_each_iteration, None, bool) self._parms["score_each_iteration"] = score_each_iteration @property def score_tree_interval(self): """ Score the model after every so many trees. Disabled if set to 0. Type: ``int`` (default: ``0``). """ return self._parms.get("score_tree_interval") @score_tree_interval.setter def score_tree_interval(self, score_tree_interval): assert_is_type(score_tree_interval, None, int) self._parms["score_tree_interval"] = score_tree_interval @property def fold_assignment(self): """ Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify the folds based on the response variable, for classification problems. One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"`` (default: ``"auto"``). """ return self._parms.get("fold_assignment") @fold_assignment.setter def fold_assignment(self, fold_assignment): assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified")) self._parms["fold_assignment"] = fold_assignment @property def fold_column(self): """ Column with cross-validation fold index assignment per observation. Type: ``str``. """ return self._parms.get("fold_column") @fold_column.setter def fold_column(self, fold_column): assert_is_type(fold_column, None, str) self._parms["fold_column"] = fold_column @property def response_column(self): """ Response variable column. Type: ``str``. """ return self._parms.get("response_column") @response_column.setter def response_column(self, response_column): assert_is_type(response_column, None, str) self._parms["response_column"] = response_column @property def ignored_columns(self): """ Names of columns to ignore for training. Type: ``List[str]``. 
""" return self._parms.get("ignored_columns") @ignored_columns.setter def ignored_columns(self, ignored_columns): assert_is_type(ignored_columns, None, [str]) self._parms["ignored_columns"] = ignored_columns @property def ignore_const_cols(self): """ Ignore constant columns. Type: ``bool`` (default: ``True``). """ return self._parms.get("ignore_const_cols") @ignore_const_cols.setter def ignore_const_cols(self, ignore_const_cols): assert_is_type(ignore_const_cols, None, bool) self._parms["ignore_const_cols"] = ignore_const_cols @property def offset_column(self): """ [Deprecated] Offset column. This will be added to the combination of columns before applying the link function. Type: ``str``. """ return self._parms.get("offset_column") @offset_column.setter def offset_column(self, offset_column): assert_is_type(offset_column, None, str) self._parms["offset_column"] = offset_column @property def weights_column(self): """ Column with observation weights. Giving some observation a weight of zero is equivalent to excluding it from the dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data frame. This is typically the number of times a row is repeated, but non-integer values are supported as well. During training, rows with higher weights matter more, due to the larger loss function pre-factor. Type: ``str``. """ return self._parms.get("weights_column") @weights_column.setter def weights_column(self, weights_column): assert_is_type(weights_column, None, str) self._parms["weights_column"] = weights_column @property def balance_classes(self): """ Balance training data class counts via over/under-sampling (for imbalanced data). Type: ``bool`` (default: ``False``). 
""" return self._parms.get("balance_classes") @balance_classes.setter def balance_classes(self, balance_classes): assert_is_type(balance_classes, None, bool) self._parms["balance_classes"] = balance_classes @property def class_sampling_factors(self): """ Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires balance_classes. Type: ``List[float]``. """ return self._parms.get("class_sampling_factors") @class_sampling_factors.setter def class_sampling_factors(self, class_sampling_factors): assert_is_type(class_sampling_factors, None, [float]) self._parms["class_sampling_factors"] = class_sampling_factors @property def max_after_balance_size(self): """ Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires balance_classes. Type: ``float`` (default: ``5``). """ return self._parms.get("max_after_balance_size") @max_after_balance_size.setter def max_after_balance_size(self, max_after_balance_size): assert_is_type(max_after_balance_size, None, float) self._parms["max_after_balance_size"] = max_after_balance_size @property def max_confusion_matrix_size(self): """ [Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs Type: ``int`` (default: ``20``). """ return self._parms.get("max_confusion_matrix_size") @max_confusion_matrix_size.setter def max_confusion_matrix_size(self, max_confusion_matrix_size): assert_is_type(max_confusion_matrix_size, None, int) self._parms["max_confusion_matrix_size"] = max_confusion_matrix_size @property def max_hit_ratio_k(self): """ Max. number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable) Type: ``int`` (default: ``0``). 
""" return self._parms.get("max_hit_ratio_k") @max_hit_ratio_k.setter def max_hit_ratio_k(self, max_hit_ratio_k): assert_is_type(max_hit_ratio_k, None, int) self._parms["max_hit_ratio_k"] = max_hit_ratio_k @property def ntrees(self): """ Number of trees. Type: ``int`` (default: ``50``). """ return self._parms.get("ntrees") @ntrees.setter def ntrees(self, ntrees): assert_is_type(ntrees, None, int) self._parms["ntrees"] = ntrees @property def max_depth(self): """ Maximum tree depth. Type: ``int`` (default: ``20``). """ return self._parms.get("max_depth") @max_depth.setter def max_depth(self, max_depth): assert_is_type(max_depth, None, int) self._parms["max_depth"] = max_depth @property def min_rows(self): """ Fewest allowed (weighted) observations in a leaf. Type: ``float`` (default: ``1``). """ return self._parms.get("min_rows") @min_rows.setter def min_rows(self, min_rows): assert_is_type(min_rows, None, numeric) self._parms["min_rows"] = min_rows @property def nbins(self): """ For numerical columns (real/int), build a histogram of (at least) this many bins, then split at the best point Type: ``int`` (default: ``20``). """ return self._parms.get("nbins") @nbins.setter def nbins(self, nbins): assert_is_type(nbins, None, int) self._parms["nbins"] = nbins @property def nbins_top_level(self): """ For numerical columns (real/int), build a histogram of (at most) this many bins at the root level, then decrease by factor of two per level Type: ``int`` (default: ``1024``). """ return self._parms.get("nbins_top_level") @nbins_top_level.setter def nbins_top_level(self, nbins_top_level): assert_is_type(nbins_top_level, None, int) self._parms["nbins_top_level"] = nbins_top_level @property def nbins_cats(self): """ For categorical columns (factors), build a histogram of this many bins, then split at the best point. Higher values can lead to more overfitting. Type: ``int`` (default: ``1024``). 
""" return self._parms.get("nbins_cats") @nbins_cats.setter def nbins_cats(self, nbins_cats): assert_is_type(nbins_cats, None, int) self._parms["nbins_cats"] = nbins_cats @property def r2_stopping(self): """ r2_stopping is no longer supported and will be ignored if set - please use stopping_rounds, stopping_metric and stopping_tolerance instead. Previous version of H2O would stop making trees when the R^2 metric equals or exceeds this Type: ``float`` (default: ``1.797693135e+308``). """ return self._parms.get("r2_stopping") @r2_stopping.setter def r2_stopping(self, r2_stopping): assert_is_type(r2_stopping, None, numeric) self._parms["r2_stopping"] = r2_stopping @property def stopping_rounds(self): """ Early stopping based on convergence of stopping_metric. Stop if simple moving average of length k of the stopping_metric does not improve for k:=stopping_rounds scoring events (0 to disable) Type: ``int`` (default: ``0``). """ return self._parms.get("stopping_rounds") @stopping_rounds.setter def stopping_rounds(self, stopping_rounds): assert_is_type(stopping_rounds, None, int) self._parms["stopping_rounds"] = stopping_rounds @property def stopping_metric(self): """ Metric to use for early stopping (AUTO: logloss for classification, deviance for regression) One of: ``"auto"``, ``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``, ``"lift_top_group"``, ``"misclassification"``, ``"mean_per_class_error"``, ``"r2"`` (default: ``"auto"``). 
""" return self._parms.get("stopping_metric") @stopping_metric.setter def stopping_metric(self, stopping_metric): assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "lift_top_group", "misclassification", "mean_per_class_error", "r2")) self._parms["stopping_metric"] = stopping_metric @property def stopping_tolerance(self): """ Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much) Type: ``float`` (default: ``0.001``). """ return self._parms.get("stopping_tolerance") @stopping_tolerance.setter def stopping_tolerance(self, stopping_tolerance): assert_is_type(stopping_tolerance, None, numeric) self._parms["stopping_tolerance"] = stopping_tolerance @property def max_runtime_secs(self): """ Maximum allowed runtime in seconds for model training. Use 0 to disable. Type: ``float`` (default: ``0``). """ return self._parms.get("max_runtime_secs") @max_runtime_secs.setter def max_runtime_secs(self, max_runtime_secs): assert_is_type(max_runtime_secs, None, numeric) self._parms["max_runtime_secs"] = max_runtime_secs @property def seed(self): """ Seed for pseudo random number generator (if applicable) Type: ``int`` (default: ``-1``). """ return self._parms.get("seed") @seed.setter def seed(self, seed): assert_is_type(seed, None, int) self._parms["seed"] = seed @property def build_tree_one_node(self): """ Run on one node only; no network overhead but fewer cpus used. Suitable for small datasets. Type: ``bool`` (default: ``False``). """ return self._parms.get("build_tree_one_node") @build_tree_one_node.setter def build_tree_one_node(self, build_tree_one_node): assert_is_type(build_tree_one_node, None, bool) self._parms["build_tree_one_node"] = build_tree_one_node @property def mtries(self): """ Number of variables randomly sampled as candidates at each split. 
If set to -1, defaults to sqrt{p} for classification and p/3 for regression (where p is the # of predictors Type: ``int`` (default: ``-1``). """ return self._parms.get("mtries") @mtries.setter def mtries(self, mtries): assert_is_type(mtries, None, int) self._parms["mtries"] = mtries @property def sample_rate(self): """ Row sample rate per tree (from 0.0 to 1.0) Type: ``float`` (default: ``0.6320000291``). """ return self._parms.get("sample_rate") @sample_rate.setter def sample_rate(self, sample_rate): assert_is_type(sample_rate, None, numeric) self._parms["sample_rate"] = sample_rate @property def sample_rate_per_class(self): """ A list of row sample rates per class (relative fraction for each class, from 0.0 to 1.0), for each tree Type: ``List[float]``. """ return self._parms.get("sample_rate_per_class") @sample_rate_per_class.setter def sample_rate_per_class(self, sample_rate_per_class): assert_is_type(sample_rate_per_class, None, [numeric]) self._parms["sample_rate_per_class"] = sample_rate_per_class @property def binomial_double_trees(self): """ For binary classification: Build 2x as many trees (one per class) - can lead to higher accuracy. Type: ``bool`` (default: ``False``). """ return self._parms.get("binomial_double_trees") @binomial_double_trees.setter def binomial_double_trees(self, binomial_double_trees): assert_is_type(binomial_double_trees, None, bool) self._parms["binomial_double_trees"] = binomial_double_trees @property def checkpoint(self): """ Model checkpoint to resume training with. Type: ``str``. """ return self._parms.get("checkpoint") @checkpoint.setter def checkpoint(self, checkpoint): assert_is_type(checkpoint, None, str, H2OEstimator) self._parms["checkpoint"] = checkpoint @property def col_sample_rate_change_per_level(self): """ Relative change of the column sampling rate for every level (must be > 0.0 and <= 2.0) Type: ``float`` (default: ``1``). 
""" return self._parms.get("col_sample_rate_change_per_level") @col_sample_rate_change_per_level.setter def col_sample_rate_change_per_level(self, col_sample_rate_change_per_level): assert_is_type(col_sample_rate_change_per_level, None, numeric) self._parms["col_sample_rate_change_per_level"] = col_sample_rate_change_per_level @property def col_sample_rate_per_tree(self): """ Column sample rate per tree (from 0.0 to 1.0) Type: ``float`` (default: ``1``). """ return self._parms.get("col_sample_rate_per_tree") @col_sample_rate_per_tree.setter def col_sample_rate_per_tree(self, col_sample_rate_per_tree): assert_is_type(col_sample_rate_per_tree, None, numeric) self._parms["col_sample_rate_per_tree"] = col_sample_rate_per_tree @property def min_split_improvement(self): """ Minimum relative improvement in squared error reduction for a split to happen Type: ``float`` (default: ``1e-05``). """ return self._parms.get("min_split_improvement") @min_split_improvement.setter def min_split_improvement(self, min_split_improvement): assert_is_type(min_split_improvement, None, numeric) self._parms["min_split_improvement"] = min_split_improvement @property def histogram_type(self): """ What type of histogram to use for finding optimal split points One of: ``"auto"``, ``"uniform_adaptive"``, ``"random"``, ``"quantiles_global"``, ``"round_robin"`` (default: ``"auto"``). """ return self._parms.get("histogram_type") @histogram_type.setter def histogram_type(self, histogram_type): assert_is_type(histogram_type, None, Enum("auto", "uniform_adaptive", "random", "quantiles_global", "round_robin")) self._parms["histogram_type"] = histogram_type @property def categorical_encoding(self): """ Encoding scheme for categorical features One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``, ``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"`` (default: ``"auto"``). 
""" return self._parms.get("categorical_encoding") @categorical_encoding.setter def categorical_encoding(self, categorical_encoding): assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited")) self._parms["categorical_encoding"] = categorical_encoding @property def calibrate_model(self): """ Use Platt Scaling to calculate calibrated class probabilities. Calibration can provide more accurate estimates of class probabilities. Type: ``bool`` (default: ``False``). """ return self._parms.get("calibrate_model") @calibrate_model.setter def calibrate_model(self, calibrate_model): assert_is_type(calibrate_model, None, bool) self._parms["calibrate_model"] = calibrate_model @property def calibration_frame(self): """ Calibration frame for Platt Scaling Type: ``H2OFrame``. """ return self._parms.get("calibration_frame") @calibration_frame.setter def calibration_frame(self, calibration_frame): assert_is_type(calibration_frame, None, H2OFrame) self._parms["calibration_frame"] = calibration_frame @property def distribution(self): """ [Deprecated] Distribution function One of: ``"auto"``, ``"bernoulli"``, ``"multinomial"``, ``"gaussian"``, ``"poisson"``, ``"gamma"``, ``"tweedie"``, ``"laplace"``, ``"quantile"``, ``"huber"`` (default: ``"auto"``). """ return self._parms.get("distribution") @distribution.setter def distribution(self, distribution): assert_is_type(distribution, None, Enum("auto", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber")) self._parms["distribution"] = distribution @property def custom_metric_func(self): """ Reference to custom evaluation function, format: `language:keyName=funcName` Type: ``str``. 
""" return self._parms.get("custom_metric_func") @custom_metric_func.setter def custom_metric_func(self, custom_metric_func): assert_is_type(custom_metric_func, None, str) self._parms["custom_metric_func"] = custom_metric_func
{ "content_hash": "c6b16af4762ed39e12dec15c49d9899d", "timestamp": "", "source": "github", "line_count": 749, "max_line_length": 189, "avg_line_length": 32.81842456608812, "alnum_prop": 0.6096578658313332, "repo_name": "spennihana/h2o-3", "id": "e468813a81db713acd9613da374db37d47713336", "size": "24782", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "h2o-py/h2o/estimators/random_forest.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "12629" }, { "name": "CSS", "bytes": "257122" }, { "name": "CoffeeScript", "bytes": "273112" }, { "name": "Emacs Lisp", "bytes": "2226" }, { "name": "Groovy", "bytes": "125187" }, { "name": "HTML", "bytes": "2111506" }, { "name": "Java", "bytes": "9481047" }, { "name": "JavaScript", "bytes": "87944" }, { "name": "Jupyter Notebook", "bytes": "6165027" }, { "name": "Makefile", "bytes": "42233" }, { "name": "Python", "bytes": "4982123" }, { "name": "R", "bytes": "2699289" }, { "name": "Ruby", "bytes": "3506" }, { "name": "Scala", "bytes": "32768" }, { "name": "Shell", "bytes": "179758" }, { "name": "TeX", "bytes": "657375" } ], "symlink_target": "" }
""" pyClanSphere.plugins.gamesquad.forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Forms we gonna need to handle creation and editing of entries :copyright: (c) 2009 - 2010 by the pyClanSphere Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from pyClanSphere.api import * from pyClanSphere.models import User from pyClanSphere.utils import forms from pyClanSphere.utils.validators import ValidationError, is_not_whitespace_only from pyClanSphere.plugins.gamesquad.models import Game, Squad, SquadMember, Level, GameAccount class _GameBoundForm(forms.Form): """Internal baseclass for games bound forms.""" def __init__(self, game, initial=None): forms.Form.__init__(self, initial) self.app = get_application() self.game = game def as_widget(self): widget = forms.Form.as_widget(self) widget.game = self.game widget.new = self.game is None return widget class EditGameForm(_GameBoundForm): """Edit or create a game.""" gamename = forms.TextField(lazy_gettext(u'Gamename'), max_length=50, validators=[is_not_whitespace_only()], required=True) def __init__(self, game=None, initial=None): if game is not None: initial = forms.fill_dict(initial, gamename=game.name ) _GameBoundForm.__init__(self, game, initial) def validate_gamename(self, value): query = Game.query.filter_by(name=value) if self.game is not None: query = query.filter(Game.id != self.game.id) if query.first() is not None: raise ValidationError(_('This gamename is already in use')) def make_game(self): """A helper function that creates a new game object.""" game = Game(self.data['gamename']) self.game = game return game def save_changes(self): """Apply the changes.""" self.game.name = self.data['gamename'] class DeleteGameForm(_GameBoundForm): """Used to delete a game from the admin panel.""" action = forms.ChoiceField(lazy_gettext(u'What should pyClanSphere do with squads ' u'assigned to this group?'), choices=[ ('delete_membership', lazy_gettext(u'Delete game, remove squads')), ('relocate', 
lazy_gettext(u'Move the squads to another game')) ], widget=forms.RadioButtonGroup) relocate_to = forms.ModelField(Game, 'id', lazy_gettext(u'Relocate squad to'), widget=forms.SelectBox) def __init__(self, game, initial=None): self.relocate_to.choices = [('', u'')] + [ (g.id, g.name) for g in Game.query.filter(Game.id != game.id) ] _GameBoundForm.__init__(self, game, forms.fill_dict(initial, action='delete_membership')) def context_validate(self, data): if data['action'] == 'relocate' and not data['relocate_to']: raise ValidationError(_('You have to select a game which ' 'the squad gets assigned to.')) def delete_game(self): """Deletes a game.""" if self.data['action'] == 'relocate': new_game = Game.query.filter_by(id=self.data['relocate_to'].id).first() for squad in self.game.squads: new_game.squads.append(squad) db.commit() signals.before_game_deleted.send(game=self.game, formdata=self.data) db.delete(self.game) class _SquadBoundForm(forms.Form): """Internal baseclass for squads bound forms.""" def __init__(self, squad, initial=None): forms.Form.__init__(self, initial) self.app = get_application() self.squad = squad def as_widget(self): widget = forms.Form.as_widget(self) widget.squad = self.squad widget.new = self.squad is None return widget class EditSquadForm(_SquadBoundForm): """Edit or create a squad.""" squadname = forms.TextField(lazy_gettext(u'Squadname'), max_length=50, validators=[is_not_whitespace_only()], required=True) game = forms.ModelField(Game, 'id', lazy_gettext(u'Belongs to'), widget=forms.SelectBox) tag = forms.TextField(lazy_gettext(u'Squad Tag'), max_length=20, validators=[is_not_whitespace_only()]) def __init__(self, squad=None, initial=None): if squad is not None: initial = forms.fill_dict(initial, squadname=squad.name, game=squad.game, tag=squad.tag ) _SquadBoundForm.__init__(self, squad, initial) self.game.choices = [(game.id, game.name) for game in Game.query.all()] def _set_common_attributes(self, squad): squad.game = self.data['game'] 
forms.set_fields(squad, self.data) def make_squad(self): """A helper function that creates a new squad object.""" squad = Squad(self.data['game'], self.data['squadname']) self._set_common_attributes(squad) self.squad = squad return squad def save_changes(self): """Apply the changes.""" self.squad.name = self.data['squadname'] self._set_common_attributes(self.squad) class DeleteSquadForm(_SquadBoundForm): """Used to delete a squad from the admin panel.""" action = forms.ChoiceField(lazy_gettext(u'What should pyClanSphere do with members ' u'assigned to this squad?'), choices=[ ('delete_membership', lazy_gettext(u'Delete squad, remove squadmemberships')), ('relocate', lazy_gettext(u'Move the members to another squad')) ], widget=forms.RadioButtonGroup) relocate_to = forms.ModelField(Squad, 'id', lazy_gettext(u'Relocate members to'), widget=forms.SelectBox) def __init__(self, squad, initial=None): self.relocate_to.choices = [('', u'')] + [ (g.id, g.name) for g in Squad.query.filter(Squad.id != squad.id) ] _SquadBoundForm.__init__(self, squad, forms.fill_dict(initial, action='delete_membership')) def context_validate(self, data): if data['action'] == 'relocate' and not data['relocate_to']: raise ValidationError(_('You have to select a squad which ' 'the squad gets assigned to.')) def delete_squad(self): """Deletes a squad.""" if self.data['action'] == 'relocate': new_squad = Squad.query.filter_by(id=self.data['relocate_to'].id).first() for squadmember in self.squad.squadmembers: if squadmember not in new_squad.squadmembers: squadmember.squad_id = new_squad.id db.commit() signals.before_squad_deleted.send(squad=self.squad, formdata=self.data) db.delete(self.squad) class _SquadMemberBoundForm(forms.Form): """Internal baseclass for squadmember bound forms.""" def __init__(self, squadmember, initial=None): forms.Form.__init__(self, initial) self.app = get_application() self.squadmember = squadmember def as_widget(self): widget = forms.Form.as_widget(self) 
widget.squadmember = self.squadmember widget.new = self.squadmember is None return widget class EditSquadMemberForm(_SquadMemberBoundForm): """Decide whos in our squad.""" clanmember = forms.ModelField(User, 'id', lazy_gettext(u'Clanmember'), widget=forms.SelectBox) level = forms.ModelField(Level, 'id', lazy_gettext(u'Level'), widget=forms.SelectBox) othertasks = forms.TextField(lazy_gettext(u'Other tasks'), max_length=100, validators=[is_not_whitespace_only()]) def __init__(self, squad, squadmember=None, initial=None): if squadmember is not None: initial = forms.fill_dict(initial, clanmember=squadmember.user, level=squadmember.level, othertasks=squadmember.othertasks ) _SquadMemberBoundForm.__init__(self, squadmember, initial) assert squad is not None self.squad = squad # Need access to squad here, as the member might be new and thus there is no # member.squad relation yet. self.clanmember.choices = [(user.id, user.display_name) for \ user in User.query.namesort().all() if user not in self.squad.members] if self.squadmember: self.clanmember.choices.insert(0,(squadmember.user.id, squadmember.user.display_name)) self.level.choices = [(level.id, level.name) for level in Level.query.order_by(Level.ordering).all()] def make_squadmember(self): """A helper function that creates new SquadMember objects.""" squadmember = SquadMember(self.data['clanmember']) self._set_common_attributes(squadmember) self.new_squadmember = squadmember return squadmember def _set_common_attributes(self, squadmember): squadmember.squad = self.squad squadmember.clanmember = self.data['clanmember'] squadmember.level = self.data['level'] squadmember.othertasks = self.data['othertasks'] def save_changes(self): """Apply the changes.""" self._set_common_attributes(self.squadmember) class DeleteSquadMemberForm(_SquadMemberBoundForm): """Used to remove a member from a squad.""" def delete_member(self): """Deletes the user.""" db.delete(self.squadmember) class _LevelBoundForm(forms.Form): """Internal 
baseclass for levels bound forms.""" def __init__(self, level, initial=None): forms.Form.__init__(self, initial) self.app = get_application() self.level = level def as_widget(self): widget = forms.Form.as_widget(self) widget.level = self.level widget.new = self.level is None return widget class EditLevelForm(_LevelBoundForm): """Edit or create a level.""" levelname = forms.TextField(lazy_gettext(u'Levelname'), max_length=32, validators=[is_not_whitespace_only()], required=True) ordering = forms.IntegerField(lazy_gettext(u'Order'), help_text=_('Sorting order, ascending')) def __init__(self, level=None, initial=None): if level is not None: initial = forms.fill_dict(initial, levelname=level.name, ordering=level.ordering ) _LevelBoundForm.__init__(self, level, initial) def validate_levelname(self, value): query = Level.query.filter_by(name=value) if self.level is not None: query = query.filter(Level.id != self.level.id) if query.first() is not None: raise ValidationError(_('This levelname is already in use')) def validate_ordering(self, level): if level < 0: raise ValidationError(_('Ordering with positive integers or zero only')) def _set_common_attributes(self, level): forms.set_fields(level, self.data, 'ordering') if self.data['ordering'] is None: level.ordering = Level.query.count()-1 else: level.ordering = self.data['ordering'] def make_level(self): """A helper function that creates a new level object.""" level = Level(self.data['levelname']) self._set_common_attributes(level) self.level = level return level def save_changes(self): """Apply the changes.""" self.level.name = self.data['levelname'] self._set_common_attributes(self.level) class DeleteLevelForm(_LevelBoundForm): """Used to delete a level from the admin panel.""" relocate_to = forms.ModelField(Level, 'id', lazy_gettext(u'Reassign squadmembers to'), widget=forms.SelectBox) def __init__(self, level, initial=None): self.relocate_to.choices = [('', u'')] + [ (g.id, g.name) for g in 
Level.query.filter(Level.id != level.id) ] _LevelBoundForm.__init__(self, level, forms.fill_dict(initial, action='delete_membership')) def context_validate(self, data): if not data['relocate_to']: raise ValidationError(_('You have to select a level which ' 'the squadmembers get assigned to.')) def delete_level(self): """Deletes a level.""" new_level = Level.query.filter_by(id=self.data['relocate_to'].id).first() for squadmember in SquadMember.query.filter_by(level_id=self.level.id): squadmember.level = new_level db.commit() signals.before_level_deleted.send(level=self.level, formdata=self.data) db.delete(self.level) class _GameAccountBoundForm(forms.Form): """Internal baseclass for game account bound forms.""" def __init__(self, gameaccount, initial=None): forms.Form.__init__(self, initial) self.app = get_application() self.gameaccount = gameaccount def as_widget(self): widget = forms.Form.as_widget(self) widget.gameaccount = self.gameaccount return widget class EditGameAccountForm(_GameAccountBoundForm): """Update Players' Game Accounts.""" game = forms.ModelField(Game, 'id', lazy_gettext(u'Game'), widget=forms.SelectBox) account = forms.TextField(lazy_gettext(u'Account ID'), max_length=100, validators=[is_not_whitespace_only()]) def __init__(self, user, gameaccount=None, initial=None): if gameaccount is not None: initial = forms.fill_dict(initial, game=gameaccount.game, account=gameaccount.account ) _GameAccountBoundForm.__init__(self, gameaccount, initial) self.user = user self.game.choices = [(game.id, game.name) for game in Game.query.all()] def make_gameaccount(self): """A helper function that creates new GameAccount objects.""" gameaccount = GameAccount(self.data['game'], self.user, self.data['account']) self.gameaccount = gameaccount return gameaccount def context_validate(self, data): query = GameAccount.query.filter_by(game_id=data['game'].id).filter_by(account=data['account']) if self.gameaccount is not None: query = query.filter(GameAccount.id != 
self.gameaccount.id) if query.first() is not None: raise ValidationError(_('This account is already registered')) def _set_common_attributes(self, gameaccount): gameaccount.user = self.user gameaccount.game = self.data['game'] gameaccount.account = self.data['account'] def save_changes(self): """Apply the changes.""" self._set_common_attributes(self.gameaccount) class DeleteGameAccountForm(_GameAccountBoundForm): """Used to remove a member from a squad.""" def delete_account(self): """Deletes the game account.""" signals.before_gameaccount_deleted.send(gameaccount=self.gameaccount) db.delete(self.gameaccount)
{ "content_hash": "76b86dc4badec41a37e91a7fe53ce6b4", "timestamp": "", "source": "github", "line_count": 423, "max_line_length": 109, "avg_line_length": 37.061465721040186, "alnum_prop": 0.5988390635963513, "repo_name": "jokey2k/pyClanSphere", "id": "3bf41b451c6181daf78b1bf6ad75099b92a4a0af", "size": "15701", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyClanSphere/plugins/gamesquad/forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "188174" }, { "name": "Python", "bytes": "891594" } ], "symlink_target": "" }
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois


class TestWhoisAscioComStatusAvailable(object):
    """Parser tests for a whois.ascio.com response to an *available* domain.

    setUp loads a canned server response from a fixture file and builds a
    record from it; because the domain is available, every accessor on the
    record is expected to report an "empty" value (None, False or []).
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.ascio.com/status_available.txt"
        host = "whois.ascio.com"
        # Read the fixture inside a context manager so the file handle is
        # closed deterministically instead of leaking until garbage
        # collection (the original `open(...).read()` never closed it).
        with open(fixture_path, "r") as fixture:
            response = fixture.read()
        part = yawhois.record.Part(response, host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'available')

    def test_available(self):
        eq_(self.record.available, True)

    def test_domain(self):
        eq_(self.record.domain, None)

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_created_on(self):
        eq_(self.record.created_on, None)

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(self.record.registrant_contacts, [])

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on, None)

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on, None)
{ "content_hash": "6bc1a86300f5b85bcaf83d2f24d1cab2", "timestamp": "", "source": "github", "line_count": 54, "max_line_length": 85, "avg_line_length": 31.185185185185187, "alnum_prop": 0.6276722090261283, "repo_name": "huyphan/pyyawhois", "id": "2c47dbafc21250b64edc6c7c3311e9939d0f6743", "size": "1947", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/record/parser/test_response_whois_ascio_com_status_available.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1859653" } ], "symlink_target": "" }
class PebkacError(RuntimeError):
    """A user-caused error: a misconfigured or faulty snippet.

    The name comes from the support joke "Problem Exists Between
    Keyboard And Chair" — raised when the snippet author, not the
    plugin, is at fault.
    """
{ "content_hash": "93e084a4f14b371b9cedc26af270ea14", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 76, "avg_line_length": 30.428571428571427, "alnum_prop": 0.6854460093896714, "repo_name": "khatchad/vimrc", "id": "2a43d252e7e27117a1c6786246e9618c295264b6", "size": "255", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "sources_non_forked/ultisnips/pythonx/UltiSnips/error.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "961" }, { "name": "C", "bytes": "11028" }, { "name": "C#", "bytes": "1235" }, { "name": "C++", "bytes": "3464" }, { "name": "CMake", "bytes": "3900" }, { "name": "CSS", "bytes": "950" }, { "name": "Clojure", "bytes": "720" }, { "name": "CoffeeScript", "bytes": "11440" }, { "name": "Crystal", "bytes": "9834" }, { "name": "Dart", "bytes": "4388" }, { "name": "Dockerfile", "bytes": "2148" }, { "name": "Elixir", "bytes": "1903" }, { "name": "Elm", "bytes": "5333" }, { "name": "Emacs Lisp", "bytes": "4563" }, { "name": "Go", "bytes": "1113" }, { "name": "HTML", "bytes": "1634" }, { "name": "Haml", "bytes": "39" }, { "name": "Haskell", "bytes": "863" }, { "name": "Java", "bytes": "9033" }, { "name": "JavaScript", "bytes": "10452" }, { "name": "Lua", "bytes": "19732" }, { "name": "Makefile", "bytes": "16292" }, { "name": "PHP", "bytes": "2726" }, { "name": "PowerShell", "bytes": "10114" }, { "name": "PureScript", "bytes": "7576" }, { "name": "Python", "bytes": "392724" }, { "name": "R", "bytes": "1288" }, { "name": "Ruby", "bytes": "119025" }, { "name": "Rust", "bytes": "6153" }, { "name": "SCSS", "bytes": "1801" }, { "name": "Scala", "bytes": "1504" }, { "name": "Shell", "bytes": "40972" }, { "name": "TypeScript", "bytes": "4661" }, { "name": "VBScript", "bytes": "7510" }, { "name": "Vim Script", "bytes": "13029765" }, { "name": "Vim Snippet", "bytes": "785859" }, { "name": "Vue", "bytes": "662" } ], "symlink_target": "" }
# Build recipe: the GStreamer "bad" plugin set, pinned to the legacy
# 0.10 series.
# NOTE(review): the leading space inside every flag string looks
# intentional — presumably the flags are concatenated into a single
# ./configure command line by GstreamerPackage; confirm before
# "cleaning" them up.
GstreamerPackage ('gstreamer', 'gst-plugins-bad', '0.10.23',
    configure_flags = [
        ' --disable-gtk-doc',
        ' --with-plugins=quicktime',  # restrict the build to the QuickTime plugin
        ' --disable-apexsink',
        ' --disable-bz2',
        ' --disable-metadata',
        ' --disable-oss4',
        ' --disable-theoradec'
    ]
)
{ "content_hash": "6b3f347756831051efa2ee9c1d1ff348", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 80, "avg_line_length": 27.555555555555557, "alnum_prop": 0.6330645161290323, "repo_name": "BansheeMediaPlayer/bockbuild", "id": "7bd6b10ce7f00f850a64373d069aeb47c96fc5f3", "size": "248", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "packages/gst-plugins-bad010.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "35710" }, { "name": "Makefile", "bytes": "2017" }, { "name": "Python", "bytes": "200837" }, { "name": "Shell", "bytes": "36817" } ], "symlink_target": "" }
class AbstractConnection:
    """An abstract connection to send and receive data

    This is used to abstract the underlying communication stream from the
    implementation of the miniSSL protocol. This base class only manages
    the handler plumbing and the open/closed state; concrete subclasses
    provide the actual transport.
    """

    def __init__(self):
        # Callbacks registered via set_close_handler()/set_receive_handler();
        # None means "no handler installed".
        self._close_handler = None
        self._receive_handler = None
        # NOTE(review): a freshly constructed connection reports closed;
        # presumably a concrete subclass sets is_open = True once the
        # underlying transport is established — confirm against subclasses.
        self.is_open = False

        # The expected common name of the endpoint
        #
        # This can be used for cryptographic verification if certificate of the
        # endpoint is transmitted.
        self.common_name = None

    def close(self):
        """Closes the connection and calls the close handler

        Calling close on a closed connection has no effect.
        """
        if self.is_open:
            # Clear the flag before invoking the handler so a re-entrant
            # close() from inside the handler is a harmless no-op.
            self.is_open = False
            # Idiomatic identity test (was: `not self._close_handler is None`).
            if self._close_handler is not None:
                self._close_handler(self)

    def send(self, data):
        """Send raw data to the endpoint of the connection

        :param data: An array of bytes

        This base implementation is a no-op; subclasses override it to
        write to their transport.
        """
        pass

    def set_close_handler(self, handler):
        """Sets a handler that will be called when the connection was closed

        :param handler: The handler to call

        The handler will be called as follows if either end closes the
        connection:

            handler(self)

        where self is a reference to the connection that was closed.
        """
        self._close_handler = handler

    def set_receive_handler(self, handler):
        """Sets a handler that will be called when data is received

        :param handler: The handler to be called.

        The handler will be invoked as follows:

            handler(self, data)

        where self is a reference to the connection and data is an array of
        bytes.

        Depending on the underlying implementation of the connection, the
        handler might be called by a separate thread. It is the duty of the
        handler to deal with it.
        """
        self._receive_handler = handler
{ "content_hash": "d4c31db7d000cdb1a6cd5b946719c7b5", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 79, "avg_line_length": 29.573529411764707, "alnum_prop": 0.6116360019890602, "repo_name": "vsaw/miniSSL", "id": "773dcf2f7e82be1a07f1c31bca9f827cb2ed4e23", "size": "2011", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "minissl/AbstractConnection.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "80513" } ], "symlink_target": "" }
"""BR-specific Form helpers.""" from __future__ import unicode_literals import re import warnings from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import CharField, Field, RegexField, Select from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from localflavor.compat import EmptyValueCompatMixin from localflavor.deprecation import DeprecatedPhoneNumberFormFieldMixin, RemovedInLocalflavor20Warning from .br_states import STATE_CHOICES phone_digits_re = re.compile(r'^(\d{2})[-\.]?(\d{4,5})[-\.]?(\d{4})$') cpf_digits_re = re.compile(r'^(\d{3})\.(\d{3})\.(\d{3})-(\d{2})$') cnpj_digits_re = re.compile( r'^(\d{2})[.-]?(\d{3})[.-]?(\d{3})/(\d{4})-(\d{2})$' ) process_digits_re = re.compile( r'^(\d{7})-?(\d{2})\.?(\d{4})\.?(\d)\.?(\d{2})\.?(\d{4})$' ) class BRZipCodeField(RegexField): """A form field that validates input as a Brazilian zip code, with the format XXXXX-XXX.""" default_error_messages = { 'invalid': _('Enter a zip code in the format XXXXX-XXX.'), } def __init__(self, max_length=None, min_length=None, *args, **kwargs): super(BRZipCodeField, self).__init__(r'^\d{5}-\d{3}$', max_length, min_length, *args, **kwargs) class BRPhoneNumberField(Field, DeprecatedPhoneNumberFormFieldMixin): """ A form field that validates input as a Brazilian phone number. The phone number must be in either of the following formats: XX-XXXX-XXXX or XX-XXXXX-XXXX. 
""" default_error_messages = { 'invalid': _(('Phone numbers must be in either of the following ' 'formats: XX-XXXX-XXXX or XX-XXXXX-XXXX.')), } def clean(self, value): super(BRPhoneNumberField, self).clean(value) if value in EMPTY_VALUES: return '' value = re.sub('(\(|\)|\s+)', '', force_text(value)) m = phone_digits_re.search(value) if m: return '%s-%s-%s' % (m.group(1), m.group(2), m.group(3)) raise ValidationError(self.error_messages['invalid']) class BRStateSelect(Select): """A Select widget that uses a list of Brazilian states/territories as its choices.""" def __init__(self, attrs=None): super(BRStateSelect, self).__init__(attrs, choices=STATE_CHOICES) class BRStateChoiceField(Field): """A choice field that uses a list of Brazilian states as its choices.""" widget = Select default_error_messages = { 'invalid': _('Select a valid brazilian state. That state is not one of the available states.'), } def __init__(self, required=True, widget=None, label=None, initial=None, help_text=None): super(BRStateChoiceField, self).__init__(required, widget, label, initial, help_text) self.widget.choices = STATE_CHOICES def clean(self, value): value = super(BRStateChoiceField, self).clean(value) if value in EMPTY_VALUES: value = '' value = force_text(value) if value == '': return value valid_values = set([force_text(entry[0]) for entry in self.widget.choices]) if value not in valid_values: raise ValidationError(self.error_messages['invalid']) return value def dv_maker(v): if v >= 2: return 11 - v return 0 def DV_maker(v): # noqa warnings.warn('DV_maker is deprecated. Please use dv_maker instead.', RemovedInLocalflavor20Warning) return dv_maker(v) class BRCPFField(EmptyValueCompatMixin, CharField): """ A form field that validates a CPF number or a CPF string. A CPF number is compounded by XXX.XXX.XXX-VD. The two last digits are check digits. 
More information: http://en.wikipedia.org/wiki/Cadastro_de_Pessoas_F%C3%ADsicas """ default_error_messages = { 'invalid': _("Invalid CPF number."), 'max_digits': _("This field requires at most 11 digits or 14 characters."), } def __init__(self, max_length=14, min_length=11, *args, **kwargs): super(BRCPFField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """Value can be either a string in the format XXX.XXX.XXX-XX or an 11-digit number.""" value = super(BRCPFField, self).clean(value) if value in self.empty_values: return self.empty_value orig_value = value[:] if not value.isdigit(): cpf = cpf_digits_re.search(value) if cpf: value = ''.join(cpf.groups()) else: raise ValidationError(self.error_messages['invalid']) if len(value) != 11: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))]) new_1dv = dv_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))]) new_2dv = dv_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) if value.count(value[0]) == 11: raise ValidationError(self.error_messages['invalid']) return orig_value class BRCNPJField(EmptyValueCompatMixin, CharField): """ A form field that validates input as `Brazilian CNPJ`_. Input can either be of the format XX.XXX.XXX/XXXX-XX or be a group of 14 digits. If you want to use the long format only, you can specify: brcnpj_field = BRCNPJField(min_length=16) If you want to use the short format, you can specify: brcnpj_field = BRCNPJField(max_length=14) Otherwise both formats will be valid. .. _Brazilian CNPJ: http://en.wikipedia.org/wiki/National_identification_number#Brazil .. 
versionchanged:: 1.4 """ default_error_messages = { 'invalid': _("Invalid CNPJ number."), 'max_digits': _("This field requires at least 14 digits"), } def __init__(self, min_length=14, max_length=18, *args, **kwargs): super(BRCNPJField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a group of 14 characters.""" value = super(BRCNPJField, self).clean(value) if value in self.empty_values: return self.empty_value orig_value = value[:] if not value.isdigit(): cnpj = cnpj_digits_re.search(value) if cnpj: value = ''.join(cnpj.groups()) else: raise ValidationError(self.error_messages['invalid']) if len(value) != 14: raise ValidationError(self.error_messages['max_digits']) orig_dv = value[-2:] new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(list(range(5, 1, -1)) + list(range(9, 1, -1)))]) new_1dv = dv_maker(new_1dv % 11) value = value[:-2] + str(new_1dv) + value[-1] new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(list(range(6, 1, -1)) + list(range(9, 1, -1)))]) new_2dv = dv_maker(new_2dv % 11) value = value[:-1] + str(new_2dv) if value[-2:] != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value def mod_97_base10(value): return 98 - ((value * 100 % 97) % 97) class BRProcessoField(EmptyValueCompatMixin, CharField): """ A form field that validates a Legal Process(Processo) number or a Legal Process string. A Processo number is compounded by NNNNNNN-DD.AAAA.J.TR.OOOO. The two DD digits are check digits. More information: http://www.cnj.jus.br/busca-atos-adm?documento=2748 .. 
versionadded:: 1.2 """ default_error_messages = {'invalid': _("Invalid Process number.")} def __init__(self, max_length=25, min_length=20, *args, **kwargs): super(BRProcessoField, self).__init__(max_length, min_length, *args, **kwargs) def clean(self, value): """Value can be either a string in the format NNNNNNN-DD.AAAA.J.TR.OOOO or an 20-digit number.""" value = super(BRProcessoField, self).clean(value) if value in self.empty_values: return self.empty_value orig_value = value[:] if not value.isdigit(): process_number = process_digits_re.search(value) if process_number: value = ''.join(process_number.groups()) else: raise ValidationError(self.error_messages['invalid']) orig_dv = value[7:9] value_without_digits = int(value[0:7] + value[9:]) if str(mod_97_base10(value_without_digits)).zfill(2) != orig_dv: raise ValidationError(self.error_messages['invalid']) return orig_value
{ "content_hash": "3db91489917cd8e8b6716225e462dbc3", "timestamp": "", "source": "github", "line_count": 254, "max_line_length": 115, "avg_line_length": 35.71653543307087, "alnum_prop": 0.6083553791887125, "repo_name": "jieter/django-localflavor", "id": "6a52db474f454c7d619cb9634235baf7557a80d9", "size": "9096", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "localflavor/br/forms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "896597" } ], "symlink_target": "" }
"""Dynamic Imaging of Coherent Sources (DICS).""" # Authors: Marijn van Vliet <w.m.vanvliet@gmail.com> # Britta Westner <britta.wstnr@gmail.com> # Susanna Aro <susanna.aro@aalto.fi> # Roman Goj <roman.goj@gmail.com> # # License: BSD-3-Clause import numpy as np from ..channels import equalize_channels from ..io.pick import pick_info, pick_channels from ..utils import (logger, verbose, _check_one_ch_type, _check_channels_spatial_filter, _check_rank, _check_option, _validate_type, warn) from ..forward import _subject_from_forward from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth from ..rank import compute_rank from ..source_estimate import _make_stc, _get_src_type from ..time_frequency import EpochsTFR from ..time_frequency.tfr import _check_tfr_complex from ._compute_beamformer import (_prepare_beamformer_input, _compute_beamformer, _check_src_type, Beamformer, _compute_power, _proj_whiten_data) @verbose def make_dics(info, forward, csd, reg=0.05, noise_csd=None, label=None, pick_ori=None, rank=None, weight_norm=None, reduce_rank=False, depth=1., real_filter=True, inversion='matrix', verbose=None): """Compute a Dynamic Imaging of Coherent Sources (DICS) spatial filter. This is a beamformer filter that can be used to estimate the source power at a specific frequency range :footcite:`GrossEtAl2001`. It does this by constructing a spatial filter for each source point. The computation of these filters is very similar to those of the LCMV beamformer (:func:`make_lcmv`), but instead of operating on a covariance matrix, the CSD matrix is used. When applying these filters to a CSD matrix (see :func:`apply_dics_csd`), the source power can be estimated for each source point. Parameters ---------- %(info_not_none)s forward : instance of Forward Forward operator. csd : instance of CrossSpectralDensity The data cross-spectral density (CSD) matrices. A source estimate is performed for each frequency or frequency-bin defined in the CSD object. 
reg : float The regularization to apply to the cross-spectral density before computing the inverse. noise_csd : instance of CrossSpectralDensity | None Noise cross-spectral density (CSD) matrices. If provided, whitening will be done. The noise CSDs need to have been computed for the same frequencies as the data CSDs. Providing noise CSDs is mandatory if you mix sensor types, e.g. gradiometers with magnetometers or EEG with MEG. .. versionadded:: 0.20 label : Label | None Restricts the solution to a given label. %(pick_ori_bf)s %(rank_none)s .. versionadded:: 0.17 %(weight_norm)s Defaults to ``None``, in which case no normalization is performed. %(reduce_rank)s %(depth)s real_filter : bool If ``True``, take only the real part of the cross-spectral-density matrices to compute real filters. .. versionchanged:: 0.23 Version 0.23 an earlier used ``real_filter=False`` as the default, as of version 0.24 ``True`` is the default. %(inversion_bf)s .. versionchanged:: 0.21 Default changed to ``'matrix'``. %(verbose)s Returns ------- filters : instance of Beamformer Dictionary containing filter weights from DICS beamformer. Contains the following keys: 'kind' : str The type of beamformer, in this case 'DICS'. 'weights' : ndarray, shape (n_frequencies, n_weights) For each frequency, the filter weights of the beamformer. 'csd' : instance of CrossSpectralDensity The data cross-spectral density matrices used to compute the beamformer. 'ch_names' : list of str Channels used to compute the beamformer. 'proj' : ndarray, shape (n_channels, n_channels) Projections used to compute the beamformer. 'vertices' : list of ndarray Vertices for which the filter weights were computed. 'n_sources' : int Number of source location for which the filter weight were computed. 'subject' : str The subject ID. 'pick-ori' : None | 'max-power' | 'normal' | 'vector' The orientation in which the beamformer filters were computed. 
'inversion' : 'single' | 'matrix' Whether the spatial filters were computed for each dipole separately or jointly for all dipoles at each vertex using a matrix inversion. 'weight_norm' : None | 'unit-noise-gain' The normalization of the weights. 'src_type' : str Type of source space. 'source_nn' : ndarray, shape (n_sources, 3) For each source location, the surface normal. 'is_free_ori' : bool Whether the filter was computed in a fixed direction (pick_ori='max-power', pick_ori='normal') or not. 'whitener' : None | ndarray, shape (n_channels, n_channels) Whitening matrix, provided if whitening was applied to the covariance matrix and leadfield during computation of the beamformer weights. 'max-power-ori' : ndarray, shape (n_sources, 3) | None When pick_ori='max-power', this fields contains the estimated direction of maximum power at each source location. See Also -------- apply_dics_csd Notes ----- The original reference is :footcite:`GrossEtAl2001`. See :footcite:`vanVlietEtAl2018` for a tutorial style paper on the topic. The DICS beamformer is very similar to the LCMV (:func:`make_lcmv`) beamformer and many of the parameters are shared. However, :func:`make_dics` and :func:`make_lcmv` currently have different defaults for these parameters, which were settled on separately through extensive practical use case testing (but not necessarily exhaustive parameter space searching), and it remains to be seen how functionally interchangeable they could be. The default setting reproduce the DICS beamformer as described in :footcite:`vanVlietEtAl2018`:: inversion='single', weight_norm=None, depth=1. To use the :func:`make_lcmv` defaults, use:: inversion='matrix', weight_norm='unit-noise-gain-invariant', depth=None For more information about ``real_filter``, see the supplemental information from :footcite:`HippEtAl2011`. References ---------- .. 
footbibliography:: """ # noqa: E501 rank = _check_rank(rank) _check_option('pick_ori', pick_ori, [None, 'vector', 'normal', 'max-power']) _check_option('inversion', inversion, ['single', 'matrix']) _validate_type(weight_norm, (str, None), 'weight_norm') frequencies = [np.mean(freq_bin) for freq_bin in csd.frequencies] n_freqs = len(frequencies) _, _, allow_mismatch = _check_one_ch_type('dics', info, forward, csd, noise_csd) # remove bads so that equalize_channels only keeps all good info = pick_info(info, pick_channels(info['ch_names'], [], info['bads'])) info, forward, csd = equalize_channels([info, forward, csd]) csd, noise_csd = _prepare_noise_csd(csd, noise_csd, real_filter) depth = _check_depth(depth, 'depth_sparse') if inversion == 'single': depth['combine_xyz'] = False is_free_ori, info, proj, vertices, G, whitener, nn, orient_std = \ _prepare_beamformer_input( info, forward, label, pick_ori, noise_cov=noise_csd, rank=rank, pca=False, **depth) # Compute ranks csd_int_rank = [] if not allow_mismatch: noise_rank = compute_rank(noise_csd, info=info, rank=rank) for i in range(len(frequencies)): csd_rank = compute_rank(csd.get_data(index=i, as_cov=True), info=info, rank=rank) if not allow_mismatch: for key in csd_rank: if key not in noise_rank or csd_rank[key] != noise_rank[key]: raise ValueError('%s data rank (%s) did not match the ' 'noise rank (%s)' % (key, csd_rank[key], noise_rank.get(key, None))) csd_int_rank.append(sum(csd_rank.values())) del noise_csd ch_names = list(info['ch_names']) logger.info('Computing DICS spatial filters...') Ws = [] max_oris = [] for i, freq in enumerate(frequencies): if n_freqs > 1: logger.info(' computing DICS spatial filter at ' f'{round(freq, 2)} Hz ({i + 1}/{n_freqs})') Cm = csd.get_data(index=i) # XXX: Weird that real_filter happens *before* whitening, which could # make things complex again...? 
if real_filter: Cm = Cm.real # compute spatial filter n_orient = 3 if is_free_ori else 1 W, max_power_ori = _compute_beamformer( G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank=csd_int_rank[i], inversion=inversion, nn=nn, orient_std=orient_std, whitener=whitener) Ws.append(W) max_oris.append(max_power_ori) Ws = np.array(Ws) if pick_ori == 'max-power': max_oris = np.array(max_oris) else: max_oris = None src_type = _get_src_type(forward['src'], vertices) subject = _subject_from_forward(forward) is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False n_sources = np.sum([len(v) for v in vertices]) filters = Beamformer( kind='DICS', weights=Ws, csd=csd, ch_names=ch_names, proj=proj, vertices=vertices, n_sources=n_sources, subject=subject, pick_ori=pick_ori, inversion=inversion, weight_norm=weight_norm, src_type=src_type, source_nn=forward['source_nn'].copy(), is_free_ori=is_free_ori, whitener=whitener, max_power_ori=max_oris) return filters def _prepare_noise_csd(csd, noise_csd, real_filter): if noise_csd is not None: csd, noise_csd = equalize_channels([csd, noise_csd]) # Use the same noise CSD for all frequencies if len(noise_csd.frequencies) > 1: noise_csd = noise_csd.mean() noise_csd = noise_csd.get_data(as_cov=True) if real_filter: noise_csd['data'] = noise_csd['data'].real return csd, noise_csd def _apply_dics(data, filters, info, tmin, tfr=False): """Apply DICS spatial filter to data for source reconstruction.""" if isinstance(data, np.ndarray) and data.ndim == (2 + tfr): data = [data] one_epoch = True else: one_epoch = False Ws = filters['weights'] one_freq = len(Ws) == 1 subject = filters['subject'] # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) for i, M in enumerate(data): if not one_epoch: logger.info("Processing epoch : %d" % (i + 1)) # Apply SSPs if not tfr: # save computation, only compute once M_w = _proj_whiten_data(M, info['projs'], filters) stcs = [] for j, W in 
enumerate(Ws): if tfr: # must compute for each frequency M_w = _proj_whiten_data(M[:, j], info['projs'], filters) # project to source space using beamformer weights sol = np.dot(W, M_w) if filters['is_free_ori'] and filters['pick_ori'] != 'vector': logger.info('combining the current components...') sol = combine_xyz(sol) tstep = 1.0 / info['sfreq'] stcs.append(_make_stc(sol, vertices=filters['vertices'], src_type=filters['src_type'], tmin=tmin, tstep=tstep, subject=subject, vector=(filters['pick_ori'] == 'vector'), source_nn=filters['source_nn'], warn_text=warn_text)) if one_freq: yield stcs[0] else: yield stcs logger.info('[done]') @verbose def apply_dics(evoked, filters, verbose=None): """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights on evoked data. .. warning:: The result of this function is meant as an intermediate step for further processing (such as computing connectivity). If you are interested in estimating source time courses, use an LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`) instead. If you are interested in estimating spectral power at the source level, use :func:`apply_dics_csd`. .. warning:: This implementation has not been heavily tested so please report any issues or suggestions. Parameters ---------- evoked : Evoked Evoked data to apply the DICS beamformer weights to. filters : instance of Beamformer DICS spatial filter (beamformer weights) Filter weights returned from :func:`make_dics`. %(verbose)s Returns ------- stc : SourceEstimate | VolSourceEstimate | list Source time courses. If the DICS beamformer has been computed for more than one frequency, a list is returned containing for each frequency the corresponding time courses. 
See Also -------- apply_dics_epochs apply_dics_tfr_epochs apply_dics_csd """ # noqa: E501 _check_reference(evoked) info = evoked.info data = evoked.data tmin = evoked.times[0] sel = _check_channels_spatial_filter(evoked.ch_names, filters) data = data[sel] stc = _apply_dics(data=data, filters=filters, info=info, tmin=tmin) return next(stc) @verbose def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None): """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights on single trial data. .. warning:: The result of this function is meant as an intermediate step for further processing (such as computing connectivity). If you are interested in estimating source time courses, use an LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`) instead. If you are interested in estimating spectral power at the source level, use :func:`apply_dics_csd`. .. warning:: This implementation has not been heavily tested so please report any issue or suggestions. Parameters ---------- epochs : Epochs Single trial epochs. filters : instance of Beamformer DICS spatial filter (beamformer weights) Filter weights returned from :func:`make_dics`. The DICS filters must have been computed for a single frequency only. return_generator : bool Return a generator object instead of a list. This allows iterating over the stcs without having to keep them all in memory. %(verbose)s Returns ------- stc: list | generator of (SourceEstimate | VolSourceEstimate) The source estimates for all epochs. See Also -------- apply_dics apply_dics_tfr_epochs apply_dics_csd """ _check_reference(epochs) if len(filters['weights']) > 1: raise ValueError( 'This function only works on DICS beamformer weights that have ' 'been computed for a single frequency. When calling make_dics(), ' 'make sure to use a CSD object with only a single frequency (or ' 'frequency-bin) defined.' 
) info = epochs.info tmin = epochs.times[0] sel = _check_channels_spatial_filter(epochs.ch_names, filters) data = epochs.get_data()[:, sel, :] stcs = _apply_dics(data=data, filters=filters, info=info, tmin=tmin) if not return_generator: stcs = list(stcs) return stcs @verbose def apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=False, verbose=None): """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights on single trial time-frequency data. Parameters ---------- epochs_tfr : EpochsTFR Single trial time-frequency epochs. filters : instance of Beamformer DICS spatial filter (beamformer weights) Filter weights returned from :func:`make_dics`. return_generator : bool Return a generator object instead of a list. This allows iterating over the stcs without having to keep them all in memory. %(verbose)s Returns ------- stcs : list of list of (SourceEstimate | VectorSourceEstimate | VolSourceEstimate) The source estimates for all epochs (outside list) and for all frequencies (inside list). See Also -------- apply_dics apply_dics_epochs apply_dics_csd """ # noqa E501 _validate_type(epochs_tfr, EpochsTFR) _check_tfr_complex(epochs_tfr) if filters['pick_ori'] == 'vector': warn('Using a vector solution to compute power will lead to ' 'inaccurate directions (only in the first quadrent) ' 'because power is a strictly positive (squared) metric. ' 'Using singular value decomposition (SVD) to determine ' 'the direction is not yet supported in MNE.') sel = _check_channels_spatial_filter(epochs_tfr.ch_names, filters) data = epochs_tfr.data[:, sel, :, :] stcs = _apply_dics(data, filters, epochs_tfr.info, epochs_tfr.tmin, tfr=True) if not return_generator: stcs = [[stc for stc in tfr_stcs] for tfr_stcs in stcs] return stcs @verbose def apply_dics_csd(csd, filters, verbose=None): """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights. 
Apply a previously computed DICS beamformer to a cross-spectral density (CSD) object to estimate source power in time and frequency windows specified in the CSD object :footcite:`GrossEtAl2001`. .. note:: Only power can computed from the cross-spectral density, not complex phase-amplitude, so vector DICS filters will be converted to scalar source estimates since power is strictly positive and so 3D directions cannot be combined meaningfully (the direction would be confined to the positive quadrant). Parameters ---------- csd : instance of CrossSpectralDensity The data cross-spectral density (CSD) matrices. A source estimate is performed for each frequency or frequency-bin defined in the CSD object. filters : instance of Beamformer DICS spatial filter (beamformer weights) Filter weights returned from `make_dics`. %(verbose)s Returns ------- stc : SourceEstimate Source power with frequency instead of time. frequencies : list of float The frequencies for which the source power has been computed. If the data CSD object defines frequency-bins instead of exact frequencies, the mean of each bin is returned. See Also -------- apply_dics apply_dics_epochs apply_dics_tfr_epochs References ---------- .. 
footbibliography:: """ # noqa: E501 ch_names = filters['ch_names'] vertices = filters['vertices'] n_orient = 3 if filters['is_free_ori'] else 1 subject = filters['subject'] whitener = filters['whitener'] n_sources = filters['n_sources'] # If CSD is summed over multiple frequencies, take the average frequency frequencies = [np.mean(dfreq) for dfreq in csd.frequencies] n_freqs = len(frequencies) source_power = np.zeros((n_sources, len(csd.frequencies))) # Ensure the CSD is in the same order as the weights csd_picks = [csd.ch_names.index(ch) for ch in ch_names] logger.info('Computing DICS source power...') for i, freq in enumerate(frequencies): if n_freqs > 1: logger.info(' applying DICS spatial filter at ' f'{round(freq, 2)} Hz ({i + 1}/{n_freqs})') Cm = csd.get_data(index=i) Cm = Cm[csd_picks, :][:, csd_picks] W = filters['weights'][i] # Whiten the CSD Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) source_power[:, i] = _compute_power(Cm, W, n_orient) logger.info('[done]') # compatibility with 0.16, add src_type as None if not present: filters, warn_text = _check_src_type(filters) return (_make_stc(source_power, vertices=vertices, src_type=filters['src_type'], tmin=0., tstep=1., subject=subject, warn_text=warn_text), frequencies)
{ "content_hash": "8f5f361315fc5874acb5dd6fc609883d", "timestamp": "", "source": "github", "line_count": 573, "max_line_length": 86, "avg_line_length": 37.78359511343805, "alnum_prop": 0.6145034642032332, "repo_name": "mne-tools/mne-python", "id": "ec9937968fb02577e9913b1283e86f7ae9f6b277", "size": "21650", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "mne/beamformer/_dics.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Csound Document", "bytes": "24999" }, { "name": "JavaScript", "bytes": "8008" }, { "name": "Jinja", "bytes": "14962" }, { "name": "Makefile", "bytes": "4612" }, { "name": "Python", "bytes": "10364736" }, { "name": "Sass", "bytes": "257" }, { "name": "Shell", "bytes": "20137" } ], "symlink_target": "" }
import sys

from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot
from pip._internal.utils.misc import normalize_version_info
from pip._internal.utils.typing import MYPY_CHECK_RUNNING

if MYPY_CHECK_RUNNING:
    from typing import List, Optional, Tuple

    from pip._vendor.packaging.tags import Tag


class TargetPython(object):

    """
    Encapsulates the properties of a Python interpreter one is targeting
    for a package install, download, etc.
    """

    __slots__ = [
        "_given_py_version_info",
        "abis",
        "implementation",
        "platforms",
        "py_version",
        "py_version_info",
        "_valid_tags",
    ]

    def __init__(
        self,
        platforms=None,  # type: Optional[List[str]]
        py_version_info=None,  # type: Optional[Tuple[int, ...]]
        abis=None,  # type: Optional[List[str]]
        implementation=None,  # type: Optional[str]
    ):
        # type: (...) -> None
        """
        :param platforms: A list of strings or None. If None, searches for
            packages that are supported by the current system. Otherwise, will
            find packages that can be built on the platforms passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param py_version_info: An optional tuple of ints representing the
            Python version information to use (e.g. `sys.version_info[:3]`).
            This can have length 1, 2, or 3 when provided.
        :param abis: A list of strings or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        :param implementation: A string or None. This is passed to
            compatibility_tags.py's get_supported() function as is.
        """
        # Keep the raw caller-supplied value: get_tags() and format_given()
        # need to know whether a version was explicitly requested.
        self._given_py_version_info = py_version_info

        # Fall back to the running interpreter's version when nothing was
        # given; otherwise pad/trim the given tuple to exactly three parts.
        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)

        self.abis = abis
        self.implementation = implementation
        self.platforms = platforms
        # "major.minor" string form, e.g. "3.8".
        self.py_version = '.'.join(map(str, py_version_info[:2]))
        self.py_version_info = py_version_info

        # Lazily-computed cache for get_tags().
        self._valid_tags = None  # type: Optional[List[Tag]]

    def format_given(self):
        # type: () -> str
        """
        Format the given, non-None attributes for display.
        """
        if self._given_py_version_info is None:
            display_version = None
        else:
            display_version = '.'.join(
                map(str, self._given_py_version_info)
            )

        candidates = (
            ('platforms', self.platforms),
            ('version_info', display_version),
            ('abis', self.abis),
            ('implementation', self.implementation),
        )
        parts = []
        for key, value in candidates:
            if value is not None:
                parts.append('{}={!r}'.format(key, value))

        return ' '.join(parts)

    def get_tags(self):
        # type: () -> List[Tag]
        """
        Return the supported PEP 425 tags to check wheel candidates against.

        The tags are returned in order of preference (most preferred first).
        """
        if self._valid_tags is not None:
            return self._valid_tags

        # Passing versions=None lets get_supported() apply its special
        # default-version logic, so only convert to the "no dot" form when
        # the caller supplied an explicit version.
        given = self._given_py_version_info
        version = (
            None if given is None else version_info_to_nodot(given)
        )

        self._valid_tags = get_supported(
            version=version,
            platforms=self.platforms,
            abis=self.abis,
            impl=self.implementation,
        )
        return self._valid_tags
{ "content_hash": "fa932a54e807eed1094aaa6eaebc66ed", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 87, "avg_line_length": 34.78632478632478, "alnum_prop": 0.5803439803439804, "repo_name": "kimjinyong/i2nsf-framework", "id": "4593dc854f89846cf9507f0d0035ae799def5d8d", "size": "4070", "binary": false, "copies": "14", "ref": "refs/heads/master", "path": "Hackathon-112/analyzer/.local/lib/python3.5/site-packages/pip/_internal/models/target_python.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "4396520" }, { "name": "C++", "bytes": "9389" }, { "name": "CSS", "bytes": "51736" }, { "name": "Dockerfile", "bytes": "3839" }, { "name": "Emacs Lisp", "bytes": "24812" }, { "name": "Erlang", "bytes": "1364078" }, { "name": "HTML", "bytes": "42486541" }, { "name": "Hack", "bytes": "6349" }, { "name": "Java", "bytes": "7976" }, { "name": "JavaScript", "bytes": "533000" }, { "name": "Makefile", "bytes": "401170" }, { "name": "PHP", "bytes": "164007" }, { "name": "Perl", "bytes": "2188" }, { "name": "Python", "bytes": "3004949" }, { "name": "QMake", "bytes": "360" }, { "name": "Roff", "bytes": "3906372" }, { "name": "Shell", "bytes": "83872" }, { "name": "XSLT", "bytes": "167018" } ], "symlink_target": "" }
import os
import time


def handle_headers(frame, request, response):
    # Build a preload Link header pointing at the URL the test supplied in
    # the "resource-url" query parameter.
    preload_url = request.GET.first(b"resource-url").decode()
    link_value = "<%s>; rel=preload; as=script" % preload_url

    # Emit a 103 Early Hints informational response carrying the preload.
    response.writer.write_raw_header_frame(
        headers=[
            (b":status", b"103"),
            (b"link", link_value),
        ],
        end_headers=True,
    )

    # Sleep to simulate a slow generation of the final response.
    time.sleep(0.1)

    response.status = 200
    response.headers[b"content-type"] = "application/pdf"
    response.write_status_headers()


def main(request, response):
    # Serve the PDF that sits next to this handler as the response body.
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, "example.pdf"), "rb") as pdf_file:
        body = pdf_file.read()
    response.writer.write_data(item=body, last=True)
{ "content_hash": "243455349cfc5a7c83f6c730a26672e3", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 75, "avg_line_length": 34.73076923076923, "alnum_prop": 0.6334440753045404, "repo_name": "nwjs/chromium.src", "id": "0d05f2a3c57266ce183457b83dad020a4ce580fd", "size": "903", "binary": false, "copies": "13", "ref": "refs/heads/nw70", "path": "third_party/blink/web_tests/external/wpt/loading/early-hints/resources/pdf-with-early-hints.h2.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
import contextlib import copy import inspect import logging import mock import six import webob.exc as wexc from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import base from neutron.common import exceptions as n_exc from neutron import context from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db import l3_db from neutron.extensions import portbindings from neutron.extensions import providernet as provider from neutron import manager from neutron.openstack.common import gettextutils from neutron.plugins.cisco.common import cisco_constants as const from neutron.plugins.cisco.common import cisco_exceptions as c_exc from neutron.plugins.cisco.common import config as cisco_config from neutron.plugins.cisco.db import network_db_v2 from neutron.plugins.cisco.db import nexus_db_v2 from neutron.plugins.cisco.models import virt_phy_sw_v2 from neutron.plugins.openvswitch.common import config as ovs_config from neutron.plugins.openvswitch import ovs_db_v2 from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit import test_db_plugin from neutron.tests.unit import test_extensions LOG = logging.getLogger(__name__) CORE_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2' NEXUS_PLUGIN = 'neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin' NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.' 
'cisco_nexus_network_driver_v2.CiscoNEXUSDriver') PHYS_NET = 'physnet1' BRIDGE_NAME = 'br-eth1' VLAN_START = 1000 VLAN_END = 1100 COMP_HOST_NAME = 'testhost' COMP_HOST_NAME_2 = 'testhost_2' NEXUS_IP_ADDR = '1.1.1.1' NEXUS_DEV_ID = 'NEXUS_SWITCH' NEXUS_USERNAME = 'admin' NEXUS_PASSWORD = 'mySecretPassword' NEXUS_SSH_PORT = 22 NEXUS_INTERFACE = '1/1' NEXUS_INTERFACE_2 = '1/2' NEXUS_PORT_1 = 'ethernet:1/1' NEXUS_PORT_2 = 'ethernet:1/2' NETWORK_NAME = 'test_network' CIDR_1 = '10.0.0.0/24' CIDR_2 = '10.0.1.0/24' DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111' DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222' DEVICE_OWNER = 'compute:None' class CiscoNetworkPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase): def setUp(self): """Configure for end-to-end neutron testing using a mock ncclient. This setup includes: - Configure the OVS plugin to use VLANs in the range of VLAN_START-VLAN_END. - Configure the Cisco plugin model to use the Nexus driver. - Configure the Nexus driver to use an imaginary switch at NEXUS_IP_ADDR. 
""" # Configure the OVS and Cisco plugins phys_bridge = ':'.join([PHYS_NET, BRIDGE_NAME]) phys_vlan_range = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)]) config = { ovs_config: { 'OVS': {'bridge_mappings': phys_bridge, 'network_vlan_ranges': [phys_vlan_range], 'tenant_network_type': 'vlan'} }, cisco_config: { 'CISCO': {'nexus_driver': NEXUS_DRIVER}, 'CISCO_PLUGINS': {'nexus_plugin': NEXUS_PLUGIN}, } } for module in config: for group in config[module]: for opt, val in config[module][group].items(): module.cfg.CONF.set_override(opt, val, group) # Configure the Nexus switch dictionary # TODO(Henry): add tests for other devices nexus_config = { (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'username'): NEXUS_USERNAME, (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'password'): NEXUS_PASSWORD, (NEXUS_DEV_ID, NEXUS_IP_ADDR, 'ssh_port'): NEXUS_SSH_PORT, (NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE, (NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2, } nexus_patch = mock.patch.dict(cisco_config.device_dictionary, nexus_config) nexus_patch.start() self.addCleanup(nexus_patch.stop) # Use a mock netconf client self.mock_ncclient = mock.Mock() ncclient_patch = mock.patch.dict('sys.modules', {'ncclient': self.mock_ncclient}) ncclient_patch.start() self.addCleanup(ncclient_patch.stop) # Call the parent setUp, start the core plugin super(CiscoNetworkPluginV2TestCase, self).setUp(CORE_PLUGIN) self.port_create_status = 'DOWN' # Set Cisco config module's first configured Nexus IP address. # Used for SVI placement when round-robin placement is disabled. mock.patch.object(cisco_config, 'first_device_ip', new=NEXUS_IP_ADDR).start() def _get_plugin_ref(self): return getattr(manager.NeutronManager.get_plugin(), "_model")._plugins[const.VSWITCH_PLUGIN] @contextlib.contextmanager def _patch_ncclient(self, attr, value): """Configure an attribute on the mock ncclient module. This method can be used to inject errors by setting a side effect or a return value for an ncclient method. 
:param attr: ncclient attribute (typically method) to be configured. :param value: Value to be configured on the attribute. """ # Configure attribute. config = {attr: value} self.mock_ncclient.configure_mock(**config) # Continue testing yield # Unconfigure attribute config = {attr: None} self.mock_ncclient.configure_mock(**config) @staticmethod def _config_dependent_side_effect(match_config, exc): """Generates a config-dependent side effect for ncclient edit_config. This method generates a mock side-effect function which can be configured on the mock ncclient module for the edit_config method. This side effect will cause a given exception to be raised whenever the XML config string that is passed to edit_config contains all words in a given match config string. :param match_config: String containing keywords to be matched :param exc: Exception to be raised when match is found :return: Side effect function for the mock ncclient module's edit_config method. """ keywords = match_config.split() def _side_effect_function(target, config): if all(word in config for word in keywords): raise exc return _side_effect_function def _is_in_nexus_cfg(self, words): """Check if any config sent to Nexus contains all words in a list.""" for call in (self.mock_ncclient.manager.connect.return_value. edit_config.mock_calls): configlet = call[2]['config'] if all(word in configlet for word in words): return True return False def _is_in_last_nexus_cfg(self, words): """Check if last config sent to Nexus contains all words in a list.""" last_cfg = (self.mock_ncclient.manager.connect.return_value. 
edit_config.mock_calls[-1][2]['config']) return all(word in last_cfg for word in words) def _is_vlan_configured(self, vlan_creation_expected=True, add_keyword_expected=False): vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name']) add_appears = self._is_in_last_nexus_cfg(['add']) return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and vlan_created == vlan_creation_expected and add_appears == add_keyword_expected) def _is_vlan_unconfigured(self, vlan_deletion_expected=True, vlan_untrunk_expected=True): vlan_deleted = self._is_in_nexus_cfg( ['no', 'vlan', 'vlan-id-create-delete']) vlan_untrunked = self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) return (vlan_deleted == vlan_deletion_expected and vlan_untrunked == vlan_untrunk_expected) def _assertExpectedHTTP(self, status, exc): """Confirm that an HTTP status corresponds to an expected exception. Confirm that an HTTP status which has been returned for an neutron API request matches the HTTP status corresponding to an expected exception. :param status: HTTP status :param exc: Expected exception """ if exc in base.FAULT_MAP: expected_http = base.FAULT_MAP[exc].code else: expected_http = wexc.HTTPInternalServerError.code self.assertEqual(status, expected_http) class TestCiscoGetAttribute(CiscoNetworkPluginV2TestCase): def test_get_unsupported_attr_in_lazy_gettext_mode(self): """Test get of unsupported attribute in lazy gettext mode. This test also checks that this operation does not cause excessive nesting of calls to deepcopy. 
""" plugin = manager.NeutronManager.get_plugin() def _lazy_gettext(msg): return gettextutils.Message(msg, domain='neutron') with mock.patch.dict(six.moves.builtins.__dict__, {'_': _lazy_gettext}): self.nesting_count = 0 def _count_nesting(*args, **kwargs): self.nesting_count += 1 with mock.patch.object(copy, 'deepcopy', side_effect=_count_nesting, wraps=copy.deepcopy): self.assertRaises(AttributeError, getattr, plugin, 'an_unsupported_attribute') # If there were no nested calls to deepcopy, then the total # number of calls to deepcopy should be 2 (1 call for # each mod'd field in the AttributeError message raised # by the plugin). self.assertEqual(self.nesting_count, 2) class TestCiscoBasicGet(CiscoNetworkPluginV2TestCase, test_db_plugin.TestBasicGet): pass class TestCiscoV2HTTPResponse(CiscoNetworkPluginV2TestCase, test_db_plugin.TestV2HTTPResponse): pass class TestCiscoPortsV2(CiscoNetworkPluginV2TestCase, test_db_plugin.TestPortsV2, test_bindings.PortBindingsHostTestCaseMixin): @contextlib.contextmanager def _create_port_res(self, name=NETWORK_NAME, cidr=CIDR_1, do_delete=True, host_id=COMP_HOST_NAME): """Create a network, subnet, and port and yield the result. Create a network, subnet, and port, yield the result, then delete the port, subnet, and network. 
:param name: Name of network to be created :param cidr: cidr address of subnetwork to be created :param do_delete: If set to True, delete the port at the end of testing :param host_id: Name of compute host to use for testing """ ctx = context.get_admin_context() with self.network(name=name) as network: with self.subnet(network=network, cidr=cidr) as subnet: net_id = subnet['subnet']['network_id'] args = (portbindings.HOST_ID, 'device_id', 'device_owner') port_dict = {portbindings.HOST_ID: host_id, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER} res = self._create_port(self.fmt, net_id, arg_list=args, context=ctx, **port_dict) port = self.deserialize(self.fmt, res) yield res if do_delete: self._delete('ports', port['port']['id']) def test_create_ports_bulk_emulated_plugin_failure(self): real_has_attr = hasattr #ensures the API choose the emulation code path def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) with mock.patch('__builtin__.hasattr', new=fakehasattr): plugin_ref = self._get_plugin_ref() orig = plugin_ref.create_port with mock.patch.object(plugin_ref, 'create_port') as patched_plugin: def side_effect(*args, **kwargs): return self._do_side_effect(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect with self.network() as net: res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True) # Expect an internal server error as we injected a fault self._validate_behavior_on_bulk_failure( res, 'ports', wexc.HTTPInternalServerError.code) def test_create_ports_bulk_native(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") def test_create_ports_bulk_emulated(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") def test_create_ports_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk port create") 
ctx = context.get_admin_context() with self.network() as net: plugin_ref = self._get_plugin_ref() orig = plugin_ref.create_port with mock.patch.object(plugin_ref, 'create_port') as patched_plugin: def side_effect(*args, **kwargs): return self._do_side_effect(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_port_bulk(self.fmt, 2, net['network']['id'], 'test', True, context=ctx) # We expect an internal server error as we injected a fault self._validate_behavior_on_bulk_failure( res, 'ports', wexc.HTTPInternalServerError.code) def test_nexus_enable_vlan_cmd(self): """Verify the syntax of the command to enable a vlan on an intf.""" # First vlan should be configured without 'add' keyword with self._create_port_res(name='net1', cidr=CIDR_1): self.assertTrue(self._is_vlan_configured( vlan_creation_expected=True, add_keyword_expected=False)) self.mock_ncclient.reset_mock() # Second vlan should be configured with 'add' keyword with self._create_port_res(name='net2', cidr=CIDR_2): self.assertTrue(self._is_vlan_configured( vlan_creation_expected=True, add_keyword_expected=True)) def test_nexus_vlan_config_two_hosts(self): """Verify config/unconfig of vlan on two compute hosts.""" @contextlib.contextmanager def _create_port_check_vlan(comp_host_name, device_id, vlan_creation_expected=True): arg_list = (portbindings.HOST_ID,) port_dict = {portbindings.HOST_ID: comp_host_name, 'device_id': device_id, 'device_owner': DEVICE_OWNER} with self.port(subnet=subnet, fmt=self.fmt, arg_list=arg_list, **port_dict): self.assertTrue(self._is_vlan_configured( vlan_creation_expected=vlan_creation_expected, add_keyword_expected=False)) self.mock_ncclient.reset_mock() yield # Create network and subnet with self.network(name=NETWORK_NAME) as network: with self.subnet(network=network, cidr=CIDR_1) as subnet: # Create an instance on first compute host with _create_port_check_vlan( COMP_HOST_NAME, DEVICE_ID_1, vlan_creation_expected=True): # Create 
an instance on second compute host with _create_port_check_vlan( COMP_HOST_NAME_2, DEVICE_ID_2, vlan_creation_expected=False): pass # Instance on second host is now terminated. # Vlan should be untrunked from port, but vlan should # still exist on the switch. self.assertTrue(self._is_vlan_unconfigured( vlan_deletion_expected=False)) self.mock_ncclient.reset_mock() # Instance on first host is now terminated. # Vlan should be untrunked from port and vlan should have # been deleted from the switch. self.assertTrue(self._is_vlan_unconfigured( vlan_deletion_expected=True)) def test_nexus_connect_fail(self): """Test failure to connect to a Nexus switch. While creating a network, subnet, and port, simulate a connection failure to a nexus switch. Confirm that the expected HTTP code is returned for the create port operation. """ with self._patch_ncclient('manager.connect.side_effect', AttributeError): with self._create_port_res(do_delete=False) as res: self._assertExpectedHTTP(res.status_int, c_exc.NexusConnectFailed) def test_nexus_config_fail(self): """Test a Nexus switch configuration failure. While creating a network, subnet, and port, simulate a nexus switch configuration error. Confirm that the expected HTTP code is returned for the create port operation. """ with self._patch_ncclient( 'manager.connect.return_value.edit_config.side_effect', AttributeError): with self._create_port_res(do_delete=False) as res: self._assertExpectedHTTP(res.status_int, c_exc.NexusConfigFailed) def test_nexus_extended_vlan_range_failure(self): """Test that extended VLAN range config errors are ignored. Some versions of Nexus switch do not allow state changes for the extended VLAN range (1006-4094), but these errors can be ignored (default values are appropriate). Test that such errors are ignored by the Nexus plugin. 
""" config_err_strings = { "state active": "Can't modify state for extended", "no shutdown": "Command is only allowed on VLAN", } for config, err_string in config_err_strings.items(): with self._patch_ncclient( 'manager.connect.return_value.edit_config.side_effect', self._config_dependent_side_effect(config, Exception(err_string))): with self._create_port_res() as res: self.assertEqual(res.status_int, wexc.HTTPCreated.code) def test_nexus_vlan_config_rollback(self): """Test rollback following Nexus VLAN state config failure. Test that the Cisco Nexus plugin correctly deletes the VLAN on the Nexus switch when the 'state active' command fails (for a reason other than state configuration change is rejected for the extended VLAN range). """ vlan_state_configs = ['state active', 'no shutdown'] for config in vlan_state_configs: with self._patch_ncclient( 'manager.connect.return_value.edit_config.side_effect', self._config_dependent_side_effect(config, ValueError)): with self._create_port_res(do_delete=False) as res: # Confirm that the last configuration sent to the Nexus # switch was deletion of the VLAN. self.assertTrue( self._is_in_last_nexus_cfg(['<no>', '<vlan>']) ) self._assertExpectedHTTP(res.status_int, c_exc.NexusConfigFailed) def test_get_seg_id_fail(self): """Test handling of a NetworkSegmentIDNotFound exception. Test the Cisco NetworkSegmentIDNotFound exception by simulating a return of None by the OVS DB get_network_binding method during port creation. 
""" orig = ovs_db_v2.get_network_binding def _return_none_if_nexus_caller(self, *args, **kwargs): def _calling_func_name(offset=0): """Get name of the calling function 'offset' frames back.""" return inspect.stack()[1 + offset][3] if (_calling_func_name(1) == '_get_segmentation_id' and _calling_func_name(2) == '_invoke_nexus_for_net_create'): return None else: return orig(self, *args, **kwargs) with mock.patch.object(ovs_db_v2, 'get_network_binding', new=_return_none_if_nexus_caller): with self._create_port_res(do_delete=False) as res: self._assertExpectedHTTP(res.status_int, c_exc.NetworkSegmentIDNotFound) def test_nexus_host_non_configured(self): """Test handling of a NexusComputeHostNotConfigured exception. Test the Cisco NexusComputeHostNotConfigured exception by using a fictitious host name during port creation. """ with self._create_port_res(do_delete=False, host_id='fakehost') as res: self._assertExpectedHTTP(res.status_int, c_exc.NexusComputeHostNotConfigured) def _check_rollback_on_bind_failure(self, vlan_deletion_expected, vlan_untrunk_expected): """Test for proper rollback following add Nexus DB binding failure. Test that the Cisco Nexus plugin correctly rolls back the vlan configuration on the Nexus switch when add_nexusport_binding fails within the plugin's create_port() method. """ inserted_exc = KeyError with mock.patch.object(nexus_db_v2, 'add_nexusport_binding', side_effect=inserted_exc): with self._create_port_res(do_delete=False) as res: # Confirm that the configuration sent to the Nexus # switch includes deletion of the vlan (if expected) # and untrunking of the vlan from the ethernet interface # (if expected). 
self.assertTrue(self._is_vlan_unconfigured( vlan_deletion_expected=vlan_deletion_expected, vlan_untrunk_expected=vlan_untrunk_expected)) self._assertExpectedHTTP(res.status_int, inserted_exc) def test_nexus_rollback_on_bind_failure_non_provider_vlan(self): """Test rollback upon DB binding failure for non-provider vlan.""" self._check_rollback_on_bind_failure(vlan_deletion_expected=True, vlan_untrunk_expected=True) def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_create(self): """Test rollback on bind fail for prov vlan w auto-create disabled.""" with mock.patch.object(network_db_v2, 'is_provider_vlan', return_value=True): # Disable auto-create. This config change will be cleared based # on cleanup scheduled in the CiscoNetworkPluginV2TestCase # class' setUp() method. cisco_config.CONF.set_override('provider_vlan_auto_create', False, 'CISCO') self._check_rollback_on_bind_failure(vlan_deletion_expected=False, vlan_untrunk_expected=True) def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_trunk(self): """Test rollback on bind fail for prov vlan w auto-trunk disabled.""" with mock.patch.object(network_db_v2, 'is_provider_vlan', return_value=True): # Disable auto-trunk. This config change will be cleared # based on post-test cleanup scheduled in the # CiscoNetworkPluginV2TestCase class' setUp() method. cisco_config.CONF.set_override('provider_vlan_auto_trunk', False, 'CISCO') self._check_rollback_on_bind_failure(vlan_deletion_expected=True, vlan_untrunk_expected=False) def test_model_update_port_rollback(self): """Test for proper rollback for Cisco model layer update port failure. Test that the vSwitch plugin port configuration is rolled back (restored) by the Cisco plugin model layer when there is a failure in the Nexus sub-plugin for an update port operation. 
The update port operation simulates a port attachment scenario: first a port is created with no instance (null device_id), and then a port update is requested with a non-null device_id to simulate the port attachment. """ with self.port(fmt=self.fmt, device_id='', device_owner=DEVICE_OWNER) as orig_port: inserted_exc = ValueError with mock.patch.object( virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_nexus_for_net_create', side_effect=inserted_exc): # Send an update port request including a non-null device ID data = {'port': {'device_id': DEVICE_ID_2, 'device_owner': DEVICE_OWNER, portbindings.HOST_ID: COMP_HOST_NAME}} port_id = orig_port['port']['id'] req = self.new_update_request('ports', data, port_id) res = req.get_response(self.api) # Sanity check failure result code self._assertExpectedHTTP(res.status_int, inserted_exc) # Check that the port still has the original device ID plugin = base_plugin.NeutronDbPluginV2() ctx = context.get_admin_context() db_port = plugin._get_port(ctx, port_id) self.assertEqual(db_port['device_id'], orig_port['port']['device_id']) def test_model_delete_port_rollback(self): """Test for proper rollback for OVS plugin delete port failure. Test that the nexus port configuration is rolled back (restored) by the Cisco model plugin when there is a failure in the OVS plugin for a delete port operation. """ with self._create_port_res() as res: # After port is created, we should have one binding for this # vlan/nexus switch. port = self.deserialize(self.fmt, res) start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, NEXUS_IP_ADDR) self.assertEqual(len(start_rows), 1) # Inject an exception in the OVS plugin delete_port # processing, and attempt a port deletion. 
inserted_exc = n_exc.Conflict expected_http = base.FAULT_MAP[inserted_exc].code with mock.patch.object(l3_db.L3_NAT_db_mixin, 'disassociate_floatingips', side_effect=inserted_exc): self._delete('ports', port['port']['id'], expected_code=expected_http) # Confirm that the Cisco model plugin has restored # the nexus configuration for this port after deletion failure. end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, NEXUS_IP_ADDR) self.assertEqual(start_rows, end_rows) def test_nexus_delete_port_rollback(self): """Test for proper rollback for nexus plugin delete port failure. Test for rollback (i.e. restoration) of a VLAN entry in the nexus database whenever the nexus plugin fails to reconfigure the nexus switch during a delete_port operation. """ with self._create_port_res() as res: port = self.deserialize(self.fmt, res) # Check that there is only one binding in the nexus database # for this VLAN/nexus switch. start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, NEXUS_IP_ADDR) self.assertEqual(len(start_rows), 1) # Simulate a Nexus switch configuration error during # port deletion. with self._patch_ncclient( 'manager.connect.return_value.edit_config.side_effect', AttributeError): self._delete('ports', port['port']['id'], base.FAULT_MAP[c_exc.NexusConfigFailed].code) # Confirm that the binding has been restored (rolled back). end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START, NEXUS_IP_ADDR) self.assertEqual(start_rows, end_rows) def test_model_update_port_attach(self): """Test the model for update_port in attaching to an instance. Mock the routines that call into the plugin code, and make sure they are called with correct arguments. 
""" with contextlib.nested( self.port(), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_plugin_per_device'), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_nexus_for_net_create') ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER}} req = self.new_update_request('ports', data, port['port']['id']) # Note, due to mocking out the two model routines, response won't # contain any useful data req.get_response(self.api) # Note that call_args_list is used instead of # assert_called_once_with which requires exact match of arguments. # This is because the mocked routines contain variable number of # arguments and/or dynamic objects. self.assertEqual(invoke_plugin_per_device.call_count, 1) self.assertEqual( invoke_plugin_per_device.call_args_list[0][0][0:2], (const.VSWITCH_PLUGIN, 'update_port')) self.assertEqual(invoke_nexus_for_net_create.call_count, 1) self.assertEqual( invoke_nexus_for_net_create.call_args_list[0][0][1:], (port['port']['tenant_id'], port['port']['network_id'], data['port']['device_id'], data['port'][portbindings.HOST_ID],)) def test_model_update_port_migrate(self): """Test the model for update_port in migrating an instance. Mock the routines that call into the plugin code, and make sure they are called with correct arguments. 
""" arg_list = (portbindings.HOST_ID,) data = {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER} with contextlib.nested( self.port(arg_list=arg_list, **data), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_plugin_per_device'), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_nexus_for_net_create') ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}} req = self.new_update_request('ports', data, port['port']['id']) # Note, due to mocking out the two model routines, response won't # contain any useful data req.get_response(self.api) # Note that call_args_list is used instead of # assert_called_once_with which requires exact match of arguments. # This is because the mocked routines contain variable number of # arguments and/or dynamic objects. self.assertEqual(invoke_plugin_per_device.call_count, 2) self.assertEqual( invoke_plugin_per_device.call_args_list[0][0][0:2], (const.VSWITCH_PLUGIN, 'update_port')) self.assertEqual( invoke_plugin_per_device.call_args_list[1][0][0:2], (const.NEXUS_PLUGIN, 'delete_port')) self.assertEqual(invoke_nexus_for_net_create.call_count, 1) self.assertEqual( invoke_nexus_for_net_create.call_args_list[0][0][1:], (port['port']['tenant_id'], port['port']['network_id'], port['port']['device_id'], data['port'][portbindings.HOST_ID],)) def test_model_update_port_net_create_not_needed(self): """Test the model for update_port when no action is needed. Mock the routines that call into the plugin code, and make sure that VSWITCH plugin is called with correct arguments, while NEXUS plugin is not called at all. 
""" arg_list = (portbindings.HOST_ID,) data = {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER} with contextlib.nested( self.port(arg_list=arg_list, **data), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_plugin_per_device'), mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2, '_invoke_nexus_for_net_create') ) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create): data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER}} req = self.new_update_request('ports', data, port['port']['id']) # Note, due to mocking out the two model routines, response won't # contain any useful data req.get_response(self.api) # Note that call_args_list is used instead of # assert_called_once_with which requires exact match of arguments. # This is because the mocked routines contain variable number of # arguments and/or dynamic objects. self.assertEqual(invoke_plugin_per_device.call_count, 1) self.assertEqual( invoke_plugin_per_device.call_args_list[0][0][0:2], (const.VSWITCH_PLUGIN, 'update_port')) self.assertFalse(invoke_nexus_for_net_create.called) def verify_portbinding(self, host_id1, host_id2, vlan, device_id, binding_port): """Verify a port binding entry in the DB is correct.""" self.assertEqual(host_id1, host_id2) pb = nexus_db_v2.get_nexusvm_bindings(vlan, device_id) self.assertEqual(len(pb), 1) self.assertEqual(pb[0].port_id, binding_port) self.assertEqual(pb[0].switch_ip, NEXUS_IP_ADDR) def test_db_update_port_attach(self): """Test DB for update_port in attaching to an instance. 
Query DB for the port binding entry corresponding to the search key (vlan, device_id), and make sure that it's bound to correct switch port """ with self.port() as port: data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) ctx = context.get_admin_context() net = self._show('networks', res['port']['network_id'], neutron_context=ctx)['network'] self.assertTrue(attributes.is_attr_set( net.get(provider.SEGMENTATION_ID))) vlan = net[provider.SEGMENTATION_ID] self.assertEqual(vlan, VLAN_START) self.verify_portbinding(res['port'][portbindings.HOST_ID], data['port'][portbindings.HOST_ID], vlan, data['port']['device_id'], NEXUS_PORT_1) def test_db_update_port_migrate(self): """Test DB for update_port in migrating an instance. Query DB for the port binding entry corresponding to the search key (vlan, device_id), and make sure that it's bound to correct switch port before and after the migration. 
""" arg_list = (portbindings.HOST_ID,) data = {portbindings.HOST_ID: COMP_HOST_NAME, 'device_id': DEVICE_ID_1, 'device_owner': DEVICE_OWNER} with self.port(arg_list=arg_list, **data) as port: ctx = context.get_admin_context() net = self._show('networks', port['port']['network_id'], neutron_context=ctx)['network'] self.assertTrue(attributes.is_attr_set( net.get(provider.SEGMENTATION_ID))) vlan = net[provider.SEGMENTATION_ID] self.assertEqual(vlan, VLAN_START) self.verify_portbinding(port['port'][portbindings.HOST_ID], data[portbindings.HOST_ID], vlan, data['device_id'], NEXUS_PORT_1) new_data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}} req = self.new_update_request('ports', new_data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.verify_portbinding(res['port'][portbindings.HOST_ID], new_data['port'][portbindings.HOST_ID], vlan, data['device_id'], NEXUS_PORT_2) def test_delete_ports_by_device_id_second_call_failure(self): plugin_ref = self._get_plugin_ref() self._test_delete_ports_by_device_id_second_call_failure(plugin_ref) def test_delete_ports_ignores_port_not_found(self): plugin_ref = self._get_plugin_ref() self._test_delete_ports_ignores_port_not_found(plugin_ref) class TestCiscoNetworksV2(CiscoNetworkPluginV2TestCase, test_db_plugin.TestNetworksV2): def test_create_networks_bulk_emulated_plugin_failure(self): real_has_attr = hasattr def fakehasattr(item, attr): if attr.endswith('__native_bulk_support'): return False return real_has_attr(item, attr) plugin_ref = self._get_plugin_ref() orig = plugin_ref.create_network #ensures the API choose the emulation code path with mock.patch('__builtin__.hasattr', new=fakehasattr): with mock.patch.object(plugin_ref, 'create_network') as patched_plugin: def side_effect(*args, **kwargs): return self._do_side_effect(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) LOG.debug("response is %s" 
% res) # We expect an internal server error as we injected a fault self._validate_behavior_on_bulk_failure( res, 'networks', wexc.HTTPInternalServerError.code) def test_create_networks_bulk_native_plugin_failure(self): if self._skip_native_bulk: self.skipTest("Plugin does not support native bulk network create") plugin_ref = self._get_plugin_ref() orig = plugin_ref.create_network with mock.patch.object(plugin_ref, 'create_network') as patched_plugin: def side_effect(*args, **kwargs): return self._do_side_effect(patched_plugin, orig, *args, **kwargs) patched_plugin.side_effect = side_effect res = self._create_network_bulk(self.fmt, 2, 'test', True) # We expect an internal server error as we injected a fault self._validate_behavior_on_bulk_failure( res, 'networks', wexc.HTTPInternalServerError.code) @contextlib.contextmanager def _provider_vlan_network(self, phys_net, segment_id, net_name): provider_attrs = {provider.NETWORK_TYPE: 'vlan', provider.PHYSICAL_NETWORK: phys_net, provider.SEGMENTATION_ID: segment_id} arg_list = tuple(provider_attrs.keys()) res = self._create_network(self.fmt, net_name, True, arg_list=arg_list, **provider_attrs) network = self.deserialize(self.fmt, res)['network'] yield network req = self.new_delete_request('networks', network['id']) req.get_response(self.api) def test_create_provider_vlan_network(self): with self._provider_vlan_network(PHYS_NET, '1234', 'pvnet1') as network: expected = [('name', 'pvnet1'), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (provider.NETWORK_TYPE, 'vlan'), (provider.PHYSICAL_NETWORK, PHYS_NET), (provider.SEGMENTATION_ID, 1234)] for k, v in expected: self.assertEqual(network[k], v) self.assertTrue(network_db_v2.is_provider_network(network['id'])) def test_delete_provider_vlan_network(self): with self._provider_vlan_network(PHYS_NET, '1234', 'pvnet1') as network: network_id = network['id'] # Provider network should now be deleted self.assertFalse(network_db_v2.is_provider_network(network_id)) 
class TestCiscoSubnetsV2(CiscoNetworkPluginV2TestCase,
                         test_db_plugin.TestSubnetsV2):
    """Subnet API tests run against the Cisco network plugin."""

    def test_create_subnets_bulk_emulated_plugin_failure(self):
        real_has_attr = hasattr

        #ensures the API choose the emulation code path
        def fakehasattr(item, attr):
            if attr.endswith('__native_bulk_support'):
                return False
            return real_has_attr(item, attr)

        with mock.patch('__builtin__.hasattr',
                        new=fakehasattr):
            plugin_ref = self._get_plugin_ref()
            orig = plugin_ref.create_subnet
            with mock.patch.object(plugin_ref,
                                   'create_subnet') as patched_plugin:

                def side_effect(*args, **kwargs):
                    # Let the first call through, fail the second one
                    # (see _do_side_effect in the base test class).
                    self._do_side_effect(patched_plugin, orig,
                                         *args, **kwargs)

                patched_plugin.side_effect = side_effect
                with self.network() as net:
                    res = self._create_subnet_bulk(self.fmt, 2,
                                                   net['network']['id'],
                                                   'test')
                # We expect an internal server error as we injected a fault
                self._validate_behavior_on_bulk_failure(
                    res,
                    'subnets',
                    wexc.HTTPInternalServerError.code)

    def test_create_subnets_bulk_native_plugin_failure(self):
        if self._skip_native_bulk:
            self.skipTest("Plugin does not support native bulk subnet create")
        plugin_ref = self._get_plugin_ref()
        orig = plugin_ref.create_subnet
        with mock.patch.object(plugin_ref,
                               'create_subnet') as patched_plugin:
            def side_effect(*args, **kwargs):
                return self._do_side_effect(patched_plugin, orig,
                                            *args, **kwargs)

            patched_plugin.side_effect = side_effect
            with self.network() as net:
                res = self._create_subnet_bulk(self.fmt, 2,
                                               net['network']['id'],
                                               'test')
            # We expect an internal server error as we injected a fault
            self._validate_behavior_on_bulk_failure(
                res,
                'subnets',
                wexc.HTTPInternalServerError.code)


class TestCiscoRouterInterfacesV2(CiscoNetworkPluginV2TestCase):
    """Router interface tests run against the Cisco network plugin."""

    def setUp(self):
        """Configure a log exception counter and an API extension manager."""
        self.log_exc_count = 0

        def _count_exception_logs(*args, **kwargs):
            self.log_exc_count += 1

        # Count exception log calls while still delegating to the real
        # LoggerAdapter.exception implementation.
        mock.patch.object(logging.LoggerAdapter, 'exception',
                          autospec=True,
                          side_effect=_count_exception_logs,
                          wraps=logging.LoggerAdapter.exception).start()
        super(TestCiscoRouterInterfacesV2, self).setUp()
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)

    @contextlib.contextmanager
    def _network_subnet_router(self):
        """Context mgr for creating/deleting a net, subnet, and router."""
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                data = {'router': {'tenant_id': 'test_tenant_id'}}
                request = self.new_create_request('routers', data, self.fmt)
                response = request.get_response(self.ext_api)
                router = self.deserialize(self.fmt, response)
                yield network, subnet, router
                self._delete('routers', router['router']['id'])

    @contextlib.contextmanager
    def _router_interface(self, router, subnet, **kwargs):
        """Create a router interface, yield the response, then delete it."""
        interface_data = {}
        if subnet:
            interface_data['subnet_id'] = subnet['subnet']['id']
        interface_data.update(kwargs)
        request = self.new_action_request('routers', interface_data,
                                          router['router']['id'],
                                          'add_router_interface')
        response = request.get_response(self.ext_api)

        yield response

        # If router interface was created successfully, delete it now.
        if response.status_int == wexc.HTTPOk.code:
            request = self.new_action_request('routers', interface_data,
                                              router['router']['id'],
                                              'remove_router_interface')
            request.get_response(self.ext_api)

    @contextlib.contextmanager
    def _network_subnet_router_interface(self, **kwargs):
        """Context mgr for create/deleting a net, subnet, router and intf."""
        with self._network_subnet_router() as (network, subnet, router):
            with self._router_interface(router, subnet,
                                        **kwargs) as response:
                yield response

    def test_port_list_filtered_by_router_id(self):
        """Test port list command filtered by router ID."""
        with self._network_subnet_router() as (network, subnet, router):
            with self._router_interface(router, subnet):
                query_params = "device_id=%s" % router['router']['id']
                req = self.new_list_request('ports', self.fmt, query_params)
                res = self.deserialize(self.fmt, req.get_response(self.api))
                self.assertEqual(len(res['ports']), 1)
                self.assertEqual(res['ports'][0]['device_id'],
                                 router['router']['id'])
                # No exceptions should have been logged along the way.
                self.assertFalse(self.log_exc_count)

    def test_add_remove_router_intf_with_nexus_l3_enabled(self):
        """Verifies proper add/remove intf operation with Nexus L3 enabled.

        With 'nexus_l3_enable' configured to True, confirm that a switched
        virtual interface (SVI) is created/deleted on the Nexus switch when
        a virtual router interface is created/deleted.
        """
        cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
        with self._network_subnet_router_interface():
            self.assertTrue(self._is_in_last_nexus_cfg(
                ['interface', 'vlan', 'ip', 'address']))
            # Clear list of calls made to mock ncclient
            self.mock_ncclient.reset()
        # Router interface is now deleted. Confirm that SVI
        # has been deleted from the Nexus switch.
        self.assertTrue(self._is_in_nexus_cfg(['no', 'interface', 'vlan']))
        self.assertTrue(self._is_in_last_nexus_cfg(['no', 'vlan']))

    def test_add_remove_router_intf_with_nexus_l3_disabled(self):
        """Verifies proper add/remove intf operation with Nexus L3 disabled.

        With 'nexus_l3_enable' configured to False, confirm that no changes
        are made to the Nexus switch running configuration when a virtual
        router interface is created and then deleted.
        """
        cisco_config.CONF.set_override('nexus_l3_enable', False, 'CISCO')
        with self._network_subnet_router_interface():
            self.assertFalse(self.mock_ncclient.manager.connect.
                             return_value.edit_config.called)

    def test_create_svi_but_subnet_not_specified_exception(self):
        """Tests raising of SubnetNotSpecified exception.

        Tests that a SubnetNotSpecified exception is raised when an
        add_router_interface request is made for creating a switch virtual
        interface (SVI), but the request does not specify a subnet.
        """
        cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
        with self._network_subnet_router() as (network, subnet, router):
            with self._router_interface(router, subnet=None) as response:
                self._assertExpectedHTTP(response.status_int,
                                         c_exc.SubnetNotSpecified)

    def test_create_svi_but_port_id_included_exception(self):
        """Tests raising of PortIdForNexusSvi exception.

        Tests that a PortIdForNexusSvi exception is raised when an
        add_router_interface request is made for creating a switch virtual
        interface (SVI), but the request includes a virtual port ID.
        """
        cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
        with self._network_subnet_router_interface(
            port_id='my_port_id') as response:
            self._assertExpectedHTTP(response.status_int,
                                     c_exc.PortIdForNexusSvi)


class TestCiscoPortsV2XML(TestCiscoPortsV2):
    fmt = 'xml'


class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
    fmt = 'xml'


class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
    fmt = 'xml'


class TestCiscoRouterInterfacesV2XML(TestCiscoRouterInterfacesV2):
    fmt = 'xml'
{ "content_hash": "eaaf298bec5cfef0f712f2553bea21e9", "timestamp": "", "source": "github", "line_count": 1171, "max_line_length": 79, "avg_line_length": 45.4466268146883, "alnum_prop": 0.5709346461723477, "repo_name": "subramani95/neutron", "id": "4e7be3e873b626dcb0448e7edd396eb57a5eb448", "size": "53809", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "neutron/tests/unit/cisco/test_network_plugin.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from __future__ import print_function from __future__ import absolute_import from __future__ import division import os, sys from six.moves import range sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) import optparse from datetime import timedelta, datetime from zerver.lib.timestamp import datetime_to_timestamp from zerver.lib.utils import statsd_key import requests # Workaround to support the Python-requests 1.0 transition of .json # from a property to a function requests_json_is_function = callable(requests.Response.json) def extract_json_response(resp): if requests_json_is_function: return resp.json() else: return resp.json def get_data_url(buckets, realm): realm_key = statsd_key(realm, True) # This is the slightly-cleaned up JSON api version of https://graphiti.zulip.net/graphs/945c7aafc2d # # Fetches 1 month worth of data DATA_URL="https://stats1.zulip.net:444/render/?from=-1000d&format=json" for bucket in buckets: if realm != 'all': statsd_target = "stats.gauges.staging.users.active.%s.%s" % (realm_key, bucket) DATA_URL += "&target=%s" % (statsd_target,) else: # all means adding up all realms, but exclude the .all. metrics since that would double things DATA_URL += "&target=sum(exclude(stats.gauges.staging.users.active.*.%s, 'all'))" % (bucket,) return DATA_URL def get_data(url, username, pw): from requests.auth import HTTPDigestAuth res = requests.get(url, auth=HTTPDigestAuth(username, pw), verify=False) if res.status_code != 200: print("Failed to fetch data url: %s" % (res.error,)) return [] return extract_json_response(res) def noon_of(day=datetime.now()): return datetime(year=day.year, month=day.month, day=day.day, hour=12) def points_during_day(data, noon): """Returns all the points in the dataset that occur in the 12 hours around the datetime object that is passed in. 
data must be sorted.""" before = datetime_to_timestamp(noon - timedelta(hours=12)) after = datetime_to_timestamp(noon + timedelta(hours=12)) between = [pt for pt in data if pt[1] > before and pt[1] < after] return between def best_during_day(data, day): valid = sorted(points_during_day(data, day), key=lambda pt: pt[0], reverse=True) if len(valid): return valid[0][0] else: return None def percent_diff(prev, cur): if prev is None or cur is None: return None if cur == 0 and prev == 0: return "" if prev == 0: return "NaN" return "%.02f%%" % (((cur - prev) / prev) * 100,) def parse_data(data, today): def print_results(all_days, days, compare_with_last=False): first_data_point = True best_last_time = 0 for i in all_days: day = today - timedelta(days=i) # Ignore weekends if day.weekday() in days: best = best_during_day(metric['datapoints'], day) if best is None: continue if not compare_with_last: percent = percent_diff(best, best_today) else: if first_data_point: percent = "" first_data_point = False else: percent = percent_diff(best_last_time, best) if best is not None: print("Last %s, %s %s ago:\t%.01f\t\t%s" \ % (day.strftime("%A"), i, "days", best, percent)) best_last_time = best for metric in data: # print "Got %s with data points %s" % (metric['target'], len(metric['datapoints'])) # Calculate % between peak 2hr and 10min across each day and week metric['datapoints'].sort(key=lambda p: p[1]) best_today = best_during_day(metric['datapoints'], today) print("Date\t\t\t\tUsers\t\tChange from then to today") print("Today, 0 days ago:\t\t%.01f" % (best_today,)) print_results(range(1, 1000), [0, 1, 2, 3, 4, 7]) print("\n\nWeekly Wednesday results") print("Date\t\t\t\tUsers\t\tDelta from previous week") print_results(reversed(range(1, 1000)), [2], True) parser = optparse.OptionParser(r""" %prog --user username --password pw [--start-from unixtimestamp] Generates activity statistics with detailed week-over-week percentage change """) parser.add_option('--user', help='Graphite 
usernarme', metavar='USER') parser.add_option('--password', help='Graphite password', metavar='PASSWORD') parser.add_option('--start-from', help='What day to consider as \'today\' when calculating stats as a Unix timestamp', metavar='STARTDATE', default='today') parser.add_option('--realm', help='Which realm to query', default='all') parser.add_option('--bucket', help='Which bucket to query', default='12hr') if __name__ == '__main__': (options, args) = parser.parse_args() if not options.user or not options.password: parser.error("You must enter a username and password to log into graphite with") startfrom = noon_of(day=datetime.now()) if options.start_from != 'today': startfrom = noon_of(day=datetime.fromtimestamp(int(options.start_from))) print("Using baseline of today as %s" % (startfrom,)) realm_key = statsd_key(options.realm, True) buckets = [options.bucket] # This is the slightly-cleaned up JSON api version of https://graphiti.zulip.net/graphs/945c7aafc2d # # Fetches 1 month worth of data DATA_URL = get_data_url(buckets, options.realm) data = get_data(DATA_URL, options.user, options.password) parse_data(data, startfrom)
{ "content_hash": "31f9f77dd7dd4c4cfe524aa8a797acde", "timestamp": "", "source": "github", "line_count": 166, "max_line_length": 106, "avg_line_length": 35.91566265060241, "alnum_prop": 0.6071787990607179, "repo_name": "Frouk/zulip", "id": "1693ef39eba8e04b96c511aa6251cec050dd3e3d", "size": "6054", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tools/deprecated/generate-activity-metrics.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "164" }, { "name": "CSS", "bytes": "183514" }, { "name": "CoffeeScript", "bytes": "18435" }, { "name": "Groovy", "bytes": "5516" }, { "name": "HTML", "bytes": "395036" }, { "name": "JavaScript", "bytes": "1582587" }, { "name": "Nginx", "bytes": "1228" }, { "name": "PHP", "bytes": "18930" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Perl", "bytes": "383634" }, { "name": "Puppet", "bytes": "96085" }, { "name": "Python", "bytes": "1984569" }, { "name": "Ruby", "bytes": "255867" }, { "name": "Shell", "bytes": "33353" } ], "symlink_target": "" }
""" Provides a temporal matching function """ import numpy as np from scipy.spatial import cKDTree import pandas as pd def df_match(reference, *args, **kwds): """ Finds temporal match between the reference pandas.DataFrame (index has to be datetime) and n other pandas.DataFrame (index has to be datetime). Parameters ---------- reference : pandas.DataFrame or pandas.TimeSeries The index of this dataframe will be the reference. *args : pandas.DataFrame or pandas.TimeSeries The index of this dataframe(s) will be matched. window : float Fraction of days of the maximum pos./neg. distance allowed, i.e. the value of window represents the half-winow size (e.g. window=0.5, will search for matches between -12 and +12 hours) (default: None) dropna : boolean Drop rows containing only NaNs (default: False) dropduplicates : boolean Drop duplicated temporal matched (default: False) asym_window: string, optional ``<=`` stands for using a smaller and equal only for the left/smaller side of the window comparison ``>=`` stands for using a larger and equal only for the right/larger side of the window comparison The default is to use <= and >= for both sides of the search window Returns ------- temporal_matched_args : pandas.DataFrame or tuple of pandas.DataFrame Dataframe with index from matched reference index """ if "window" in kwds: window = kwds['window'] else: window = None if "asym_window" in kwds: asym_window = kwds['asym_window'] else: asym_window = None temporal_matched_args = [] ref_step = reference.index.values - reference.index.values[0] for arg in args: if type(arg) == pd.TimeSeries: arg = pd.DataFrame(arg) comp_step = arg.index.values - reference.index.values[0] values = np.arange(comp_step.size) # setup kdtree which must get 2D input try: tree = cKDTree(np.atleast_2d(comp_step).T, balanced_tree=False) except TypeError: # scipy before version 0.16 does not have the balanced_tree kw # but is fast in this case also without it tree = cKDTree(np.atleast_2d(comp_step).T) 
dist, i = tree.query(np.atleast_2d(ref_step).T) matched = values[i] distance = np.zeros_like(matched, dtype=np.float) distance.fill(np.nan) valid_match = np.invert(np.isnan(matched)) distance[valid_match] = \ (arg.index.values[np.int32(matched[valid_match])] - reference.index.values[valid_match]) / np.timedelta64(1, 'D') arg['index'] = arg.index.values arg['merge_key'] = np.arange(len(arg)) arg_matched = pd.DataFrame({'merge_key': matched, 'distance': distance, 'ref_index': reference.index.values}) arg_matched = arg_matched.merge(arg, on="merge_key", how="left") arg_matched.index = arg_matched['ref_index'].values arg_matched = arg_matched.sort_index() if window is not None: if asym_window is None: invalid_dist = arg_matched['distance'].abs() > window if asym_window == "<=": # this means that only distance in the interval [distance[ are # taken valid_dist = ((arg_matched['distance'] >= 0.0) & (arg_matched['distance'] <= window)) | ( (arg_matched['distance'] <= 0.0) & (arg_matched['distance'] > -window)) invalid_dist = ~valid_dist if asym_window == ">=": # this means that only distance in the interval ]distance] are # taken valid_dist = ((arg_matched['distance'] >= 0.0) & (arg_matched['distance'] < window)) | ( (arg_matched['distance'] <= 0.0) & (arg_matched['distance'] >= -window)) invalid_dist = ~valid_dist arg_matched.loc[invalid_dist] = np.nan if "dropna" in kwds and kwds['dropna']: arg_matched = arg_matched.dropna() if "dropduplicates" in kwds and kwds['dropduplicates']: arg_matched = arg_matched.dropna() g = arg_matched.groupby('merge_key') min_dists = g.distance.apply(lambda x: x.abs().idxmin()) arg_matched = arg_matched.ix[min_dists] temporal_matched_args.append( arg_matched.drop(['merge_key', 'ref_index'], axis=1)) if len(temporal_matched_args) == 1: return temporal_matched_args[0] else: return tuple(temporal_matched_args) def matching(reference, *args, **kwargs): """ Finds temporal match between the reference pandas.TimeSeries (index has to be datetime) and 
n other pandas.TimeSeries (index has to be datetime). Parameters ---------- reference : pandas.TimeSeries The index of this Series will be the reference. *args : pandas.TimeSeries The index of these Series(s) will be matched. window : float Fraction of days of the maximum pos./neg. distance allowed, i.e. the value of window represents the half-winow size (e.g. window=0.5, will search for matches between -12 and +12 hours) (default: None) Returns ------- temporal_match : pandas.DataFrame containing the index of the reference Series and a column for each of the other input Series """ matched_datasets = df_match(reference, *args, dropna=True, dropduplicates=True, **kwargs) if type(matched_datasets) != tuple: matched_datasets = [matched_datasets] matched_data = pd.DataFrame(reference) for match in matched_datasets: match = match.drop(['distance', 'index'], axis=1) matched_data = matched_data.join(match) return matched_data.dropna()
{ "content_hash": "97c0d107aa465c4734e202878c65b83a", "timestamp": "", "source": "github", "line_count": 157, "max_line_length": 107, "avg_line_length": 39.30573248407644, "alnum_prop": 0.5880732458272565, "repo_name": "christophreimer/pytesmo", "id": "33e50b17ee4e0aad6a529ba1e85ac0311ff614a4", "size": "6171", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pytesmo/temporal_matching.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "1842" }, { "name": "PowerShell", "bytes": "2786" }, { "name": "Python", "bytes": "395114" } ], "symlink_target": "" }
""" Support for Nest Thermostat Sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.nest/ """ from itertools import chain import logging import voluptuous as vol from homeassistant.components.nest import DATA_NEST, DOMAIN from homeassistant.helpers.entity import Entity from homeassistant.const import ( TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_PLATFORM, CONF_SCAN_INTERVAL, CONF_MONITORED_CONDITIONS ) DEPENDENCIES = ['nest'] SENSOR_TYPES = ['humidity', 'operation_mode'] SENSOR_TYPES_DEPRECATED = ['last_ip', 'local_ip', 'last_connection'] SENSOR_TYPES_DEPRECATED = ['last_ip', 'local_ip'] WEATHER_VARS = {} DEPRECATED_WEATHER_VARS = {'weather_humidity': 'humidity', 'weather_temperature': 'temperature', 'weather_condition': 'condition', 'wind_speed': 'kph', 'wind_direction': 'direction'} SENSOR_UNITS = {'humidity': '%', 'temperature': '°C'} PROTECT_VARS = ['co_status', 'smoke_status', 'battery_health'] PROTECT_VARS_DEPRECATED = ['battery_level'] SENSOR_TEMP_TYPES = ['temperature', 'target'] _SENSOR_TYPES_DEPRECATED = SENSOR_TYPES_DEPRECATED \ + list(DEPRECATED_WEATHER_VARS.keys()) + PROTECT_VARS_DEPRECATED _VALID_SENSOR_TYPES = SENSOR_TYPES + SENSOR_TEMP_TYPES + PROTECT_VARS \ + list(WEATHER_VARS.keys()) _VALID_SENSOR_TYPES_WITH_DEPRECATED = _VALID_SENSOR_TYPES \ + _SENSOR_TYPES_DEPRECATED PLATFORM_SCHEMA = vol.Schema({ vol.Required(CONF_PLATFORM): DOMAIN, vol.Optional(CONF_SCAN_INTERVAL): vol.All(vol.Coerce(int), vol.Range(min=1)), vol.Required(CONF_MONITORED_CONDITIONS): [vol.In(_VALID_SENSOR_TYPES_WITH_DEPRECATED)] }) _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the Nest Sensor.""" if discovery_info is None: return nest = hass.data[DATA_NEST] conf = config.get(CONF_MONITORED_CONDITIONS, _VALID_SENSOR_TYPES) for variable in conf: if variable in _SENSOR_TYPES_DEPRECATED: if variable in DEPRECATED_WEATHER_VARS: wstr = ("Nest no 
longer provides weather data like %s. See " "https://home-assistant.io/components/#weather " "for a list of other weather components to use." % variable) else: wstr = (variable + " is no a longer supported " "monitored_conditions. See " "https://home-assistant.io/components/" "binary_sensor.nest/ " "for valid options, or remove monitored_conditions " "entirely to get a reasonable default") _LOGGER.error(wstr) all_sensors = [] for structure, device in chain(nest.devices(), nest.protect_devices()): sensors = [NestBasicSensor(structure, device, variable) for variable in conf if variable in SENSOR_TYPES and is_thermostat(device)] sensors += [NestTempSensor(structure, device, variable) for variable in conf if variable in SENSOR_TEMP_TYPES and is_thermostat(device)] sensors += [NestProtectSensor(structure, device, variable) for variable in conf if variable in PROTECT_VARS and is_protect(device)] all_sensors.extend(sensors) add_devices(all_sensors, True) def is_thermostat(device): """Target devices that are Nest Thermostats.""" return bool(device.__class__.__name__ == 'Device') def is_protect(device): """Target devices that are Nest Protect Smoke Alarms.""" return bool(device.__class__.__name__ == 'ProtectDevice') class NestSensor(Entity): """Representation of a Nest sensor.""" def __init__(self, structure, device, variable): """Initialize the sensor.""" self.structure = structure self.device = device self.variable = variable # device specific self._location = self.device.where self._name = self.device.name_long self._state = None @property def name(self): """Return the name of the nest, if any.""" return "{} {}".format(self._name, self.variable.replace("_", " ")) class NestBasicSensor(NestSensor): """Representation a basic Nest sensor.""" @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return SENSOR_UNITS.get(self.variable, None) def update(self): 
"""Retrieve latest state.""" if self.variable == 'operation_mode': self._state = getattr(self.device, "mode") else: self._state = getattr(self.device, self.variable) class NestTempSensor(NestSensor): """Representation of a Nest Temperature sensor.""" @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" if self.device.temperature_scale == 'C': return TEMP_CELSIUS else: return TEMP_FAHRENHEIT @property def state(self): """Return the state of the sensor.""" return self._state def update(self): """Retrieve latest state.""" temp = getattr(self.device, self.variable) if temp is None: self._state = None if isinstance(temp, tuple): low, high = temp self._state = "%s-%s" % (int(low), int(high)) else: self._state = round(temp, 1) class NestProtectSensor(NestSensor): """Return the state of nest protect.""" @property def state(self): """Return the state of the sensor.""" return self._state def update(self): """Retrieve latest state.""" self._state = getattr(self.device, self.variable).capitalize()
{ "content_hash": "4c429d4a7b44a1d0681ad2b57e86afa5", "timestamp": "", "source": "github", "line_count": 200, "max_line_length": 79, "avg_line_length": 31.4, "alnum_prop": 0.5937898089171975, "repo_name": "dmeulen/home-assistant", "id": "53f767ab49454080cfb4e56e1a1d01acfcfb6c46", "size": "6281", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "homeassistant/components/sensor/nest.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1435271" }, { "name": "Python", "bytes": "4390736" }, { "name": "Ruby", "bytes": "379" }, { "name": "Shell", "bytes": "4473" } ], "symlink_target": "" }
# Python 3 ships mock in the standard library; fall back to the external
# `mock` package on Python 2.
# BUG FIX: the fallback used a bare `except:`; only ImportError should
# trigger it.
try:
    import unittest.mock as mock
except ImportError:
    import mock

from twitter_tunes.scripts import twitter_bot
from twitter_tunes.tests import bot_test_vars
import pytest


def redis_side_effect(arg):
    """Fake redis_data.get_redis_data: serve canned trend/tweet fixtures."""
    if arg == u'trends':
        return bot_test_vars.REDIS_TRENDS
    elif arg == u'last_tweets':
        # Return a copy so tests cannot mutate the shared fixture.
        return bot_test_vars.REDIS_LAST_POSTS[:]


@mock.patch('tweepy.API')
def test_make_tweet_static_message(api):
    """Test if bot makes tweet with a set message."""
    mock_method = api().update_status
    twitter_bot.make_tweet(u"more tests")
    mock_method.assert_called_with(u"more tests")


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link')
@mock.patch('twitter_tunes.scripts.twitter_api')
@mock.patch('tweepy.API')
def test_main_good(api, twitter_api, get_link, get_redis_data,
                   set_redis_data):
    """Test if bot makes tweet with api data.

    This is what the main function would do.
    """
    from twitter_tunes.tests import test_twitter_api
    test_url = u"https://www.youtube.com/watch?v=oyEuk8j8imI"
    mock_update_status = api().update_status
    mock_trends = twitter_api()
    mock_trends.call_twitter_api.return_value = test_twitter_api.FINAL_OUTPUT
    get_link.return_value = (test_url, True)
    get_redis_data.side_effect = redis_side_effect
    redis_trends = get_redis_data(u'trends')
    trends = redis_trends[u'trends']
    da_trend, url = twitter_bot.choose_trend(trends)
    message = twitter_bot.create_message(da_trend, url)
    twitter_bot.make_tweet(message)
    mock_update_status.assert_called_with(message)


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.twitter_api.call_twitter_api')
@mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link')
@mock.patch('tweepy.API')
def test_main_bad_twitter(api, youtube_api, call_twitter_api,
                          get_redis_data, set_redis_data):
    """Test if main does stuff if twitter goes horribly wrong.

    Make sure it can keep going."""
    get_redis_data.return_value = {}
    youtube_api.return_value = (u'url', True)
    call_twitter_api.side_effect = ValueError('Missing OAuth key or token.')
    assert twitter_bot.main() == u'Something went horribly wrong.'


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_api')
@mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link')
@mock.patch('twitter_tunes.scripts.twitter_bot.tweepy.API.update_status')
def test_main_bad_update(update_status, youtube_api, twitter_api,
                         get_redis_data, set_redis_data):
    """Test if main does stuff if tweepy goes horribly wrong.

    Make sure it can keep going."""
    from tweepy import TweepError
    get_redis_data.side_effect = redis_side_effect
    youtube_api.return_value = (u'url', True)
    update_status.side_effect = TweepError("Couldn't Post")
    assert twitter_bot.main() == u'Something went horribly wrong.'


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_api')
@mock.patch('twitter_tunes.scripts.youtube_api')
@mock.patch('twitter_tunes.scripts.twitter_bot.tweepy.API.trends_place')
def test_main_bad_get_trends(trends_place, yt, tw, get_redis, set_redis):
    """Test if main does stuff if tweepy goes horribly wrong.

    Make sure it can keep going."""
    from tweepy import RateLimitError
    get_redis.return_value = {}
    trends_place.side_effect = RateLimitError("Slow Down!")
    assert twitter_bot.main() == u'Something went horribly wrong.'


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_api')
@mock.patch('tweepy.API')
@mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.youtube_search')
def test_main_bad_youtube(youtube_search, tweepy, ta, get_redis, set_redis):
    """Test if main does stuff if twitter goes horribly wrong.

    Make sure it can keep going."""
    get_redis.side_effect = redis_side_effect
    from apiclient.errors import HttpError
    youtube_search.side_effect = HttpError('Uhh', b'youtube broke.')
    assert twitter_bot.main() == u'Something went horribly wrong.'


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
def test_main_bad_redis(get_redis_data):
    """Test if main does stuff if redis goes horribly wrong."""
    from requests.exceptions import ConnectionError
    get_redis_data.side_effect = ConnectionError
    assert twitter_bot.main() == u'Something went horribly wrong.'


def test_bot_create_message_known_params():
    """Test to see bot can return a message.

    Should contain the trend name and a youtube url.
    """
    url = u'https://www.youtube.com/watch?v=rTfa-9aCTYg'
    message = twitter_bot.create_message(u'A Trend', url)
    assert u'A Trend' in message and url in message


@mock.patch('twitter_tunes.scripts.youtube_api.get_link')
def test_bot_message_function_params(get_link):
    """Test to see bot can return a message.

    Message should be based on the returns of other functions.
    """
    from twitter_tunes.scripts import parser
    trend = u"#StoryFromNorthAmerica"
    parse_trend = parser.parse_trend(trend)
    # Results that would come from searching this trend.
    # Saved locally to prevent api calls each test.
    get_link.return_value = (u'https://www.youtube.com/watch?v=ms2klX-puUU',
                             True)
    url = get_link(parse_trend)
    message = twitter_bot.create_message(trend, url[0])
    assert (u'#StoryFromNorthAmerica' in message and
            u'https://www.youtube.com/watch?v=ms2klX-puUU' in message)


@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data')
@mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link')
def test_bot_choose_trend(get_link, get_redis_data, set_redis_data):
    """Test choose trend function.

    Should return the 'best' trend from the trends searched by twitter_api.
    Best trend should be the first music related trend it can find.
    """
    from twitter_tunes.scripts import parser

    def yt_side_effect(arg):
        # Only trends 1 and 2 resolve to a "music" link; everything else
        # gets a non-music result.
        if arg == parser.parse_trend(bot_test_vars.TRENDS[1]):
            return (good_url, True)
        elif arg == parser.parse_trend(bot_test_vars.TRENDS[2]):
            return (good_url, True)
        else:
            return (bad_url, False)

    get_link.side_effect = yt_side_effect
    get_redis_data.side_effect = redis_side_effect
    redis_trends = get_redis_data(u'trends')
    trends = redis_trends[u'trends']
    good_url = u'https://www.youtube.com/watch?v=ms2klX-puUU'
    bad_url = u'https://www.youtube.com/watch?v=cU8HrO7XuiE'
    assert twitter_bot.choose_trend(trends)[0] == bot_test_vars.TRENDS[2]
    # trend you expect.
@mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data') @mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data') @mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link') def test_bot_choose_trend_bad_redis(get_link, get_redis_data, set_redis_data): """test if the choosing trend handles a bad redis call.""" get_link.return_value = (u'url', True) def redis_side_effect_bad(arg): if arg == u'trends': return bot_test_vars.REDIS_TRENDS elif arg == u'last_tweets': return {} get_redis_data.side_effect = redis_side_effect_bad redis_trends = get_redis_data(u'trends') trends = redis_trends[u'trends'] assert twitter_bot.choose_trend(trends)[0] == bot_test_vars.TRENDS[0] @mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.set_redis_data') @mock.patch('twitter_tunes.scripts.twitter_bot.redis_data.get_redis_data') @mock.patch('twitter_tunes.scripts.twitter_bot.youtube_api.get_link') def test_bot_choose_trend_trend_long(get_link, get_redis_data, set_redis_data): """test if the choosing trend handles a long last_tweets""" get_link.return_value = (u'url', True) def redis_side_effect_bad(arg): if arg == u'trends': return bot_test_vars.REDIS_TRENDS elif arg == u'last_tweets': return [u'trend', u'trend', u'trend', u'trend', u'trend'] get_redis_data.side_effect = redis_side_effect_bad redis_trends = get_redis_data(u'trends') trends = redis_trends[u'trends'] assert twitter_bot.choose_trend(trends)[0] == bot_test_vars.TRENDS[0] @mock.patch('twitter_tunes.scripts.twitter_bot.tweepy.API.update_status') def test_make_tweet_bad(update_status): """Test if make_tweet breaks.""" import twitter_tunes.scripts.twitter_bot as bot old_key = bot.consumerKey bot.consumerKey = None with pytest.raises(ValueError): twitter_bot.make_tweet('Please dont post') bot.consumerKey = old_key
{ "content_hash": "3001e1cd238d85747af752662c547a9a", "timestamp": "", "source": "github", "line_count": 226, "max_line_length": 94, "avg_line_length": 41.49557522123894, "alnum_prop": 0.699616122840691, "repo_name": "icarrera/twitter-tunes", "id": "f021d921b44e6ead9db26f45d3ab19b27e4ea23d", "size": "9402", "binary": false, "copies": "1", "ref": "refs/heads/staging", "path": "twitter_tunes/tests/test_bot.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "17144" }, { "name": "JavaScript", "bytes": "1510" }, { "name": "Python", "bytes": "55059" }, { "name": "Shell", "bytes": "61" } ], "symlink_target": "" }
# Functional test: deploy a VNF from a TOSCA template that carries a VNFC
# (software-deployment) artifact, verify the deployment, then tear it down.
# NOTE(review): this test talks to live Tacker/Heat services via the clients
# supplied by base.BaseTackerTest -- it is not runnable in isolation.

import os

from oslo_config import cfg
from toscaparser import tosca_template
import unittest
import yaml

from tacker.common import utils
from tacker.plugins.common import constants as evt_constants
from tacker.tests import constants
from tacker.tests.functional import base
from tacker.tests.utils import read_file
from tacker.tosca import utils as toscautils

CONF = cfg.CONF

# Heat resource type whose status is checked after the VNF becomes ACTIVE.
SOFTWARE_DEPLOYMENT = 'OS::Heat::SoftwareDeployment'


class VnfTestToscaVNFC(base.BaseTackerTest):
    """End-to-end create / verify / delete cycle for a VNF with a VNFC."""

    @unittest.skip("Until BUG 1673012")
    def test_create_delete_tosca_vnfc(self):
        input_yaml = read_file('sample_tosca_vnfc.yaml')
        tosca_dict = yaml.safe_load(input_yaml)
        path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "../../etc/samples"))
        vnfd_name = 'sample-tosca-vnfc'
        # Point the VNFC 'create' lifecycle hook at the local install script
        # shipped with the test samples.
        tosca_dict['topology_template']['node_templates'
                                        ]['firewall_vnfc'
                                          ]['interfaces'
                                            ]['Standard']['create'] = path \
            + '/install_vnfc.sh'
        tosca_arg = {'vnfd': {'name': vnfd_name,
                              'attributes': {'vnfd': tosca_dict}}}

        # Create vnfd with tosca template
        vnfd_instance = self.client.create_vnfd(body=tosca_arg)
        self.assertIsNotNone(vnfd_instance)

        # Create vnf with vnfd_id
        vnfd_id = vnfd_instance['vnfd']['id']
        vnf_arg = {'vnf': {'vnfd_id': vnfd_id, 'name': "test_tosca_vnfc"}}
        vnf_instance = self.client.create_vnf(body=vnf_arg)

        # Wait (poll) until the VNF reaches ACTIVE, then sanity-check it.
        vnf_id = vnf_instance['vnf']['id']
        self.wait_until_vnf_active(vnf_id,
                                   constants.VNFC_CREATE_TIMEOUT,
                                   constants.ACTIVE_SLEEP_TIME)
        self.assertEqual('ACTIVE',
                         self.client.show_vnf(vnf_id)['vnf']['status'])
        self.validate_vnf_instance(vnfd_instance, vnf_instance)

        # Creation should have logged two PENDING_CREATE events and one
        # ACTIVE event for this VNF.
        self.verify_vnf_crud_events(
            vnf_id, evt_constants.RES_EVT_CREATE,
            evt_constants.PENDING_CREATE, cnt=2)
        self.verify_vnf_crud_events(
            vnf_id, evt_constants.RES_EVT_CREATE, evt_constants.ACTIVE)

        # Validate mgmt_url with input yaml file: every VDU declared in the
        # template must have a valid IPv4 management address.
        mgmt_url = self.client.show_vnf(vnf_id)['vnf']['mgmt_url']
        self.assertIsNotNone(mgmt_url)
        mgmt_dict = yaml.safe_load(str(mgmt_url))

        input_dict = yaml.safe_load(input_yaml)
        toscautils.updateimports(input_dict)

        tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                             yaml_dict_tpl=input_dict)

        vdus = toscautils.findvdus(tosca)

        self.assertEqual(len(vdus), len(mgmt_dict.keys()))
        for vdu in vdus:
            self.assertIsNotNone(mgmt_dict[vdu.name])
            self.assertEqual(True, utils.is_valid_ipv4(mgmt_dict[vdu.name]))

        # Check the status of SoftwareDeployment: the first matching Heat
        # resource must have completed its create.
        heat_stack_id = self.client.show_vnf(vnf_id)['vnf']['instance_id']
        resource_types = self.h_client.resources
        resources = resource_types.list(stack_id=heat_stack_id)
        for resource in resources:
            resource = resource.to_dict()
            if resource['resource_type'] == \
                    SOFTWARE_DEPLOYMENT:
                self.assertEqual('CREATE_COMPLETE',
                                 resource['resource_status'])
                break

        # Delete vnf_instance with vnf_id
        try:
            self.client.delete_vnf(vnf_id)
        except Exception:
            assert False, "vnf Delete of test_vnf_with_multiple_vdus failed"

        self.wait_until_vnf_delete(vnf_id,
                                   constants.VNF_CIRROS_DELETE_TIMEOUT)
        # Deletion should log two PENDING_DELETE events.
        self.verify_vnf_crud_events(vnf_id, evt_constants.RES_EVT_DELETE,
                                    evt_constants.PENDING_DELETE, cnt=2)

        # Delete vnfd_instance (deferred so it runs even if asserts fail).
        self.addCleanup(self.client.delete_vnfd, vnfd_id)
{ "content_hash": "68f012649943adf902a6ba9e3c7eadae", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 79, "avg_line_length": 38.95145631067961, "alnum_prop": 0.5837487537387837, "repo_name": "zeinsteinz/tacker", "id": "bfc14382fe34b1cf28d0b37c5a0921808fc0533a", "size": "4585", "binary": false, "copies": "1", "ref": "refs/heads/feyman", "path": "tacker/tests/functional/vnfm/test_tosca_vnfc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1046" }, { "name": "Python", "bytes": "1197700" }, { "name": "Shell", "bytes": "25674" } ], "symlink_target": "" }
""" SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import os from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import timeutils import jacket.compute.conf from jacket.i18n import _LE CONF = jacket.compute.conf.CONF LOG = logging.getLogger(__name__) class SchedulerOptions(object): """SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Could not stat scheduler options file " "%(filename)s"), {'filename': filename}) def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return jsonutils.load(handle) except ValueError: LOG.exception(_LE("Could not decode scheduler options")) return {} def _get_time_now(self): """Get current UTC. 
Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.compute_scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data
{ "content_hash": "e31f0f6336ffc041aae406a426ea178f", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 76, "avg_line_length": 32.89156626506024, "alnum_prop": 0.6267399267399267, "repo_name": "HybridF5/jacket", "id": "84412856b004d612582ac7032867e8e34ba283d8", "size": "3370", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jacket/compute/scheduler/scheduler_options.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "26995056" }, { "name": "Shell", "bytes": "28464" }, { "name": "Smarty", "bytes": "291947" } ], "symlink_target": "" }
"""Adding additional return value handlers is easy.""" from web.core.response import register class MyObject(object): pass def handler(context, result): pass # do something to the context.response class SampleExtension(object): always = True # usually you don't want to be required to activate your own extensions provides = ['sample'] # can be omitted if always is True def __init__(self, config): """Executed to configure the extension.""" super(SampleExtension, self).__init__() def start(self): # Register our custom handler; be sure to pick your type carefully! register(handler, MyObject)
{ "content_hash": "3845aaa473e45c60b6629fd9d6427881", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 89, "avg_line_length": 27.833333333333332, "alnum_prop": 0.6766467065868264, "repo_name": "marrow/WebCore", "id": "3365975d8980ba049109557659c84c527d0be4cf", "size": "687", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "docs/old/recipes/response_registry.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "747" }, { "name": "Makefile", "bytes": "901" }, { "name": "Python", "bytes": "133793" } ], "symlink_target": "" }
import sys import numpy as np import scipy.ndimage.filters from matplotlib.figure import Figure from matplotlib.backends.backend_agg import FigureCanvasAgg import imtools from basename import get_basename mkoutfilename = None def test_corner_samples( upper_left, upper_right, lower_left, lower_right ) : pass def make_montage( upper_left, upper_right, lower_left, lower_right ) : sample_size = upper_left.shape[0] # Make a big image with all four samples. Image has a boundary of black # around the samples. montage = np.zeros( (sample_size*2+6,sample_size*2+6) ) # ugly, ugly, ugly montage[ 2:sample_size+2, 2:sample_size+2 ] = upper_left montage[ 2+sample_size+2:2+sample_size+2+sample_size, 2:sample_size+2 ] = lower_left montage[ 2:sample_size+2 , 2+sample_size+2:2+sample_size+2+sample_size] = upper_right montage[ 2+sample_size+2:2+sample_size+2+sample_size, 2+sample_size+2:2+sample_size+2+sample_size ] = lower_right if mkoutfilename: imtools.clip_and_save(montage,mkoutfilename("mont")) fig = Figure() # make four histograms on single plot to match our montage image for pos,sample in zip((221,222,223,224),(upper_left,upper_right,lower_left,lower_right)): ax = fig.add_subplot(pos) hist,bins = np.histogram(sample,bins=256,range=(0,255)) ax.grid() ax.plot(hist) if mkoutfilename: outfilename = mkoutfilename( "mont_hist" ).replace(".tif",".png") canvas = FigureCanvasAgg(fig) canvas.print_figure(outfilename) print "wrote", outfilename def straightness_test( ndata, gray_low, gray_high, sample_size=60 ) : # Is the ratio of the image about what the q60 card should be? If we're way # off, no reason to go any further. print ndata.shape # q60 ~= 3x5 ?? 
aspect_ratio = float(ndata.shape[0])/ndata.shape[1] print "aspect_ratio={0}".format(aspect_ratio) int_aspect_ratio = int(round(aspect_ratio*100)) print int_aspect_ratio if int_aspect_ratio < 70 or int_aspect_ratio > 75 : errmsg = "aspect_ratio={0} out of range".format(aspect_ratio) print >>sys.stderr, errmsg # assert 0 return False # Grab a square sample from all four edges. If those samples look pretty # close to the Q60's corners, then let's call it straight enough. # sample_size = 20 upper_left = ndata[ 0:sample_size, 0:sample_size ] lower_left = ndata[ -sample_size:, 0:sample_size ] upper_right = ndata[ 0:sample_size, -sample_size: ] lower_right = ndata[ -sample_size:, -sample_size: ] # save all the slices to an image make_montage( upper_left, upper_right, lower_left, lower_right ) count = 0 for name,sample in zip(("ul","ur","lr","ll"),(upper_left,upper_right,lower_left,lower_right)): print "{0} min={1} max={2} median={3} stddev={4}".format( name,np.min(sample),np.max(sample),np.median(sample),np.std(sample)) # if 3 of the four corners are 90(ish)% gray, call it good grayish_mask = np.logical_and( np.less( sample, gray_high ), np.greater( sample, gray_low ) ) grayish = np.extract( grayish_mask, sample ) percent_gray = (float(grayish.size)/sample.size)*100 print "pixels={0} grayish={1} {2}".format( sample.size, grayish.size, percent_gray ) if percent_gray >= 90 : count += 1 # save the samples while test/debugging np.save(name+".npy",sample) return count >= 3 def main() : infilename = sys.argv[1] ndata = imtools.load_image(infilename,dtype="uint8",mode="L") # aggressive median filter to smooth out as much noise as possible fdata = scipy.ndimage.filters.median_filter( ndata, size=(5,5) ) basename = get_basename(infilename) global mkoutfilename mkoutfilename = lambda s : "{0}_{1}.tif".format(basename,s) is_straight = straightness_test( fdata, 126, 146, 60 ) if is_straight : print "is straight enough" else : print "is NOT straight enough" if __name__=='__main__' : 
main()
{ "content_hash": "9f555d78e4a3508776d6675305540df9", "timestamp": "", "source": "github", "line_count": 114, "max_line_length": 117, "avg_line_length": 36.1578947368421, "alnum_prop": 0.6501698204754973, "repo_name": "linuxlizard/q60", "id": "8df91d5e229034cf0be2b75aefbc6c150b017025", "size": "4132", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "straight.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "69866" } ], "symlink_target": "" }
"""Sphinx configuration for the QUAC documentation build."""

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath was removed in Sphinx 1.8 (imgmath is the
# replacement) -- confirm the Sphinx version this project pins.
extensions = ['sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'QUAC'
copyright = u'2012-2015, Los Alamos National Security, LLC and others'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.1'
# The full version, including alpha/beta/rc tags.
#release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d %H:%M %Z'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE: sphinx_rtd_theme is a third-party package and must be installed for
# the build to succeed.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
highlight_language = 'console'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'bodyfont': 'serif',  # for agogo
#                      'pagewidth': '60em',
#                      'documentwidth': '43em',
#                      'sidebarwidth': '17em',
#                      'textalign':'left'}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_domain_indices = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'QUACdoc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'QUAC.tex', u'QUAC Documentation',
   u'Reid Priedhorsky, Aron Culotta, and others', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'QUAC', u'QUAC Documentation',
     [u'Reid Priedhorsky, Aron Culotta, and others'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'QUAC', u'QUAC Documentation',
   u'Reid Priedhorsky, Aron Culotta, and others', 'QUAC',
   'One line description of project.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
{ "content_hash": "7177b29dccf2d6df0d8061e4df5b06f0", "timestamp": "", "source": "github", "line_count": 237, "max_line_length": 93, "avg_line_length": 32.9915611814346, "alnum_prop": 0.693694845888221, "repo_name": "casmlab/quac", "id": "3251e9edab4681d9d751aaba54fc6d40d6e8e677", "size": "8234", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "doc-src/conf.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "5004" }, { "name": "Gnuplot", "bytes": "1396" }, { "name": "Makefile", "bytes": "12373" }, { "name": "PLpgSQL", "bytes": "2740" }, { "name": "Python", "bytes": "570122" }, { "name": "Shell", "bytes": "56557" } ], "symlink_target": "" }
import re, sys, time, os import functools as fu import sublime, sublime_plugin from copy import copy from .lib.misc import * from .lib import kill_ring from .lib import isearch import Default.paragraph as paragraph from . import sbp_layout as ll # repeatable commands repeatable_cmds = set(['move', 'left_delete', 'right_delete', 'undo', 'redo']) # built-in commands we need to do ensure_visible after being run # REMIND: I think we can delete this. built_in_ensure_visible_cmds = set(['move', 'move_to']) class ViewWatcher(sublime_plugin.EventListener): def __init__(self, *args, **kwargs): super(ViewWatcher, self).__init__(*args, **kwargs) self.pending_dedups = 0 def on_close(self, view): ViewState.on_view_closed(view) def on_activated(self, view): update_pinned_status(view) def on_deactivated(self, view): self.disable_empty_active_mark(view) def on_activated_async(self, view): info = isearch.info_for(view) if info and not view.settings().get("is_widget"): # stop the search if we activated a new view in this window info.done() def on_query_context(self, view, key, operator, operand, match_all): def test(a): if operator == sublime.OP_EQUAL: return a == operand if operator == sublime.OP_NOT_EQUAL: return a != operand return False if key == "i_search_active": return test(isearch.info_for(view) is not None) if key == "sbp_has_active_mark": return test(CmdUtil(view).state.active_mark) if key == "sbp_has_visible_selection": return test(view.sel()[0].size() > 1) if key == "sbp_use_alt_bindings": return test(settings_helper.get("sbp_use_alt_bindings")) if key == "sbp_use_super_bindings": return test(settings_helper.get("sbp_use_super_bindings")) if key == "sbp_alt+digit_inserts": return test(settings_helper.get("sbp_alt+digit_inserts") or not settings_helper.get("sbp_use_alt_bindings")) if key == 'sbp_has_prefix_argument': return test(CmdUtil(view).has_prefix_arg()) if key == "sbp_catchall": return True def on_post_save(self, view): # Schedule a dedup, but do not do it NOW 
because it seems to cause a crash if, say, we're # saving all the buffers right now. So we schedule it for the future. self.pending_dedups += 1 def doit(): self.pending_dedups -= 1 if self.pending_dedups == 0: dedup_views(sublime.active_window()) sublime.set_timeout(doit, 50) # # Turn off active mark mode in all the views related to this view. # # REMIND: Sadly this is called N times for the N views that are related to the specified view, # and then we iterator through all N views. So this is N-squared sadness, for usually 2 or fewer # views ... # def on_modified(self, view): self.disable_empty_active_mark(view, False) def disable_empty_active_mark(self, view, must_be_empty = True): for related_view in ViewState.most_recent_related_view(view): util = CmdUtil(related_view) selection = related_view.sel() regions = list(selection) if not must_be_empty or util.all_empty_regions(regions): util.toggle_active_mark_mode(False) ViewState.get(related_view).this_cmd = None # # CmdWatcher watches all the commands and tries to correctly process the following situations: # # - canceling i-search if another window command is performed or a mouse drag starts # - override commands and run them N times if there is a numeric argument supplied # - if transient mark mode, automatically extend the mark when using certain commands like forward # word or character # class CmdWatcher(sublime_plugin.EventListener): def __init__(self, *args, **kwargs): super(CmdWatcher, self).__init__(*args, **kwargs) def on_post_window_command(self, window, cmd, args): # update_pinned_status(window.active_view()) info = isearch.info_for(window) if info is None: return None # Some window commands take us to new view. Here's where we abort the isearch if that happens. if window.active_view() != info.view: info.done() # # Override some commands to execute them N times if the numeric argument is supplied. 
# def on_text_command(self, view, cmd, args): # escape the current isearch if one is in progress, unless the command is already related to # isearch if isearch.info_for(view) is not None: if cmd not in ('sbp_inc_search', 'sbp_inc_search_escape', 'drag_select'): return ('sbp_inc_search_escape', {'next_cmd': cmd, 'next_args': args}) return vs = ViewState.get(view) if args is None: args = {} # first keep track of this_cmd and last_cmd (if command starts with "sbp_" it's handled # elsewhere) if not cmd.startswith("sbp_"): vs.this_cmd = cmd # # Process events that create a selection. The hard part is making it work with the emacs # region. # if cmd == 'drag_select': # NOTE: This is called only when you click, NOT when you drag. So if you triple click # it's called three times. # NOTE: remember the view that performed the drag_select because of the # on_selection_modified bug of using the wrong view if the same view is displayed more # than once self.drag_select_view = view # cancel isearch if necessary info = isearch.info_for(view) if info: info.done() # Set drag_count to 0 when first drag_select command occurs. if 'by' not in args: vs.drag_count = 0 else: self.drag_select_view = None if cmd in ('move', 'move_to') and vs.active_mark and not args.get('extend', False): # this is necessary or else the built-in commands (C-f, C-b) will not move when there is # an existing selection args['extend'] = True return (cmd, args) # now check for numeric argument and rewrite some commands as necessary if not vs.argument_supplied: return None if cmd in repeatable_cmds: count = vs.get_count() args.update({ 'cmd': cmd, '_times': abs(count), }) if count < 0 and 'forward' in args: args['forward'] = not args['forward'] return ("sbp_do_times", args) elif cmd == 'scroll_lines': args['amount'] *= vs.get_count() return (cmd, args) # # Post command processing: deal with active mark and resetting the numeric argument. 
# def on_post_text_command(self, view, cmd, args): vs = ViewState.get(view) util = CmdUtil(view) if vs.active_mark and vs.this_cmd != 'drag_select' and vs.last_cmd == 'drag_select': # if we just finished a mouse drag, make sure active mark mode is off if cmd != "context_menu": util.toggle_active_mark_mode(False) # reset numeric argument (if command starts with "sbp_" this is handled elsewhere) if not cmd.startswith("sbp_"): vs.argument_value = 0 vs.argument_supplied = False vs.last_cmd = cmd if vs.active_mark and cmd != 'drag_select': util.set_cursors(util.get_regions()) # # Process the selection if it was created from a drag_select (mouse dragging) command. # # REMIND: This iterates all related views because sublime notifies for the same view N times, if # there are N separate views open on the same buffer. # def on_selection_modified(self, active_view): for view in ViewState.most_recent_related_view(active_view): vs = ViewState.get(view) selection = view.sel() if len(selection) == 1 and vs.this_cmd == 'drag_select': cm = CmdUtil(view, vs) # # REMIND: we cannot rely on drag_count unfortunately because if you have the same # # buffer in multiple views, they each get notified. # if vs.drag_count >= 2 and not vs.active_mark: # # wait until selection is at least 1 character long before activating # region = view.sel()[0] # if region.size() >= 1: # cm.set_mark([sublime.Region(region.a, region.b)], and_selection=False) # vs.active_mark = True # elif vs.drag_count == 0: # cm.toggle_active_mark_mode(False) # vs.drag_count += 1 # update the mark ring sel = selection[0] vs.mark_ring.set([sublime.Region(sel.a, sel.a)], True) class WindowCmdWatcher(sublime_plugin.EventListener): def __init__(self, *args, **kwargs): super(WindowCmdWatcher, self).__init__(*args, **kwargs) def on_window_command(self, window, cmd, args): # REMIND - JP: Why is this code here? Can't this be done in the SbpPaneCmd class? 
# Check the move state of the Panes and make sure we stop recursion if cmd == "sbp_pane_cmd" and args and args['cmd'] == 'move' and 'next_pane' not in args: lm = ll.LayoutManager(window.layout()) if args["direction"] == 'next': pos = lm.next(window.active_group()) else: pos = lm.next(window.active_group(), -1) args["next_pane"] = pos return cmd, args class SbpChainCommand(SbpTextCommand): """A command that easily runs a sequence of other commands.""" def run_cmd(self, util, commands, ensure_point_visible=False): for c in commands: if 'window_command' in c: util.run_window_command(c['window_command'], c['args']) elif 'command' in c: util.run_command(c['command'], c['args']) if ensure_point_visible: util.ensure_visible(sublime.Region(util.get_point())) # # Calls run command a specified number of times. # class SbpDoTimesCommand(SbpTextCommand): def run_cmd(self, util, cmd, _times, **args): view = self.view window = view.window() visible = view.visible_region() def doit(): # for i in range(_times): # window.run_command(cmd, args) # REMIND: window.run_command is much slower and I cannot remember why I used # window.run_command... for i in range(_times): util.run_command(cmd, args) if cmd in ('redo', 'undo'): sublime.set_timeout(doit, 10) else: doit() cursor = util.get_last_cursor() if not visible.contains(cursor.b): util.ensure_visible(cursor, True) class SbpShowScopeCommand(SbpTextCommand): def run_cmd(self, util, direction=1): point = util.get_point() name = self.view.scope_name(point) region = self.view.extract_scope(point) status = "%d bytes: %s" % (region.size(), name) print(status) util.set_status(status) # # Implements moving by words, emacs style. 
# class SbpMoveWordCommand(SbpTextCommand): is_ensure_visible_cmd = True def find_by_class_fallback(self, view, point, forward, classes, seperators): if forward: delta = 1 end_position = self.view.size() if point > end_position: point = end_position else: delta = -1 end_position = 0 if point < end_position: point = end_position while point != end_position: if view.classify(point) & classes != 0: return point point += delta return point def find_by_class_native(self, view, point, forward, classes, separators): return view.find_by_class(point, forward, classes, separators) def run_cmd(self, util, direction=1): view = self.view separators = settings_helper.get("sbp_word_separators", default_sbp_word_separators) # determine the direction count = util.get_count() * direction forward = count > 0 count = abs(count) def call_find_by_class(point, classes, separators): ''' This is a small wrapper that maps to the right find_by_class call depending on the version of ST installed ''' return self.find_by_class_native(view, point, forward, classes, separators) def move_word0(cursor, first=False): point = cursor.b if forward: if not first or not util.is_word_char(point, True, separators): point = call_find_by_class(point, sublime.CLASS_WORD_START, separators) point = call_find_by_class(point, sublime.CLASS_WORD_END, separators) else: if not first or not util.is_word_char(point, False, separators): point = call_find_by_class(point, sublime.CLASS_WORD_END, separators) point = call_find_by_class(point, sublime.CLASS_WORD_START, separators) return sublime.Region(point, point) for c in range(count): util.for_each_cursor(move_word0, first=(c == 0)) # # Advance to the beginning (or end if going backward) word unless already positioned at a word # character. This can be used as setup for commands like upper/lower/capitalize words. This ignores # the argument count. 
# class SbpMoveBackToIndentation(SbpTextCommand): def run_cmd(self, util, direction=1): view = self.view def to_indentation(cursor): start = cursor.begin() while util.is_one_of(start, " \t"): start += 1 return start util.run_command("move_to", {"to": "hardbol", "extend": False}) util.for_each_cursor(to_indentation) # # Perform the uppercase/lowercase/capitalize commands on all the current cursors. If use_region is # true, the command will be applied to the regions, not to words. The regions are either existing # visible selection, OR, the emacs region(s) which might not be visible. If there are no non-empty # regions and use_region=True, this command is a no-op. # class SbpChangeCaseCommand(SbpTextCommand): re_to_underscore = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))') re_to_camel = re.compile(r'(?!^)_([a-zA-Z])') # re_to_camel = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))') def underscore(self, text): s1 = self.re_to_underscore.sub(r'_\1', text).lower() return s1 def camel(self, text): s1 = self.re_to_camel.sub(lambda m: m.group(1).upper(), text) return s1 def run_cmd(self, util, mode, use_region=False, direction=1): view = self.view count = util.get_count(True) # If cursors are not empty (e.g., visible marks) then we use the selection and we're in # region mode. If the cursors are empty but the emacs regions are not, we use them as long # as mode="regions". Otherwise, we generate regions by applying a word motion command. selection = view.sel() regions = list(selection) empty_cursors = util.all_empty_regions(regions) if empty_cursors and use_region: emacs_regions = util.get_regions() if emacs_regions and not util.all_empty_regions(emacs_regions): empty_cursors = False selection.clear() selection.add_all(emacs_regions) if empty_cursors: if use_region: return # This works first by finding the bounds of the operation by executing a forward-word # command. Then it performs the case command. 
But only if there are no selections or # regions to operate on. # run the move-word command so we can create a region direction = -1 if count < 0 else 1 util.run_command("sbp_move_word", {"direction": 1}) # now the selection is at the "other end" and so we create regions out of all the # cursors new_regions = [] for r, s in zip(regions, selection): new_regions.append(r.cover(s)) selection.clear() selection.add_all(new_regions) # perform the operation if mode in ('upper', 'lower'): util.run_command(mode + "_case", {}) elif mode == "title": for r in selection: util.view.replace(util.edit, r, view.substr(r).title()) elif mode in ("underscore", "camel"): fcn = self.underscore if mode == "underscore" else self.camel delta = 0 for r, s in zip(regions, selection): orig = view.substr(s) replace = fcn(orig) this_delta = len(orig) - len(replace) util.view.replace(util.edit, s, replace) # We need to adjust the size of regions by this_delta, and the position of each # region by the accumulated delta for when we put the selection back at the end. if s.b > s.a: r.b -= this_delta else: r.a -= this_delta r.b -= delta r.a -= delta delta += this_delta else: print("Unknown case setting:", mode) return if empty_cursors and count > 0: # was a word-based execution for r in new_regions: r.a = r.b = r.end() selection.clear() selection.add_all(new_regions) else: # we used the selection or the emacs regions selection.clear() selection.add_all(regions) # # A poor implementation of moving by s-expressions. The problem is it tries to use the built-in # sublime capabilities for matching brackets, and it can be tricky getting that to work. # # The real solution is to figure out how to require/request the bracket highlighter code to be # loaded and just use it. 
# class SbpMoveSexprCommand(SbpTextCommand): is_ensure_visible_cmd = True should_reset_target_column = True def run_cmd(self, util, direction=1): view = self.view separators = settings_helper.get("sbp_sexpr_separators", default_sbp_sexpr_separators) # determine the direction count = util.get_count() * direction forward = count > 0 count = abs(count) def advance(cursor, first): point = cursor.b if forward: limit = view.size() while point < limit: if util.is_word_char(point, True, separators): point = view.find_by_class(point, True, sublime.CLASS_WORD_END, separators) break else: ch = view.substr(point) if ch in "({[`'\"": next_point = util.to_other_end(point, direction) if next_point is not None: point = next_point break point += 1 else: while point > 0: if util.is_word_char(point, False, separators): point = view.find_by_class(point, False, sublime.CLASS_WORD_START, separators) break else: ch = view.substr(point - 1) if ch in ")}]`'\"": next_point = util.to_other_end(point, direction) if next_point is not None: point = next_point break point -= 1 cursor.a = cursor.b = point return cursor for c in range(count): util.for_each_cursor(advance, (c == 0)) # Move to paragraph depends on the functionality provided by the default # plugin in ST. So for now we use this. 
class SbpMoveToParagraphCommand(SbpTextCommand):
    """Move point by paragraphs, using Default.paragraph for the boundaries."""

    def run_cmd(self, util, direction=1):
        view = self.view

        count = util.get_count() * direction
        forward = count > 0
        count = abs(count)

        def advance(cursor):
            whitespace = '\t\x0b\x0c\r \n'
            if not forward:
                # Remove whitespace and new lines for moving forward and backward paragraphs
                this_region_begin = max(0, cursor.begin() - 1)
                while this_region_begin > 0 and view.substr(this_region_begin) in whitespace:
                    this_region_begin -= 1
                point = paragraph.expand_to_paragraph(view, this_region_begin).begin()
            else:
                this_region_end = cursor.end()
                limit = self.view.size() - 1
                while this_region_end < limit and view.substr(this_region_end) in whitespace:
                    this_region_end += 1
                point = paragraph.expand_to_paragraph(self.view, this_region_end).end()

            return sublime.Region(point)

        for c in range(count):
            util.for_each_cursor(advance)

        # scroll the relevant extreme cursor into view
        s = view.sel()
        util.ensure_visible(s[-1] if forward else s[0])

#
# A class which implements all the hard work of performing a move and then delete/kill command. It
# keeps track of the cursors, then runs the command to move all the cursors, and then performs the
# kill. This is used by the generic SbpMoveThenDeleteCommand command, but also commands that require
# input from a panel and so are not synchronous.
#
class MoveThenDeleteHelper():
    """Snapshot cursors before a motion; after the motion, kill the spanned text.

    Usage: construct before running the motion command, then call finish().
    """

    def __init__(self, util):
        self.util = util
        # NOTE: this is the live sublime Selection object — its regions are
        # kept up to date by Sublime as the buffer changes.
        self.selection = util.view.sel()

        # assume forward kill direction
        self.forward = True

        # remember the current cursor positions
        self.orig_cursors = [s for s in self.selection]

        # Remember if previous was a kill command now, because if we check in self.finish() it's too
        # late and the answer is always yes (because of this command we're "helping").
        self.last_was_kill_cmd = util.state.last_was_kill_cmd()

    #
    # Finish the operation. Sometimes we're called later with a new util object, because the whole
    # thing was done asynchronously (see the zap code).
    #
    def finish(self, new_util=None):
        util = new_util if new_util else self.util
        view = util.view
        selection = self.selection
        orig_cursors = self.orig_cursors

        # extend all cursors so we can delete the bytes
        new_cursors = list(selection)

        # but first check to see how many regions collapsed as a result of moving the cursors (e.g.,
        # if they pile up at the end of the buffer)
        collapsed_regions = len(orig_cursors) - len(new_cursors)
        if collapsed_regions == 0:
            # OK - so now check to see how many collapse after we combine the beginning and end
            # points of each region. We do that by creating the selection object, which disallows
            # overlapping regions by collapsing them.
            selection.clear()
            for old,new in zip(orig_cursors, new_cursors):
                if old < new:
                    selection.add(sublime.Region(old.begin(), new.end()))
                else:
                    selection.add(sublime.Region(new.begin(), old.end()))
            collapsed_regions = len(orig_cursors) - len(selection)

        # OK one final check to see if any regions will overlap each other after we perform the
        # kill.
        if collapsed_regions == 0:
            cursors = list(selection)
            for i, c in enumerate(cursors[1:]):
                # cursors[i] is the region just before c (enumerate starts at 0
                # while the slice starts at index 1)
                if cursors[i].contains(c.begin()):
                    collapsed_regions += 1

        if collapsed_regions != 0:
            # restore everything to previous state and display a popup error
            selection.clear()
            selection.add_all(orig_cursors)
            sublime.error_message("Couldn't perform kill operation because %d regions would have collapsed into adjacent regions!" % collapsed_regions)
            return

        # copy the text into the kill ring
        regions = [view.substr(r) for r in view.sel()]
        kill_ring.add(regions, forward=self.forward, join=self.last_was_kill_cmd)

        # erase the regions; iterating the live Selection is safe here because
        # Sublime adjusts the remaining regions as the buffer shrinks
        for region in selection:
            view.erase(util.edit, region)

#
# This command remembers all the current cursor positions, executes a command on all the cursors,
# and then deletes all the data between the two.
# class SbpMoveThenDeleteCommand(SbpTextCommand): is_ensure_visible_cmd = True is_kill_cmd = True def run_cmd(self, util, move_cmd, **kwargs): # prepare helper = MoveThenDeleteHelper(util) # peek at the count and update the helper's forward direction count = util.get_count(True) if 'direction' in kwargs: count *= kwargs['direction'] helper.forward = count > 0 util.view.run_command(move_cmd, kwargs) helper.finish() # # Goto the the Nth line as specified by the emacs arg count, or prompt for a line number of one # isn't specified. # class SbpGotoLineCommand(SbpTextCommand): is_ensure_visible_cmd = True def run_cmd(self, util): if util.has_prefix_arg(): util.goto_line(util.get_count()) else: util.run_window_command("show_overlay", {"overlay": "goto", "text": ":"}) class SbpUniversalArgumentCommand(SbpTextCommand): def run_cmd(self, util, value): state = util.state if not state.argument_supplied: state.argument_supplied = True if value == 'by_four': state.argument_value = 4 elif value == 'negative': state.argument_negative = True else: state.argument_value = value elif value == 'by_four': state.argument_value *= 4 elif isinstance(value, int): state.argument_value *= 10 state.argument_value += value elif value == 'negative': state.argument_value = -state.argument_value class SbpShiftRegionCommand(SbpTextCommand): """Shifts the emacs region left or right.""" def run_cmd(self, util, direction): view = self.view state = util.state regions = util.get_regions() if not regions: regions = util.get_cursors() if regions: util.save_cursors("shift") util.toggle_active_mark_mode(False) selection = self.view.sel() selection.clear() # figure out how far we're moving if state.argument_supplied: cols = direction * util.get_count() else: cols = direction * util.get_tab_size() # now we know which way and how far we're shifting, create a cursor for each line we # want to shift amount = abs(cols) count = 0 shifted = 0 for region in regions: for line in util.for_each_line(region): count += 
1 if cols < 0 and (line.size() < amount or not util.is_blank(line.a, line.a + amount)): continue selection.add(sublime.Region(line.a, line.a)) shifted += 1 # shift the region if cols > 0: # shift right self.view.run_command("insert", {"characters": " " * cols}) else: for i in range(amount): self.view.run_command("right_delete") # restore the region util.restore_cursors("shift") util.set_status("Shifted %d of %d lines in the region" % (shifted, count)) # Enum definition def enum(**enums): return type('Enum', (), enums) SCROLL_TYPES = enum(TOP=1, CENTER=0, BOTTOM=2) class SbpCenterViewCommand(SbpTextCommand): ''' Reposition the view so that the line containing the cursor is at the center of the viewport, if possible. Like the corresponding Emacs command, recenter-top-bottom, this command cycles through scrolling positions. If the prefix args are used it centers given an offset else the cycling command is used This command is frequently bound to Ctrl-l. ''' last_sel = None last_scroll_type = None last_visible_region = None def rowdiff(self, start, end): r1,c1 = self.view.rowcol(start) r2,c2 = self.view.rowcol(end) return r2 - r1 def run_cmd(self, util, center_only=False): view = self.view point = util.get_point() if util.has_prefix_arg(): lines = util.get_count() line_height = view.line_height() ignore, point_offy = view.text_to_layout(point) offx, ignore = view.viewport_position() view.set_viewport_position((offx, point_offy - line_height * lines)) elif center_only: self.view.show_at_center(util.get_point()) else: self.cycle_center_view(view.sel()[0]) def cycle_center_view(self, start): if start != SbpCenterViewCommand.last_sel: SbpCenterViewCommand.last_visible_region = None SbpCenterViewCommand.last_scroll_type = SCROLL_TYPES.CENTER SbpCenterViewCommand.last_sel = start self.view.show_at_center(SbpCenterViewCommand.last_sel) return else: SbpCenterViewCommand.last_scroll_type = (SbpCenterViewCommand.last_scroll_type + 1) % 3 SbpCenterViewCommand.last_sel = start if 
SbpCenterViewCommand.last_visible_region == None: SbpCenterViewCommand.last_visible_region = self.view.visible_region() # Now Scroll to position if SbpCenterViewCommand.last_scroll_type == SCROLL_TYPES.CENTER: self.view.show_at_center(SbpCenterViewCommand.last_sel) elif SbpCenterViewCommand.last_scroll_type == SCROLL_TYPES.TOP: row,col = self.view.rowcol(SbpCenterViewCommand.last_visible_region.end()) diff = self.rowdiff(SbpCenterViewCommand.last_visible_region.begin(), SbpCenterViewCommand.last_sel.begin()) self.view.show(self.view.text_point(row + diff-2, 0), False) elif SbpCenterViewCommand.last_scroll_type == SCROLL_TYPES.BOTTOM: row, col = self.view.rowcol(SbpCenterViewCommand.last_visible_region.begin()) diff = self.rowdiff(SbpCenterViewCommand.last_sel.begin(), SbpCenterViewCommand.last_visible_region.end()) self.view.show(self.view.text_point(row - diff+2, 0), False) class SbpSetMarkCommand(SbpTextCommand): def run_cmd(self, util): state = util.state if state.argument_supplied: cursors = state.mark_ring.pop() if cursors: util.set_cursors(cursors) state.this_cmd = 'sbp_pop_mark' elif state.this_cmd == state.last_cmd: # at least two set mark commands in a row: turn ON the highlight util.toggle_active_mark_mode() else: # set the mark util.set_mark() if settings_helper.get("sbp_active_mark_mode", False): util.set_active_mark_mode() class SbpCancelMarkCommand(SbpTextCommand): def run_cmd(self, util): if util.state.active_mark: util.toggle_active_mark_mode() util.state.mark_ring.clear() class SbpSwapPointAndMarkCommand(SbpTextCommand): def run_cmd(self, util, toggle_active_mark_mode=False): if util.state.argument_supplied or toggle_active_mark_mode: util.toggle_active_mark_mode() else: util.swap_point_and_mark() class SbpEnableActiveMarkCommand(SbpTextCommand): def run_cmd(self, util, enabled): util.toggle_active_mark_mode(enabled) class SbpMoveToCommand(SbpTextCommand): is_ensure_visible_cmd = True def run_cmd(self, util, to, always_push_mark=False): if to == 
'bof': util.push_mark_and_goto_position(0) elif to == 'eof': util.push_mark_and_goto_position(self.view.size()) elif to in ('eow', 'bow'): visible = self.view.visible_region() pos = visible.a if to == 'bow' else visible.b if always_push_mark: util.push_mark_and_goto_position(pos) else: util.set_cursors([sublime.Region(pos)]) class SbpSelectAllCommand(SbpTextCommand): def run_cmd(self, util, activate_mark=True): # set mark at current position util.set_mark() # set a mark at end of file util.set_mark(regions=[sublime.Region(self.view.size())]) # goto the top of the file util.set_point(0) if activate_mark: util.toggle_active_mark_mode(True) else: util.ensure_visible(sublime.Region(0)) class SbpOpenLineCommand(SbpTextCommand): def run_cmd(self, util): view = self.view count = util.get_count() if count > 0: for point in view.sel(): view.insert(util.edit, point.b, "\n" * count) while count > 0: view.run_command("move", {"by": "characters", "forward": False}) count -= 1 class SbpKillRegionCommand(SbpTextCommand): is_kill_cmd = True def run_cmd(self, util, is_copy=False): view = self.view regions = util.get_regions() if regions: data = [view.substr(r) for r in regions] kill_ring.add(data, True, False) if not is_copy: for r in reversed(regions): view.erase(util.edit, r) else: bytes = sum(len(d) for d in data) util.set_status("Copied %d bytes in %d regions" % (bytes, len(data))) util.toggle_active_mark_mode(False) class SbpPaneCmdCommand(SbpWindowCommand): def run_cmd(self, util, cmd, **kwargs): if cmd == 'split': self.split(self.window, util, **kwargs) elif cmd == 'grow': self.grow(self.window, util, **kwargs) elif cmd == 'destroy': self.destroy(self.window, **kwargs) elif cmd in ('move', 'switch_tab'): self.move(self.window, **kwargs) else: print("Unknown command") # # Grow the current selected window group (pane). Amount is usually 1 or -1 for grow and shrink. 
# def grow(self, window, util, direction): if window.num_groups() == 1: return # Prepare the layout layout = window.layout() lm = ll.LayoutManager(layout) rows = lm.rows() cols = lm.cols() cells = layout['cells'] # calculate the width and height in pixels of all the views width = height = dx = dy = 0 for g,cell in enumerate(cells): view = window.active_view_in_group(g) w,h = view.viewport_extent() width += w height += h dx += cols[cell[2]] - cols[cell[0]] dy += rows[cell[3]] - rows[cell[1]] width /= dx height /= dy current = window.active_group() view = util.view # Handle vertical moves count = util.get_count() if direction in ('g', 's'): unit = view.line_height() / height else: unit = view.em_width() / width window.set_layout(lm.extend(current, direction, unit, count)) # make sure point doesn't disappear in any active view - a delay is needed for this to work def ensure_visible(): for g in range(window.num_groups()): view = window.active_view_in_group(g) util = CmdUtil(view) util.ensure_visible(util.get_last_cursor()) sublime.set_timeout(ensure_visible, 50) # # Split the current pane in half. Clone the current view into the new pane. Refuses to split if # the resulting windows would be too small. 
    def split(self, window, util, stype):
        """Split the current pane (stype "h" or "v") and clone the view into the new half.

        Returns False without splitting when either resulting half would be too
        small to be useful; returns True on success.
        """
        layout = window.layout()
        current = window.active_group()
        group_count = window.num_groups()
        view = window.active_view()

        # refuse to split when a half would be smaller than ~4 lines / ~20 ems
        extent = view.viewport_extent()
        if stype == "h" and extent[1] / 2 <= 4 * view.line_height():
            return False
        if stype == "v" and extent[0] / 2 <= 20 * view.em_width():
            return False

        # Perform the layout
        lm = ll.LayoutManager(layout)
        if not lm.split(current, stype):
            return False
        window.set_layout(lm.build())

        # couldn't find an existing view so we have to clone the current one
        window.run_command("clone_file")

        # the cloned view becomes the new active view
        new_view = window.active_view()

        # move the new view into the new group (add the end of the list)
        window.set_view_index(new_view, group_count, 0)

        # make sure the original view is the focus in the original pane
        window.focus_view(view)

        # switch to new pane
        # NOTE(review): after the split the new group's 0-based index is
        # group_count, so group_count + 1 looks off by one - confirm whether
        # Sublime clamps an out-of-range index in focus_group().
        window.focus_group(group_count + 1)

        # after a short delay make sure the two views are looking at the same area
        def setup_views():
            selection = new_view.sel()
            selection.clear()
            selection.add_all([r for r in view.sel()])

            new_view.set_viewport_position(view.viewport_position(), False)
            point = util.get_point()
            new_view.show(point)
            view.show(point)

        sublime.set_timeout(setup_views, 10)
        return True

    #
    # Destroy the specified pane=self|others.
# def destroy(self, window, pane): if window.num_groups() == 1: return view = window.active_view() layout = window.layout() current = window.active_group() lm = ll.LayoutManager(layout) if pane == "self": views = [window.active_view_in_group(i) for i in range(window.num_groups())] del(views[current]) lm.killSelf(current) else: lm.killOther(current) views = [window.active_view()] window.set_layout(lm.build()) for i in range(window.num_groups()): view = views[i] window.focus_group(i) window.focus_view(view) window.focus_group(max(0, current - 1)) dedup_views(window) def move(self, window, **kwargs): if 'next_pane' in kwargs: window.focus_group(kwargs["next_pane"]) return direction = kwargs['direction'] if direction in ("prev", "next"): direction = 1 if direction == "next" else -1 current = window.active_group() current += direction num_groups = window.num_groups() if current < 0: current = num_groups - 1 elif current >= num_groups: current = 0 window.focus_group(current) else: view = window.active_view() group,index = window.get_view_index(view) views = window.views_in_group(group) direction = 1 if direction == "right" else -1 index += direction if index >= len(views): index = 0 elif index < 0: index = len(views) - 1 window.focus_view(views[index]) # # Close the N least recently touched views, leaving at least one view remaining. # class SbpCloseStaleViewsCommand(SbpWindowCommand): def run_cmd(self, util, n_windows=None): window = sublime.active_window() sorted = ViewState.sorted_views(window, window.active_group()) if n_windows is None or util.has_prefix_arg(): n_windows = util.get_count() while n_windows > 0 and len(sorted) > 1: view = sorted.pop() if view.is_dirty() or view.settings().get("pinned"): continue window.focus_view(view) window.run_command('close') n_windows -= 1 # go back to the original view window.focus_view(util.view) # # Toggle the pinned state of the current view. 
# class SbpToggleViewPinnedCommand(SbpTextCommand): def run_cmd(self, util): view = self.view settings = view.settings() pinned = settings.get("pinned", False) settings.set("pinned", not pinned) update_pinned_status(view) # # Closes the current view and selects the most recently used one in its place. This is almost like # kill buffer in emacs but if another view is displaying this file, it will still exist there. In # short, this is like closing a tab but rather than selecting an adjacent tab, it selects the most # recently used "buffer". # class SbpCloseCurrentViewCommand(SbpWindowCommand): def run_cmd(self, util, n_windows=10): window = sublime.active_window() sorted = ViewState.sorted_views(window, window.active_group()) if len(sorted) > 0: view = sorted.pop(0) window.focus_view(view) window.run_command('close') if len(sorted) > 0: window.focus_view(sorted[0]) else: window.run_command('close') # # Exists only to support kill-line with multiple cursors. # class SbpMoveForKillLineCommand(SbpTextCommand): def run_cmd(self, util, **kwargs): view = self.view state = util.state line_mode = state.argument_supplied count = util.get_count() def advance(cursor): start = cursor.b text,index,region = util.get_line_info(start) if line_mode: # go down N lines for i in range(abs(count)): view.run_command("move", {"by": "lines", "forward": count > 0}) end = util.get_point() if count != 0 and region.contains(end): # same line we started on - must be on the last line of the file end = region.end() if count > 0 else region.begin() else: # beginning of the line we ended up on end = view.line(util.get_point()).begin() util.set_cursors(sublime.Region(end)) else: end = region.end() # check if line is blank from here to the end and if so, delete the \n as well import re if re.match(r'[ \t]*$', text[index:]) and end < util.view.size(): end += 1 return sublime.Region(end, end) util.for_each_cursor(advance) # # Emacs Yank and Yank Pop commands. 
# class SbpYankCommand(SbpTextCommand): def run_cmd(self, util, pop=0, index=None): if pop and util.state.last_cmd != 'sbp_yank': util.set_status("Previous command was not yank!") return view = self.view # Get the cursors as selection, because if there is a selection we want to replace it with # what we're yanking. cursors = list(view.sel()) data = kill_ring.get_current(len(cursors), pop, index) if not data: return if pop != 0: # erase existing regions regions = util.get_regions() if not regions: return for r in reversed(regions): view.erase(util.edit, r) # fetch updated cursors cursors = util.get_cursors() for region, data in reversed(list(zip(cursors, data))): view.replace(util.edit, region, data) util.state.mark_ring.set(util.get_cursors(begin=True), True) util.make_cursors_empty() util.ensure_visible(util.get_last_cursor()) # # Like the yank command except it displays a menu of all the kills and lets you choose which one to # yank. # class SbpChooseAndYank(SbpTextCommand): def run_cmd(self, util, all_cursors=False): # items is an array of (index, text) pairs items = kill_ring.get_popup_sample(util.view) def on_done(idx): if idx >= 0: index = items[idx][0] if all_cursors: util.run_command("sbp_yank_all_cursors", {"index": index}) else: util.run_command("sbp_yank", {"index": index}) if items: sublime.active_window().show_quick_panel([item[1] for item in items], on_done) else: util.set_status('Nothing in history') # # Like the yank command except this automatically creates the number of cursors you need to handle # the yanked text. For example, if there are 10 yanked regions in the most recent kill, this command # will automatically create 10 cursors on 10 lines, and then perform the yank. 
#
class SbpYankAllCursorsCommand(SbpTextCommand):
    """Yank the current kill entry, giving each killed region its own cursor.

    Inserts one blank line per killed region at point, places a cursor at the
    start of each, then delegates to sbp_yank so every cursor gets a region.
    """
    def run_cmd(self, util, index=None):
        view = self.view

        # request the regions of text from the current kill
        texts = kill_ring.get_current(0, 0, index)
        if texts is None:
            util.set_status("Nothing to yank")
            # BUGFIX: bail out - previously execution fell through and crashed
            # on len(texts) below whenever the kill ring was empty
            return

        # insert the right number of lines
        point = util.get_point()
        view.insert(util.edit, point, "\n" * len(texts))
        regions = (sublime.Region(point + p) for p in range(len(texts)))
        selection = view.sel()
        selection.clear()
        selection.add_all(regions)
        view.run_command("sbp_yank")

#
# A special command that allows us to invoke incremental-search commands from the menu.
#
class SbpIncSearchFromMenuCommand(SbpTextCommand):
    def run_cmd(self, util, **kwargs):
        # defer so the menu has fully closed before isearch grabs focus
        def doit():
            util.run_command("sbp_inc_search", kwargs)
        sublime.set_timeout(doit, 50)

class SbpIncSearchCommand(SbpTextCommand):
    """Entry point for incremental search.

    With no running search (or no cmd) a new ISearchInfo session is opened;
    otherwise cmd names the sub-operation to apply to the active session.
    """
    def run_cmd(self, util, cmd=None, **kwargs):
        info = isearch.info_for(self.view)
        if info is None or cmd is None:
            regex = kwargs.get('regex', False)
            # a prefix argument flips regex/literal mode
            if util.state.argument_supplied:
                regex = not regex
            info = isearch.set_info_for(self.view, isearch.ISearchInfo(self.view, kwargs['forward'], regex))
            info.open()
        else:
            if cmd == "next":
                info.next(**kwargs)
            elif cmd == "pop_one":
                info.pop()
            elif cmd == "pop_group":
                info.pop(True)
            elif cmd == "append_from_cursor":
                info.append_from_cursor()
            elif cmd == "keep_all":
                info.keep_all()
            elif cmd == "done":
                info.done()
            elif cmd == "quit":
                info.quit()
            elif cmd == "yank":
                info.input_view.run_command("sbp_yank")
            elif cmd == "set_search":
                # replace the search string wholesale and move to its end
                view = info.input_view
                view.replace(util.edit, sublime.Region(0, view.size()), kwargs['text'])
                view.run_command("move_to", {"to": "eof"})
            elif cmd == "history":
                info.history(**kwargs)
            else:
                print("Not handling cmd", cmd, kwargs)

    def is_visible(self, **kwargs):
        # REMIND: is it not possible to invoke isearch from the menu for some reason. I think the
        # problem is that a focus thing is happening and we're dismissing ourselves as a result. So
        # for now we hide it.
        return True

class SbpIncSearchEscapeCommand(SbpTextCommand):
    # unregistered = True

    def run_cmd(self, util, next_cmd, next_args):
        # close the running isearch, then forward the pending command
        info = isearch.info_for(self.view)
        info.done()
        if next_cmd in ("show_overlay",):
            sublime.active_window().run_command(next_cmd, next_args)
        else:
            info.view.run_command(next_cmd, next_args)

#
# Indent for tab command. If the cursor is not within the existing indent, just call reindent. If
# the cursor is within the indent, move to the start of the indent and call reindent. If the cursor
# was already at the indent didn't change after calling reindent, indent one more level.
#
class SbpTabCmdCommand(SbpTextCommand):
    def run_cmd(self, util, indent_on_repeat=False):
        point = util.get_point()
        indent, cursor = util.get_line_indent(point)
        tab_size = util.get_tab_size()
        if util.state.active_mark or cursor > indent:
            util.run_command("reindent", {})
        else:
            if indent_on_repeat and util.state.last_cmd == util.state.this_cmd:
                # tab pressed again right after tabbing: indent one more level
                util.run_command("indent", {})
            else:
                # sublime gets screwy with indent if you're not currently a multiple of tab size
                if (indent % tab_size) != 0:
                    delta = tab_size - (indent % tab_size)
                    self.view.run_command("insert", {"characters": " " * delta})
                if cursor < indent:
                    util.run_command("move_to", {"to": "bol", "extend": False})

                # re-indent and then if we're in the same place, indent another level
                util.run_command("reindent", {})
                indent2, cursor2 = util.get_line_indent(point)
                if indent2 == indent:
                    util.run_command("indent", {})

#
# A quit command which is basically a no-op unless there are multiple cursors or a selection, in
# which case it tries to pick one end or the other to make the single selection.
# class SbpQuitCommand(SbpTextCommand): def run_cmd(self, util, favor_side="start"): window = self.view.window() # get all the regions regions = list(self.view.sel()) if not util.all_empty_regions(regions): util.make_cursors_empty(to_start=favor_side == "start") util.toggle_active_mark_mode(False) return # If there is a selection or multiple cursors, set point to the end of it that is visible OR # if neither the start nor end is visible, go to whichever is closest. if regions and regions[0].begin() != regions[-1].end(): start = regions[0].a end = regions[-1].b favor_start = favor_side == "start" favor_end = favor_side == "end" start_visible = util.is_visible(start) end_visible = util.is_visible(end) pos = None if not (start_visible or end_visible): # pick whichever side is closest visible = self.view.visible_region() if abs(visible.begin() - start) < abs(visible.end() - end): pos = start else: pos = end elif len(regions) > 1: if favor_start and start_visible: pos = start elif favor_end and end_visible: pos = end elif start_visible: pos = start elif end_visible: pos = end # default value for pos is the current end of the single selection if pos is None: pos = regions[-1].b else: regions = sublime.Region(pos) util.set_selection(regions) util.ensure_visible(regions) return # # Cancel the mark if it's visible and we're supposed to. # if settings_helper.get("sbp_cancel_mark_enabled", False): # if util.state.mark_ring.has_visible_mark(): util.run_command("sbp_cancel_mark") # # A class which knows how to ask for a single character and then does something with it. 
# class AskCharOrStringBase(SbpTextCommand): def run_cmd(self, util, prompt="Type character"): self.util = util self.window = self.view.window() self.count = util.get_count() self.mode = "char" # kick things off by showing the panel self.window.show_input_panel(prompt, "", self.on_done, self.on_change, None) def on_change(self, content): # on_change is notified immediate upon showing the panel before a key is even pressed if self.mode == "string" or len(content) < 1: return self.process_cursors(content) def process_cursors(self, content): util = self.util self.window.run_command("hide_panel") count = abs(self.count) for i in range(count): self.last_iteration = (i == count - 1) util.for_each_cursor(self.process_one, content) def on_done(self, content): if self.mode == "string": self.process_cursors(content) # # Jump to char command inputs one character and jumps to it. If include_char is True it goes just past # the character in question, otherwise it stops just before it. # class SbpJumpToCharCommand(AskCharOrStringBase): def run_cmd(self, util, *args, include_char=True, **kwargs): if 'prompt' not in kwargs: kwargs['prompt'] = "Jump to char: " super(SbpJumpToCharCommand, self).run_cmd(util, *args, **kwargs) self.include_char = include_char def process_one(self, cursor, ch): r = self.view.find(ch, cursor.end(), sublime.LITERAL) if r: p = r.begin() if self.include_char or not self.last_iteration: # advance one more if this is not the last_iteration or else we'll forever be stuck # at the same position p += 1 return p return None class SbpZapToCharCommand(SbpJumpToCharCommand): is_kill_cmd = True def run_cmd(self, util, **kwargs): # prepare self.helper = MoveThenDeleteHelper(util) kwargs['prompt'] = "Zap to char: " super(SbpZapToCharCommand, self).run_cmd(util, **kwargs) def process_cursors(self, content): # process cursors does all the work (of jumping) and then ... 
super(SbpZapToCharCommand, self).process_cursors(content) # Save the helper in view state and invoke a command to make use of it. We can't use it now # because we don't have access to a valid edit object, because this function # (process_cursors) is called asynchronously after the original text command has returned. vs = ViewState.get(self.view) vs.pending_move_then_delete_helper = self.helper # ... we can finish what we started self.window.run_command("sbp_finish_move_then_delete") # # A helper class which will simply finish what was started in a previous command that was using a # MoveThenDeleteHelper class. Some commands return before they are finished (e.g., they pop up a # panel) and so we need a new 'edit' instance to be able to perform any edit operations. This is how # we do that. # class SbpFinishMoveThenDeleteCommand(SbpTextCommand): is_kill_cmd = True def run_cmd(self, util): vs = ViewState.get(self.view) helper = vs.pending_move_then_delete_helper vs.pending_move_then_delete_helper = None helper.finish(util) # # Jump to string command inputs a string and jumps to it (case sensitive). # If include_string is True it jumps past the string being searched, # otherwise it stops just before it. 
# class SbpJumpToStringCommand(AskCharOrStringBase): def run_cmd(self, util, *args, include_string=True, **kwargs): if 'prompt' not in kwargs: kwargs['prompt'] = "Jump to string: " super(SbpJumpToStringCommand, self).run_cmd(util, *args, **kwargs) self.mode = "string" self.include_string = include_string def process_one(self, cursor, word): r = self.view.find(word, cursor.end(), sublime.LITERAL) if r: if self.include_string is False: # Jump to beginning of string p = r.begin() else: # Jump to after the string p = r.end() return p return None # Largely unchanged from zap to char command besides calling jump to string class SbpZapToStringCommand(SbpJumpToStringCommand): is_kill_cmd = True def run_cmd(self, util, **kwargs): # prepare self.helper = MoveThenDeleteHelper(util) kwargs['prompt'] = "Zap to string: " super(SbpZapToStringCommand, self).run_cmd(util, **kwargs) def process_cursors(self, content): # process cursors does all the work (of jumping) and then ... super(SbpZapToStringCommand, self).process_cursors(content) # Save the helper in view state and invoke a command to make use of it. We can't use it now # because we don't have access to a valid edit object, because this function # (process_cursors) is called asynchronously after the original text command has returned. vs = ViewState.get(self.view) vs.pending_move_then_delete_helper = self.helper # ... we can finish what we started self.window.run_command("sbp_finish_move_then_delete") # # A single command that does both ensuring newline at end of file AND deleting trailing whitespace. # If this is not a single command, blank spaces at the end of the file will cause an extra newline. # It's important to delete end of line whitespace before doing the end of file newline check. 
#
class SbpTrimTrailingWhiteSpaceAndEnsureNewlineAtEofCommand(sublime_plugin.TextCommand):
    """Trim trailing whitespace and/or ensure the buffer ends with a newline.

    Both fixes live in one command because whitespace must be trimmed before
    the final-newline check; otherwise blanks at EOF would cause an extra
    newline to be inserted.
    """
    def run(self, edit, trim_whitespace, ensure_newline):
        # make sure you trim trailing whitespace FIRST and THEN check for Newline
        if trim_whitespace:
            trailing_white_space = self.view.find_all("[\t ]+$")
            # erase back-to-front so earlier regions stay valid as text shrinks
            trailing_white_space.reverse()
            for r in trailing_white_space:
                self.view.erase(edit, r)
        if ensure_newline:
            if self.view.size() > 0 and self.view.substr(self.view.size() - 1) != '\n':
                self.view.insert(edit, self.view.size(), "\n")

class SbpPreSaveWhiteSpaceHook(sublime_plugin.EventListener):
    """Run the trim/newline command just before a buffer is saved."""
    def on_pre_save(self, view):
        # '== True' deliberately requires a literal boolean True in settings
        trim = settings_helper.get("sbp_trim_trailing_white_space_on_save") == True
        ensure = settings_helper.get("sbp_ensure_newline_at_eof_on_save") == True
        if trim or ensure:
            view.run_command("sbp_trim_trailing_white_space_and_ensure_newline_at_eof",
                             {"trim_whitespace": trim, "ensure_newline": ensure})

#
# Function to dedup views in all the groups of the specified window. This does not close views that
# have changes because that causes a warning to popup. So we have a monitor which dedups views
# whenever a file is saved in order to dedup them then when it's safe.
#
def dedup_views(window):
    """Close older duplicate views of the same buffer in every group of window.

    Dirty buffers are skipped (closing them would pop a save-confirmation
    dialog). Focus is restored to each group's active view and finally to the
    originally active group.
    """
    # remember the current group so we can focus back to it when we're done
    group = window.active_group()
    for g in range(window.num_groups()):
        # get views for current group sorted by most recently used
        active = window.active_view_in_group(g)
        views = ViewState.sorted_views(window, g)
        view_by_buffer_id = dict()
        for v in views:
            if v.is_dirty():
                # we cannot nuke a dirty buffer or we'll get an annoying popup
                continue
            # renamed from 'id' to avoid shadowing the builtin
            buf_id = v.buffer_id()
            if buf_id in view_by_buffer_id:
                # already have a view with this buffer - so nuke this one - it's older
                window.focus_view(v)
                window.run_command('close')
            else:
                view_by_buffer_id[buf_id] = v
        window.focus_view(active)
    window.focus_group(group)

def plugin_loaded():
    """Sublime plugin entry point: initialize subsystems, then preprocess this module."""
    kill_ring.initialize()
    isearch.initialize()

    # preprocess this module
    preprocess_module(sys.modules[__name__])
{ "content_hash": "f55f7bad9d6ab15eb9c95ac3c3b77e6c", "timestamp": "", "source": "github", "line_count": 1587, "max_line_length": 151, "avg_line_length": 38.253308128544425, "alnum_prop": 0.5851617579231733, "repo_name": "sublime-emacs/sublemacspro", "id": "5db742cb5382128b743bb8d8e3dc72139cf2464f", "size": "60708", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "jove.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "148062" } ], "symlink_target": "" }
from django.conf.urls import patterns, include, url
from django.contrib import admin

# URL routes for the Alexandria.views module.
urlpatterns = patterns('Alexandria.views',
    url(r'^admin/', include(admin.site.urls)),
    url(r'dialog', 'publish_dialog'),
    url(r'api/publish', 'publish'),
    # BUGFIX: the pattern previously contained a stray backtick (r'api/query`'),
    # so requests to /api/query could never match this route.
    url(r'api/query', 'query'),
    url(r'api/request_authorization', 'request_authorization'),
    url(r'api/append_transfer_query', 'append_transfer_query'),
    # NOTE(review): unlike ^admin/, these patterns are unanchored; consider
    # prefixing ^ so e.g. 'dialog' cannot match in the middle of another path.
)
{ "content_hash": "354381c14b97bd74360d9fc4c44f7f64", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 63, "avg_line_length": 37.54545454545455, "alnum_prop": 0.6924939467312349, "repo_name": "priestc/Alexandria", "id": "05891db6b2e9d47d79730c387353e465e53c119c", "size": "413", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Alexandria/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "1262" }, { "name": "Python", "bytes": "10382" } ], "symlink_target": "" }
import activate_venv
from veevutils import banner
from veevutils import parse_slide

import argparse
import glob
import os
import shutil
import sys
import textwrap
import fnmatch
import pyparsing as pp
import re
import eco
import concurrent.futures

def load_html_files(dir):
    """Load every non-index .htm/.html file in *dir* into a {basename: source} dict."""
    html_dict = {}
    html_files = glob.glob(os.path.join(dir, '*.htm*'))

    for file in html_files:
        # BUGFIX: match against the basename; matching the full path meant the
        # "index.*" exclusion never fired when dir was non-empty.
        if not fnmatch.fnmatch(os.path.basename(file), "index.*"):
            with open(file, 'r') as f:
                html_dict[os.path.basename(file)] = f.read()

    return html_dict

def parse_header(file):
    """Split a slide file into its ----delimited key:value header and its body.

    Returns {"context": header_dict, "src": remaining_source}. When the file
    has no header, context is empty and src is the entire file.
    """
    src = ""
    with open(file) as f:
        src = f.read()

    config_reader = re.compile("^(?:---\n)(.*)(?:\n---\n)(.*)", flags=re.DOTALL)
    pieces = config_reader.match(src)

    if pieces is None:  # no config found, return source
        return {"context": {}, "src": src}

    config = pieces.group(1)
    remaining = pieces.group(2)

    # pyparsing ignores newlines a whitespace by default
    # NOTE(review): this mutates *global* pyparsing state and affects any
    # grammar built later in this process - confirm that is acceptable.
    pp.ParserElement.setDefaultWhitespaceChars("")

    ws = (pp.OneOrMore(" ") ^ pp.OneOrMore("\t")).suppress()
    eol = pp.OneOrMore(pp.ZeroOrMore("\r") + pp.OneOrMore("\n"))
    key = pp.Regex("[^0-9\s:]+")
    value = pp.Regex("[^\r\n]+")
    row = key + pp.ZeroOrMore(ws) + pp.Literal(":").suppress() + pp.ZeroOrMore(ws) + value

    # create the environment dict
    dict = {}
    matches = row.scanString(config)
    for match in matches:
        kv_pair = match[0]
        dict[kv_pair[0]] = kv_pair[1]

    return {"context": dict, "src": remaining}

def render_slide(file, templates, partials):
    """Render one slide file with eco, wrapping it in its named template (if any)."""
    header = parse_header(file)
    slide_src = header["src"]

    template_name = header.get("context", {}).get("template", None)
    template_src = templates.get(template_name, None)

    template_config = {"partial": partials}

    if template_src is not None:
        # bind the slide source to the template's `contents` variable
        template_config["contents"] = slide_src
        eco_ctx = eco.context_for(template_src)
    else:
        eco_ctx = eco.context_for(slide_src)

    # merge with header context dictionary (header keys win over defaults)
    return eco_ctx.call("render", dict(template_config, **header["context"]))

def render_one(src, slide, dest, templates, partials, verbose=False):
    """Render the html files of one slide folder into dest, copying non-html slides as-is."""
    html_files = glob.glob(os.path.join(src, slide, "*.htm*"))

    if not os.path.exists(os.path.join(dest, slide)):
        os.makedirs(os.path.join(dest, slide))

    for file in html_files:
        if not fnmatch.fnmatch(file, "*/index.htm*"):
            if verbose: print("Rendering %s" % file)
            html_basename = os.path.basename(file)
            if verbose: print(os.path.join(dest, slide, html_basename))

            rendered = render_slide(file, templates, partials)
            html_path = os.path.join(dest, slide, html_basename)

            with open(html_path, 'w') as f:
                f.write(rendered)

    # make sure non-html slides get "rendered" (i.e. copied) too
    slide_info = parse_slide(os.path.join(src, slide))
    if slide_info is not None:
        if slide_info.extension != ".htm" and slide_info.extension != ".html":
            shutil.copy2(slide_info.full_path, os.path.join(dest, slide))

def render_slides(src, dest, templates_dir, partials_dir, verbose=True):
    """Render every slide folder under src sequentially."""
    if verbose: print("Loading templates...")
    templates = load_html_files(templates_dir)

    if verbose: print("Loading partials...")
    partials = load_html_files(partials_dir)

    slides = next(os.walk(src))[1]  # (root, dirs, files)
    for slide in slides:
        render_one(src, slide, dest, templates, partials, verbose)

def render_slides_async(src, dest, templates_dir, partials_dir, verbose=True):
    """Render every slide folder under src in parallel worker processes."""
    if verbose: print("Loading templates...")
    templates = load_html_files(templates_dir)

    if verbose: print("Loading partials...")
    partials = load_html_files(partials_dir)

    slides = next(os.walk(src))[1]  # (root, dirs, files)

    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = {executor.submit(render_one, src, slide, dest, templates, partials, verbose): slide
                   for slide in slides}
        for future in concurrent.futures.as_completed(futures):
            # result() re-raises any worker exception (the old
            # `except Exception as e: raise e` was a redundant no-op)
            future.result()

def runScript(ASYNC=False):
    """CLI entry point; returns 2 when invoked with no arguments.

    NOTE: the --notparallel flag always overrides the ASYNC parameter, which
    is kept only for backward compatibility with existing callers.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=banner(subtitle="Template Renderer"))

    parser.add_argument("source", nargs=1, help="Source folder")
    parser.add_argument("destination", nargs=1, help="Destination folder")
    parser.add_argument("templates", nargs=1, help="Templates folder")
    parser.add_argument("partials", nargs=1, help="Partials folder")
    parser.add_argument("--notparallel", action="store_true", help="Run without concurrency")
    parser.add_argument("--root", nargs=1, help="Project root folder", required=False)
    parser.add_argument("--verbose", action="store_true", help="Chatty Cathy", required=False)

    if len(sys.argv) == 1:
        parser.print_help()
        return 2

    args = parser.parse_args()

    VERBOSE = args.verbose
    ASYNC = (not args.notparallel)
    SOURCE = args.source[0]
    DEST = args.destination[0]
    TEMPS = args.templates[0]
    PARTS = args.partials[0]

    if args.root is not None:
        # resolve all four folders relative to the project root
        ROOT = args.root[0]
        SOURCE = os.path.join(ROOT, SOURCE)
        DEST = os.path.join(ROOT, DEST)
        TEMPS = os.path.join(ROOT, TEMPS)
        PARTS = os.path.join(ROOT, PARTS)

    if ASYNC:
        render_slides_async(SOURCE, DEST, TEMPS, PARTS, VERBOSE)
    else:
        render_slides(SOURCE, DEST, TEMPS, PARTS, VERBOSE)

if __name__ == '__main__':
    sys.exit(runScript())
{ "content_hash": "c7f249fa3b93a30884bc7412d357e5da", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 116, "avg_line_length": 29.857142857142858, "alnum_prop": 0.6966507177033493, "repo_name": "drewsynan/VELVEEVA", "id": "c0f043798d63999ab3641313b0813cf2a05e5126", "size": "5248", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lib/templates.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "875" }, { "name": "Makefile", "bytes": "429" }, { "name": "Python", "bytes": "104810" }, { "name": "Shell", "bytes": "5852" } ], "symlink_target": "" }
from django.db import models

import secretballot


class Link(models.Model):
    # A voteable URL; secretballot attaches its default vote fields/managers.
    url = models.URLField()

secretballot.enable_voting_on(Link)


# used for testing field renames
class WeirdLink(models.Model):
    # Same shape as Link, but every secretballot attribute is renamed below.
    url = models.URLField()

secretballot.enable_voting_on(
    WeirdLink,
    votes_name='vs',
    upvotes_name='total_upvs',
    downvotes_name='total_downvs',
    total_name='v_total',
    add_vote_name='add_v',
    remove_vote_name='remove_v',
)

# TODO?: manager name & base_manager?
{ "content_hash": "8307f8235190e046f58957271767c667", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 60, "avg_line_length": 30.727272727272727, "alnum_prop": 0.5088757396449705, "repo_name": "eugena/django-secretballot", "id": "f38575a8efe2424cc5c655e735508da89f0620ea", "size": "676", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/models.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "7" }, { "name": "Python", "bytes": "19363" } ], "symlink_target": "" }
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Initial schema migration for the ``finance`` app.

    Auto-generated by South. Creates the Category, Asset, Account, Equity,
    Liability and Transaction tables; each model inherits from
    ``core.Object`` via the ``object_ptr`` one-to-one primary key
    (multi-table inheritance). Do not hand-edit the frozen ``models`` dict.
    """

    def forwards(self, orm):
        """Create all finance tables and emit South's post-create signals."""
        # Adding model 'Category'
        db.create_table('finance_category', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('finance', ['Category'])

        # Adding model 'Asset'
        db.create_table('finance_asset', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('asset_type', self.gf('django.db.models.fields.CharField')(default='fixed', max_length=32)),
            ('initial_value', self.gf('django.db.models.fields.FloatField')(default=0)),
            ('lifetime', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('endlife_value', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('depreciation_rate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
            ('depreciation_type', self.gf('django.db.models.fields.CharField')(default='straight', max_length=32, null=True, blank=True)),
            ('purchase_date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now, null=True, blank=True)),
            ('current_value', self.gf('django.db.models.fields.FloatField')(default=0)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['identities.Contact'])),
            ('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('finance', ['Asset'])

        # Adding model 'Account'
        db.create_table('finance_account', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['identities.Contact'])),
            ('balance', self.gf('django.db.models.fields.FloatField')(default=0)),
            ('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('finance', ['Account'])

        # Adding model 'Equity'
        db.create_table('finance_equity', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('equity_type', self.gf('django.db.models.fields.CharField')(default='share', max_length=32)),
            ('issue_price', self.gf('django.db.models.fields.FloatField')()),
            ('sell_price', self.gf('django.db.models.fields.FloatField')()),
            # Two FKs to Contact need distinct related_names to avoid clashes.
            ('issuer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_equity_issued', to=orm['identities.Contact'])),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_equity_owned', to=orm['identities.Contact'])),
            ('amount', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
            ('purchase_date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime.now)),
            ('details', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('finance', ['Equity'])

        # Adding model 'Liability'
        db.create_table('finance_liability', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_liability_source', to=orm['identities.Contact'])),
            ('target', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_liability_target', to=orm['identities.Contact'])),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['finance.Category'], null=True, blank=True)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['finance.Account'])),
            ('due_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('value', self.gf('django.db.models.fields.FloatField')()),
            ('details', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('finance', ['Liability'])

        # Adding model 'Transaction'
        db.create_table('finance_transaction', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_transaction_source', to=orm['identities.Contact'])),
            ('target', self.gf('django.db.models.fields.related.ForeignKey')(related_name='finance_transaction_target', to=orm['identities.Contact'])),
            ('liability', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['finance.Liability'], null=True, blank=True)),
            ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['finance.Category'], null=True, blank=True)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['finance.Account'])),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('value', self.gf('django.db.models.fields.FloatField')()),
            ('details', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('finance', ['Transaction'])

    def backwards(self, orm):
        """Reverse of forwards(): drop every table created above."""
        # Deleting model 'Category'
        db.delete_table('finance_category')

        # Deleting model 'Asset'
        db.delete_table('finance_asset')

        # Deleting model 'Account'
        db.delete_table('finance_account')

        # Deleting model 'Equity'
        db.delete_table('finance_equity')

        # Deleting model 'Liability'
        db.delete_table('finance_liability')

        # Deleting model 'Transaction'
        db.delete_table('finance_transaction')

    # Frozen ORM snapshot used by South to build the `orm` object above;
    # auto-generated, keep in sync with the models at the time of this
    # migration rather than editing by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
        },
        'core.object': {
            'Meta': {'object_name': 'Object'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
            'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
            'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
            'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User'},
            'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'finance.account': {
            'Meta': {'ordering': "['name']", 'object_name': 'Account', '_ormbases': ['core.Object']},
            'balance': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"})
        },
        'finance.asset': {
            'Meta': {'ordering': "['-purchase_date']", 'object_name': 'Asset', '_ormbases': ['core.Object']},
            'asset_type': ('django.db.models.fields.CharField', [], {'default': "'fixed'", 'max_length': '32'}),
            'current_value': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'depreciation_rate': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'depreciation_type': ('django.db.models.fields.CharField', [], {'default': "'straight'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'endlife_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'initial_value': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'lifetime': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
            'purchase_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'})
        },
        'finance.category': {
            'Meta': {'object_name': 'Category', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
        },
        'finance.equity': {
            'Meta': {'ordering': "['-purchase_date']", 'object_name': 'Equity', '_ormbases': ['core.Object']},
            'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'equity_type': ('django.db.models.fields.CharField', [], {'default': "'share'", 'max_length': '32'}),
            'issue_price': ('django.db.models.fields.FloatField', [], {}),
            'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_equity_issued'", 'to': "orm['identities.Contact']"}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_equity_owned'", 'to': "orm['identities.Contact']"}),
            'purchase_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
            'sell_price': ('django.db.models.fields.FloatField', [], {})
        },
        'finance.liability': {
            'Meta': {'ordering': "['-due_date']", 'object_name': 'Liability', '_ormbases': ['core.Object']},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_source'", 'to': "orm['identities.Contact']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_target'", 'to': "orm['identities.Contact']"}),
            'value': ('django.db.models.fields.FloatField', [], {})
        },
        'finance.transaction': {
            'Meta': {'ordering': "['-datetime']", 'object_name': 'Transaction', '_ormbases': ['core.Object']},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'liability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Liability']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_source'", 'to': "orm['identities.Contact']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_target'", 'to': "orm['identities.Contact']"}),
            'value': ('django.db.models.fields.FloatField', [], {})
        },
        'identities.contact': {
            'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
            'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'})
        },
        'identities.contactfield': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
            'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'identities.contacttype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }

    complete_apps = ['finance']
{ "content_hash": "e9ca37eea0b717dfbccd6d140495afb0", "timestamp": "", "source": "github", "line_count": 325, "max_line_length": 203, "avg_line_length": 72.11076923076924, "alnum_prop": 0.5596518177163339, "repo_name": "hellfish2/treeio", "id": "a64c95878d12332dc3b6a5e6fe5408a03a0673be", "size": "23567", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "finance/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
class Duck:
    """Models a duck that can report its sound and its covering."""

    def talk(self):
        """Print the noise a duck makes."""
        print('\nDuck Says: Quack!')

    def coat(self):
        """Print what covers a duck's body."""
        print('Duck Wears: Feathers')
{ "content_hash": "5838203ef2e936476d6699723b917f31", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 41, "avg_line_length": 18.333333333333332, "alnum_prop": 0.6060606060606061, "repo_name": "shahjalalh/tutorials", "id": "5f97c1d1733e8acf9b26a303f48455df07a4e6da", "size": "165", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/3 OOP with Python/Duck.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "12019" }, { "name": "Batchfile", "bytes": "9424" }, { "name": "CSS", "bytes": "330117" }, { "name": "HTML", "bytes": "15939" }, { "name": "Java", "bytes": "224855" }, { "name": "JavaScript", "bytes": "790940" }, { "name": "PHP", "bytes": "24626998" }, { "name": "Python", "bytes": "13187" }, { "name": "Shell", "bytes": "12510" } ], "symlink_target": "" }
""" Slack platform for notify component. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.slack/ """ import logging import voluptuous as vol from homeassistant.components.notify import ( ATTR_TARGET, PLATFORM_SCHEMA, BaseNotificationService) from homeassistant.const import ( CONF_API_KEY, CONF_USERNAME, CONF_ICON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['slacker==0.9.42'] _LOGGER = logging.getLogger(__name__) CONF_CHANNEL = 'default_channel' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_CHANNEL): cv.string, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_ICON): cv.string, }) # pylint: disable=unused-variable def get_service(hass, config, discovery_info=None): """Get the Slack notification service.""" import slacker try: return SlackNotificationService( config[CONF_CHANNEL], config[CONF_API_KEY], config.get(CONF_USERNAME, None), config.get(CONF_ICON, None)) except slacker.Error: _LOGGER.exception("Slack authentication failed") return None class SlackNotificationService(BaseNotificationService): """Implement the notification service for Slack.""" def __init__(self, default_channel, api_token, username, icon): """Initialize the service.""" from slacker import Slacker self._default_channel = default_channel self._api_token = api_token self._username = username self._icon = icon if self._username or self._icon: self._as_user = False else: self._as_user = True self.slack = Slacker(self._api_token) self.slack.auth.test() def send_message(self, message="", **kwargs): """Send a message to a user.""" import slacker if kwargs.get(ATTR_TARGET) is None: targets = [self._default_channel] else: targets = kwargs.get(ATTR_TARGET) data = kwargs.get('data') attachments = data.get('attachments') if data else None for target in targets: try: self.slack.chat.post_message(target, message, as_user=self._as_user, 
username=self._username, icon_emoji=self._icon, attachments=attachments, link_names=True) except slacker.Error as err: _LOGGER.error("Could not send slack notification. Error: %s", err)
{ "content_hash": "1c373a42e65554021fe1623c47882f1b", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 77, "avg_line_length": 31.897727272727273, "alnum_prop": 0.5920912005700035, "repo_name": "morphis/home-assistant", "id": "b9f33c95d43141bdde36b687dc5e15694bd71a0f", "size": "2807", "binary": false, "copies": "8", "ref": "refs/heads/snap-support", "path": "homeassistant/components/notify/slack.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1601137" }, { "name": "Python", "bytes": "5600477" }, { "name": "Ruby", "bytes": "517" }, { "name": "Shell", "bytes": "15144" } ], "symlink_target": "" }
"""Fixture module of intentionally buggy generator tests.

These generators deliberately raise ZeroDivisionError — either before the
first yield or partway through iteration — so a test runner's handling of
failing generator tests can itself be tested. Do NOT "fix" the 1 // 0
expressions; the failures are the point.
"""


def test_generator_fails_before_yield():
    # Deliberate ZeroDivisionError before any test case is yielded.
    a = 1 // 0
    yield lambda: True


def test_generator_fails_during_iteration():
    # Yields a few cases, then deliberately raises ZeroDivisionError
    # when the loop reaches i == 0.
    for i in [1, 2, 3, 0, 5, 6]:
        a = 1 // i
        yield lambda: True


def test_ok():
    # Control case: a plain test that passes.
    pass


class TestBuggyGenerators(object):
    """Same intentionally buggy generators, but as test-class methods."""

    def test_generator_fails_before_yield(self):
        # Deliberate ZeroDivisionError before any test case is yielded.
        a = 1 // 0
        yield lambda: True

    def test_generator_fails_during_iteration(self):
        # Yields a few cases, then deliberately raises ZeroDivisionError
        # when the loop reaches i == 0.
        for i in [1, 2, 3, 0, 5, 6]:
            a = 1 // i
            yield lambda: True

    def test_ok(self):
        # Control case: a plain test that passes.
        pass
{ "content_hash": "9196ac19f2be031549462fb18f298a71", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 52, "avg_line_length": 18.862068965517242, "alnum_prop": 0.5484460694698354, "repo_name": "dbbhattacharya/kitsune", "id": "00e481533b0b1d3cb7bc36383e63363c25ae1728", "size": "547", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "vendor/packages/nose/functional_tests/support/test_buggy_generators.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "2694" }, { "name": "CSS", "bytes": "276585" }, { "name": "HTML", "bytes": "600145" }, { "name": "JavaScript", "bytes": "800276" }, { "name": "Python", "bytes": "2762831" }, { "name": "Shell", "bytes": "6720" }, { "name": "Smarty", "bytes": "1752" } ], "symlink_target": "" }
from django.contrib import admin

from phoneconfirmation.models import (
    MessageTooLong,
    PhoneCountryCode,
    PhoneNumber,
    PhoneConfirmation,
    SMSLog
)


class SMSLogAdmin(admin.ModelAdmin):
    """Admin for SMS log rows; surfaces payload fields as list columns."""

    list_display = ["number", "destination", "message", "mocked", "created",
                    "response_code"]
    list_filter = ["mocked", "created", "response_code"]
    search_fields = ["number"]

    def destination(self, obj):
        """Destination taken from the stored payload, or '' if absent."""
        payload = obj.payload
        return payload.get("destination", "") if payload else ""

    def message(self, obj):
        """Message body taken from the stored payload, or '' if absent."""
        payload = obj.payload
        return payload.get("body", "") if payload else ""


class MessageTooLongAdmin(admin.ModelAdmin):
    """Admin for over-length messages, with a computed length column."""

    list_display = ["notice_type", "created", "length"]
    search_fields = ["message"]
    list_filter = ["notice_type", "created"]

    def length(self, obj):
        """Character count of the stored message."""
        return len(obj.message)


admin.site.register(PhoneCountryCode)
admin.site.register(PhoneNumber)
admin.site.register(PhoneConfirmation)
admin.site.register(SMSLog, SMSLogAdmin)
admin.site.register(MessageTooLong, MessageTooLongAdmin)
{ "content_hash": "5a03e6ea2294ef3cbe89dc86c6c16432", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 93, "avg_line_length": 25.88095238095238, "alnum_prop": 0.6706531738730451, "repo_name": "jawed123/pinax-phone-confirmation", "id": "bb2c89d1f8c3511170798ed1f1b81ed14bdb04d7", "size": "1087", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "phoneconfirmation/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "40" }, { "name": "Makefile", "bytes": "195" }, { "name": "Python", "bytes": "24504" } ], "symlink_target": "" }
import os

from oslo_log import log as logging
from oslo_service import periodic_task

from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_ins
from trove.guestagent.datastore.experimental.vertica.service import (
    VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
from trove.guestagent import dbaas
from trove.guestagent import volume


LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Fall back to 'vertica' when CONF.datastore_manager is unset/empty.
MANAGER = 'vertica' if not CONF.datastore_manager else CONF.datastore_manager


class Manager(periodic_task.PeriodicTasks):
    """Guestagent RPC manager for the Vertica datastore.

    Thin dispatcher: volume/filesystem work goes to trove.guestagent.volume
    and dbaas; datastore work goes to VerticaApp/VerticaAppStatus. User and
    database management operations are not supported for Vertica and raise
    DatastoreOperationNotSupported.
    """

    def __init__(self):
        self.appStatus = VerticaAppStatus()
        self.app = VerticaApp(self.appStatus)
        super(Manager, self).__init__(CONF)

    @periodic_task.periodic_task
    def update_status(self, context):
        """Update the status of the Vertica service."""
        self.appStatus.update()

    def rpc_ping(self, context):
        """Liveness check for the RPC layer; always returns True."""
        LOG.debug("Responding to RPC ping.")
        return True

    def prepare(self, context, packages, databases, memory_mb, users,
                device_path=None, mount_point=None, backup_info=None,
                config_contents=None, root_password=None, overrides=None,
                cluster_config=None, snapshot=None,
                path_exists_function=os.path.exists):
        """Makes ready DBAAS on a Guest container.

        path_exists_function is injectable — presumably to ease unit
        testing of the data-migration branch (TODO confirm).
        Failures are not re-raised: the instance status is set to FAILED
        and the exception is logged.
        """
        try:
            LOG.info(_("Setting instance status to BUILDING."))
            self.appStatus.begin_install()
            if device_path:
                device = volume.VolumeDevice(device_path)
                # unmount if device is already mounted
                device.unmount_device(device_path)
                device.format()
                if path_exists_function(mount_point):
                    # rsync any existing data
                    device.migrate_data(mount_point)
                # mount the volume
                device.mount(mount_point)
                LOG.debug("Mounted the volume.")
            self.app.install_if_needed(packages)
            self.app.prepare_for_install_vertica()
            if cluster_config is None:
                # Standalone instance: install, create the DB and finish.
                self.app.install_vertica()
                self.app.create_db()
                self.app.complete_install_or_restart()
            elif cluster_config['instance_type'] == "member":
                # Cluster member: installation is completed later via
                # install_cluster(); just mark the build as pending.
                self.appStatus.set_status(rd_ins.ServiceStatuses.BUILD_PENDING)
            else:
                LOG.error(_("Bad cluster configuration; instance type "
                            "given as %s.") % cluster_config['instance_type'])
                raise RuntimeError("Bad cluster configuration.")
            LOG.info(_('Completed setup of Vertica database instance.'))
        except Exception:
            LOG.exception(_('Cannot prepare Vertica database instance.'))
            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)

    def restart(self, context):
        """Restart the Vertica service."""
        LOG.debug("Restarting the database.")
        self.app.restart()
        LOG.debug("Restarted the database.")

    def get_filesystem_stats(self, context, fs_path):
        """Gets the filesystem stats for the path given."""
        LOG.debug("Finding the file-systems stats.")
        # Note: stats are taken for the configured mount point, not fs_path.
        mount_point = CONF.get(MANAGER).mount_point
        return dbaas.get_filesystem_volume_stats(mount_point)

    def stop_db(self, context, do_not_start_on_reboot=False):
        """Stop the Vertica service."""
        LOG.debug("Stopping the database.")
        self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
        LOG.debug("Stopped the database.")

    def mount_volume(self, context, device_path=None, mount_point=None):
        """Mount the data volume (without touching fstab)."""
        LOG.debug("Mounting the volume.")
        device = volume.VolumeDevice(device_path)
        device.mount(mount_point, write_to_fstab=False)
        LOG.debug("Mounted the volume.")

    def unmount_volume(self, context, device_path=None, mount_point=None):
        """Unmount the data volume."""
        LOG.debug("Unmounting the volume.")
        device = volume.VolumeDevice(device_path)
        device.unmount(mount_point)
        LOG.debug("Unmounted the volume.")

    def resize_fs(self, context, device_path=None, mount_point=None):
        """Grow the filesystem to fill a resized volume."""
        LOG.debug("Resizing the filesystem.")
        device = volume.VolumeDevice(device_path)
        device.resize_fs(mount_point)
        LOG.debug("Resized the filesystem.")

    def reset_configuration(self, context, configuration):
        """
        Currently this method does nothing. This method needs to be
        implemented to enable rollback of flavor-resize on guestagent side.
        """
        LOG.debug("Resetting Vertica configuration.")
        pass

    # --- User/database management: unsupported for Vertica. ---

    def change_passwords(self, context, users):
        LOG.debug("Changing password.")
        raise exception.DatastoreOperationNotSupported(
            operation='change_passwords', datastore=MANAGER)

    def update_attributes(self, context, username, hostname, user_attrs):
        LOG.debug("Updating database attributes.")
        raise exception.DatastoreOperationNotSupported(
            operation='update_attributes', datastore=MANAGER)

    def create_database(self, context, databases):
        LOG.debug("Creating database.")
        raise exception.DatastoreOperationNotSupported(
            operation='create_database', datastore=MANAGER)

    def create_user(self, context, users):
        LOG.debug("Creating user.")
        raise exception.DatastoreOperationNotSupported(
            operation='create_user', datastore=MANAGER)

    def delete_database(self, context, database):
        LOG.debug("Deleting database.")
        raise exception.DatastoreOperationNotSupported(
            operation='delete_database', datastore=MANAGER)

    def delete_user(self, context, user):
        LOG.debug("Deleting user.")
        raise exception.DatastoreOperationNotSupported(
            operation='delete_user', datastore=MANAGER)

    def get_user(self, context, username, hostname):
        LOG.debug("Getting user.")
        raise exception.DatastoreOperationNotSupported(
            operation='get_user', datastore=MANAGER)

    def grant_access(self, context, username, hostname, databases):
        # NOTE(review): "acccess" typo in the log message below.
        LOG.debug("Granting acccess.")
        raise exception.DatastoreOperationNotSupported(
            operation='grant_access', datastore=MANAGER)

    def revoke_access(self, context, username, hostname, database):
        LOG.debug("Revoking access.")
        raise exception.DatastoreOperationNotSupported(
            operation='revoke_access', datastore=MANAGER)

    def list_access(self, context, username, hostname):
        LOG.debug("Listing access.")
        raise exception.DatastoreOperationNotSupported(
            operation='list_access', datastore=MANAGER)

    def list_databases(self, context, limit=None, marker=None,
                       include_marker=False):
        LOG.debug("Listing databases.")
        raise exception.DatastoreOperationNotSupported(
            operation='list_databases', datastore=MANAGER)

    def list_users(self, context, limit=None, marker=None,
                   include_marker=False):
        LOG.debug("Listing users.")
        raise exception.DatastoreOperationNotSupported(
            operation='list_users', datastore=MANAGER)

    def enable_root(self, context):
        """Enable root access; delegates to VerticaApp."""
        LOG.debug("Enabling root.")
        return self.app.enable_root()

    def enable_root_with_password(self, context, root_password=None):
        """Enable root access with an explicit password."""
        LOG.debug("Enabling root.")
        return self.app.enable_root(root_password)

    def is_root_enabled(self, context):
        """Return whether root access is enabled."""
        LOG.debug("Checking if root is enabled.")
        return self.app.is_root_enabled()

    def create_backup(self, context, backup_info):
        # Backups are not supported for Vertica.
        LOG.debug("Creating backup.")
        raise exception.DatastoreOperationNotSupported(
            operation='create_backup', datastore=MANAGER)

    def start_db_with_conf_changes(self, context, config_contents):
        """Start the service after applying new configuration contents."""
        LOG.debug("Starting with configuration changes.")
        self.app.start_db_with_conf_changes(config_contents)

    def get_public_keys(self, context, user):
        """Return the public keys for `user` (used for cluster SSH setup)."""
        LOG.debug("Retrieving public keys for %s." % user)
        return self.app.get_public_keys(user)

    def authorize_public_keys(self, context, user, public_keys):
        """Authorize `public_keys` for `user` (used for cluster SSH setup)."""
        LOG.debug("Authorizing public keys for %s." % user)
        return self.app.authorize_public_keys(user, public_keys)

    def install_cluster(self, context, members):
        """Install Vertica across the cluster `members`.

        On failure the instance is marked FAILED and the exception is
        re-raised (unlike prepare(), which swallows it).
        """
        try:
            LOG.debug("Installing cluster on members: %s." % members)
            self.app.install_cluster(members)
            LOG.debug("install_cluster call has finished.")
        except Exception:
            LOG.exception(_('Cluster installation failed.'))
            self.appStatus.set_status(rd_ins.ServiceStatuses.FAILED)
            raise

    def cluster_complete(self, context):
        """Finalize cluster creation by publishing the actual DB status."""
        LOG.debug("Cluster creation complete, starting status checks.")
        status = self.appStatus._get_actual_db_status()
        self.appStatus.set_status(status)
{ "content_hash": "350b0dd2d868b4386933c5bb77b1597d", "timestamp": "", "source": "github", "line_count": 220, "max_line_length": 79, "avg_line_length": 41.127272727272725, "alnum_prop": 0.6501989389920424, "repo_name": "fabian4/trove", "id": "95848eb16e5056f2d52e3819b2729686c347d47c", "size": "9650", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "trove/guestagent/datastore/experimental/vertica/manager.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "88" }, { "name": "CSS", "bytes": "21914" }, { "name": "JavaScript", "bytes": "60526" }, { "name": "Python", "bytes": "3297002" }, { "name": "Shell", "bytes": "15239" }, { "name": "XSLT", "bytes": "50542" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals from time import time from django.contrib.contenttypes.models import ContentType from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse from django.db.models import Count from django.http import Http404, HttpResponse, JsonResponse from django.http.request import QueryDict from django.shortcuts import get_object_or_404, redirect, render from django.template.loader import render_to_string from django.utils import timezone from django.utils.http import is_safe_url, urlquote from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from django.views.decorators.http import require_GET, require_POST from django.views.decorators.vary import vary_on_headers from django.views.generic import View from wagtail.utils.pagination import paginate from wagtail.wagtailadmin import messages, signals from wagtail.wagtailadmin.forms import CopyForm, SearchForm from wagtail.wagtailadmin.utils import ( send_notification, user_has_any_page_permission, user_passes_test) from wagtail.wagtailcore import hooks from wagtail.wagtailcore.models import Page, PageRevision, UserPagePermissionsProxy def get_valid_next_url_from_request(request): next_url = request.POST.get('next') or request.GET.get('next') if not next_url or not is_safe_url(url=next_url, host=request.get_host()): return '' return next_url @user_passes_test(user_has_any_page_permission) def index(request, parent_page_id=None): if parent_page_id: parent_page = get_object_or_404(Page, id=parent_page_id).specific else: parent_page = Page.get_first_root_node().specific pages = parent_page.get_children().prefetch_related('content_type', 'sites_rooted_here') # Get page ordering ordering = request.GET.get('ordering', '-latest_revision_created_at') if ordering not in [ 'title', '-title', 'content_type', '-content_type', 'live', '-live', 'latest_revision_created_at', '-latest_revision_created_at', 'ord' ]: 
ordering = '-latest_revision_created_at' if ordering == 'ord': # preserve the native ordering from get_children() pass elif ordering == 'latest_revision_created_at': # order by oldest revision first. # Special case NULL entries - these should go at the top of the list. # Do this by annotating with Count('latest_revision_created_at'), # which returns 0 for these pages = pages.annotate( null_position=Count('latest_revision_created_at') ).order_by('null_position', 'latest_revision_created_at') elif ordering == '-latest_revision_created_at': # order by oldest revision first. # Special case NULL entries - these should go at the end of the list. pages = pages.annotate( null_position=Count('latest_revision_created_at') ).order_by('-null_position', '-latest_revision_created_at') else: pages = pages.order_by(ordering) # Don't paginate if sorting by page order - all pages must be shown to # allow drag-and-drop reordering do_paginate = ordering != 'ord' if do_paginate: # Retrieve pages in their most specific form. # Only do this for paginated listings, as this could potentially be a # very expensive operation when performed on a large queryset. 
pages = pages.specific() # allow hooks to modify the queryset for hook in hooks.get_hooks('construct_explorer_page_queryset'): pages = hook(parent_page, pages, request) # Pagination if do_paginate: paginator, pages = paginate(request, pages, per_page=50) return render(request, 'wagtailadmin/pages/index.html', { 'parent_page': parent_page.specific, 'ordering': ordering, 'pagination_query_params': "ordering=%s" % ordering, 'pages': pages, 'do_paginate': do_paginate, }) def add_subpage(request, parent_page_id): parent_page = get_object_or_404(Page, id=parent_page_id).specific if not parent_page.permissions_for_user(request.user).can_add_subpage(): raise PermissionDenied page_types = [ (model.get_verbose_name(), model._meta.app_label, model._meta.model_name) for model in type(parent_page).creatable_subpage_models() if model.can_create_at(parent_page) ] # sort by lower-cased version of verbose name page_types.sort(key=lambda page_type: page_type[0].lower()) if len(page_types) == 1: # Only one page type is available - redirect straight to the create form rather than # making the user choose verbose_name, app_label, model_name = page_types[0] return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id) return render(request, 'wagtailadmin/pages/add_subpage.html', { 'parent_page': parent_page, 'page_types': page_types, 'next': get_valid_next_url_from_request(request), }) def content_type_use(request, content_type_app_name, content_type_model_name): try: content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name) except ContentType.DoesNotExist: raise Http404 page_class = content_type.model_class() # page_class must be a Page type and not some other random model if not issubclass(page_class, Page): raise Http404 pages = page_class.objects.all() paginator, pages = paginate(request, pages, per_page=10) return render(request, 'wagtailadmin/pages/content_type_use.html', { 'pages': pages, 'app_name': 
content_type_app_name, 'content_type': content_type, 'page_class': page_class, }) def create(request, content_type_app_name, content_type_model_name, parent_page_id): parent_page = get_object_or_404(Page, id=parent_page_id).specific parent_page_perms = parent_page.permissions_for_user(request.user) if not parent_page_perms.can_add_subpage(): raise PermissionDenied try: content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name) except ContentType.DoesNotExist: raise Http404 # Get class page_class = content_type.model_class() # Make sure the class is a descendant of Page if not issubclass(page_class, Page): raise Http404 # page must be in the list of allowed subpage types for this parent ID if page_class not in parent_page.creatable_subpage_models(): raise PermissionDenied if not page_class.can_create_at(parent_page): raise PermissionDenied for fn in hooks.get_hooks('before_create_page'): result = fn(request, parent_page, page_class) if hasattr(result, 'status_code'): return result page = page_class(owner=request.user) edit_handler_class = page_class.get_edit_handler() form_class = edit_handler_class.get_form_class(page_class) next_url = get_valid_next_url_from_request(request) if request.method == 'POST': form = form_class(request.POST, request.FILES, instance=page, parent_page=parent_page) if form.is_valid(): page = form.save(commit=False) is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage() is_submitting = bool(request.POST.get('action-submit')) if not is_publishing: page.live = False # Save page parent_page.add_child(instance=page) # Save revision revision = page.save_revision( user=request.user, submitted_for_moderation=is_submitting, ) # Publish if is_publishing: revision.publish() # Notifications if is_publishing: if page.go_live_at and page.go_live_at > timezone.now(): messages.success(request, _("Page '{0}' created and scheduled for 
publishing.").format(page.get_admin_display_title()), buttons=[ messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')) ]) else: messages.success(request, _("Page '{0}' created and published.").format(page.get_admin_display_title()), buttons=[ messages.button(page.url, _('View live'), new_window=True), messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')) ]) elif is_submitting: messages.success( request, _("Page '{0}' created and submitted for moderation.").format(page.get_admin_display_title()), buttons=[ messages.button( reverse('wagtailadmin_pages:view_draft', args=(page.id,)), _('View draft'), new_window=True ), messages.button( reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit') ) ] ) if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk): messages.error(request, _("Failed to send notifications to moderators")) else: messages.success(request, _("Page '{0}' created.").format(page.get_admin_display_title())) for fn in hooks.get_hooks('after_create_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result if is_publishing or is_submitting: # we're done here if next_url: # redirect back to 'next' url if present return redirect(next_url) # redirect back to the explorer return redirect('wagtailadmin_explore', page.get_parent().id) else: # Just saving - remain on edit page for further edits target_url = reverse('wagtailadmin_pages:edit', args=[page.id]) if next_url: # Ensure the 'next' url is passed through again if present target_url += '?next=%s' % urlquote(next_url) return redirect(target_url) else: messages.validation_error( request, _("The page could not be created due to validation errors"), form ) edit_handler = edit_handler_class(instance=page, form=form) has_unsaved_changes = True else: signals.init_new_page.send(sender=create, page=page, parent=parent_page) form = form_class(instance=page, parent_page=parent_page) edit_handler = 
edit_handler_class(instance=page, form=form) has_unsaved_changes = False return render(request, 'wagtailadmin/pages/create.html', { 'content_type': content_type, 'page_class': page_class, 'parent_page': parent_page, 'edit_handler': edit_handler, 'preview_modes': page.preview_modes, 'form': form, 'next': next_url, 'has_unsaved_changes': has_unsaved_changes, }) def edit(request, page_id): latest_revision = get_object_or_404(Page, id=page_id).get_latest_revision() page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page() parent = page.get_parent() content_type = ContentType.objects.get_for_model(page) page_class = content_type.model_class() page_perms = page.permissions_for_user(request.user) if not page_perms.can_edit(): raise PermissionDenied for fn in hooks.get_hooks('before_edit_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result edit_handler_class = page_class.get_edit_handler() form_class = edit_handler_class.get_form_class(page_class) next_url = get_valid_next_url_from_request(request) errors_debug = None if request.method == 'POST': form = form_class(request.POST, request.FILES, instance=page, parent_page=parent) if form.is_valid() and not page.locked: page = form.save(commit=False) is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish() is_submitting = bool(request.POST.get('action-submit')) is_reverting = bool(request.POST.get('revision')) # If a revision ID was passed in the form, get that revision so its # date can be referenced in notification messages if is_reverting: previous_revision = get_object_or_404(page.revisions, id=request.POST.get('revision')) # Save revision revision = page.save_revision( user=request.user, submitted_for_moderation=is_submitting, ) # Publish if is_publishing: revision.publish() # Need to reload the page because the URL may have changed, and we # need the up-to-date URL for the "View Live" button. 
page = page.specific_class.objects.get(pk=page.pk) # Notifications if is_publishing: if page.go_live_at and page.go_live_at > timezone.now(): # Page has been scheduled for publishing in the future if is_reverting: message = _( "Revision from {0} of page '{1}' has been scheduled for publishing." ).format( previous_revision.created_at.strftime("%d %b %Y %H:%M"), page.get_admin_display_title() ) else: message = _( "Page '{0}' has been scheduled for publishing." ).format( page.get_admin_display_title() ) messages.success(request, message, buttons=[ messages.button( reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit') ) ]) else: # Page is being published now if is_reverting: message = _( "Revision from {0} of page '{1}' has been published." ).format( previous_revision.created_at.strftime("%d %b %Y %H:%M"), page.get_admin_display_title() ) else: message = _( "Page '{0}' has been published." ).format( page.get_admin_display_title() ) messages.success(request, message, buttons=[ messages.button( page.url, _('View live'), new_window=True ), messages.button( reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit') ) ]) elif is_submitting: message = _( "Page '{0}' has been submitted for moderation." ).format( page.get_admin_display_title() ) messages.success(request, message, buttons=[ messages.button( reverse('wagtailadmin_pages:view_draft', args=(page_id,)), _('View draft'), new_window=True ), messages.button( reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit') ) ]) if not send_notification(page.get_latest_revision().id, 'submitted', request.user.pk): messages.error(request, _("Failed to send notifications to moderators")) else: # Saving if is_reverting: message = _( "Page '{0}' has been replaced with revision from {1}." ).format( page.get_admin_display_title(), previous_revision.created_at.strftime("%d %b %Y %H:%M") ) else: message = _( "Page '{0}' has been updated." 
).format( page.get_admin_display_title() ) messages.success(request, message) for fn in hooks.get_hooks('after_edit_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result if is_publishing or is_submitting: # we're done here - redirect back to the explorer if next_url: # redirect back to 'next' url if present return redirect(next_url) # redirect back to the explorer return redirect('wagtailadmin_explore', page.get_parent().id) else: # Just saving - remain on edit page for further edits target_url = reverse('wagtailadmin_pages:edit', args=[page.id]) if next_url: # Ensure the 'next' url is passed through again if present target_url += '?next=%s' % urlquote(next_url) return redirect(target_url) else: if page.locked: messages.error(request, _("The page could not be saved as it is locked")) else: messages.validation_error( request, _("The page could not be saved due to validation errors"), form ) edit_handler = edit_handler_class(instance=page, form=form) errors_debug = ( repr(edit_handler.form.errors) + repr([ (name, formset.errors) for (name, formset) in edit_handler.form.formsets.items() if formset.errors ]) ) has_unsaved_changes = True else: form = form_class(instance=page, parent_page=parent) edit_handler = edit_handler_class(instance=page, form=form) has_unsaved_changes = False # Check for revisions still undergoing moderation and warn if latest_revision and latest_revision.submitted_for_moderation: buttons = [] if page.live: buttons.append(messages.button( reverse('wagtailadmin_pages:revisions_compare', args=(page.id, 'live', latest_revision.id)), _('Compare with live version') )) messages.warning(request, _("This page is currently awaiting moderation"), buttons=buttons) return render(request, 'wagtailadmin/pages/edit.html', { 'page': page, 'content_type': content_type, 'edit_handler': edit_handler, 'errors_debug': errors_debug, 'preview_modes': page.preview_modes, 'form': form, 'next': next_url, 'has_unsaved_changes': 
has_unsaved_changes, }) def delete(request, page_id): page = get_object_or_404(Page, id=page_id) if not page.permissions_for_user(request.user).can_delete(): raise PermissionDenied for fn in hooks.get_hooks('before_delete_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result next_url = get_valid_next_url_from_request(request) if request.method == 'POST': parent_id = page.get_parent().id page.delete() messages.success(request, _("Page '{0}' deleted.").format(page.get_admin_display_title())) for fn in hooks.get_hooks('after_delete_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result if next_url: return redirect(next_url) return redirect('wagtailadmin_explore', parent_id) return render(request, 'wagtailadmin/pages/confirm_delete.html', { 'page': page, 'descendant_count': page.get_descendant_count(), 'next': next_url, }) def view_draft(request, page_id): page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page() perms = page.permissions_for_user(request.user) if not (perms.can_publish() or perms.can_edit()): raise PermissionDenied return page.serve_preview(page.dummy_request(request), page.default_preview_mode) class PreviewOnEdit(View): http_method_names = ('post', 'get') preview_expiration_timeout = 60 * 60 * 24 # seconds session_key_prefix = 'wagtail-preview-' def remove_old_preview_data(self): expiration = time() - self.preview_expiration_timeout expired_keys = [ k for k, v in self.request.session.items() if k.startswith(self.session_key_prefix) and v[1] < expiration] # Removes the session key gracefully for k in expired_keys: self.request.session.pop(k) @property def session_key(self): return self.session_key_prefix + ','.join(self.args) def get_page(self): return get_object_or_404(Page, id=self.args[0]).get_latest_revision_as_page() def get_form(self): page = self.get_page() form_class = page.get_edit_handler().get_form_class(page._meta.model) parent_page = page.get_parent().specific if 
self.session_key not in self.request.session: # Session key not in session, returning null form return form_class(instance=page, parent_page=parent_page) post_data_dict, timestamp = self.request.session[self.session_key] # convert post_data_dict back into a QueryDict post_data = QueryDict('', mutable=True) for k, v in post_data_dict.items(): post_data.setlist(k, v) return form_class(post_data, instance=page, parent_page=parent_page) def post(self, request, *args, **kwargs): # TODO: Handle request.FILES. # Convert request.POST to a plain dict (rather than a QueryDict) so that it can be # stored without data loss in session data post_data_dict = dict(request.POST.lists()) request.session[self.session_key] = post_data_dict, time() self.remove_old_preview_data() form = self.get_form() return JsonResponse({'is_valid': form.is_valid()}) def error_response(self, page): return render(self.request, 'wagtailadmin/pages/preview_error.html', {'page': page}) def get(self, request, *args, **kwargs): # Receive the form submission that would typically be posted # to the view. If submission is valid, return the rendered page; # if not, re-render the edit form form = self.get_form() page = form.instance if form.is_valid(): form.save(commit=False) preview_mode = request.GET.get('mode', page.default_preview_mode) return page.serve_preview(page.dummy_request(request), preview_mode) return self.error_response(page) class PreviewOnCreate(PreviewOnEdit): def get_page(self): (content_type_app_name, content_type_model_name, parent_page_id) = self.args try: content_type = ContentType.objects.get_by_natural_key( content_type_app_name, content_type_model_name) except ContentType.DoesNotExist: raise Http404 page = content_type.model_class()() parent_page = get_object_or_404(Page, id=parent_page_id).specific # We need to populate treebeard's path / depth fields in order to # pass validation. 
We can't make these 100% consistent with the rest # of the tree without making actual database changes (such as # incrementing the parent's numchild field), but by calling treebeard's # internal _get_path method, we can set a 'realistic' value that will # hopefully enable tree traversal operations # to at least partially work. page.depth = parent_page.depth + 1 # Puts the page at the maximum possible path # for a child of `parent_page`. page.path = Page._get_children_path_interval(parent_page.path)[1] return page def get_form(self): form = super(PreviewOnCreate, self).get_form() if form.is_valid(): # Ensures our unsaved page has a suitable url. form.instance.set_url_path(form.parent_page) form.instance.full_clean() return form def unpublish(request, page_id): page = get_object_or_404(Page, id=page_id).specific user_perms = UserPagePermissionsProxy(request.user) if not user_perms.for_page(page).can_unpublish(): raise PermissionDenied next_url = get_valid_next_url_from_request(request) if request.method == 'POST': include_descendants = request.POST.get("include_descendants", False) page.unpublish() if include_descendants: live_descendant_pages = page.get_descendants().live().specific() for live_descendant_page in live_descendant_pages: if user_perms.for_page(live_descendant_page).can_unpublish(): live_descendant_page.unpublish() messages.success(request, _("Page '{0}' unpublished.").format(page.get_admin_display_title()), buttons=[ messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit')) ]) if next_url: return redirect(next_url) return redirect('wagtailadmin_explore', page.get_parent().id) return render(request, 'wagtailadmin/pages/confirm_unpublish.html', { 'page': page, 'next': next_url, 'live_descendant_count': page.get_descendants().live().count(), }) def move_choose_destination(request, page_to_move_id, viewed_page_id=None): page_to_move = get_object_or_404(Page, id=page_to_move_id) page_perms = 
page_to_move.permissions_for_user(request.user) if not page_perms.can_move(): raise PermissionDenied if viewed_page_id: viewed_page = get_object_or_404(Page, id=viewed_page_id) else: viewed_page = Page.get_first_root_node() viewed_page.can_choose = page_perms.can_move_to(viewed_page) child_pages = [] for target in viewed_page.get_children(): # can't move the page into itself or its descendants target.can_choose = page_perms.can_move_to(target) target.can_descend = ( not(target == page_to_move or target.is_child_of(page_to_move)) and target.get_children_count() ) child_pages.append(target) # Pagination paginator, child_pages = paginate(request, child_pages, per_page=50) return render(request, 'wagtailadmin/pages/move_choose_destination.html', { 'page_to_move': page_to_move, 'viewed_page': viewed_page, 'child_pages': child_pages, }) def move_confirm(request, page_to_move_id, destination_id): page_to_move = get_object_or_404(Page, id=page_to_move_id).specific destination = get_object_or_404(Page, id=destination_id) if not page_to_move.permissions_for_user(request.user).can_move_to(destination): raise PermissionDenied if request.method == 'POST': # any invalid moves *should* be caught by the permission check above, # so don't bother to catch InvalidMoveToDescendant page_to_move.move(destination, pos='last-child') messages.success(request, _("Page '{0}' moved.").format(page_to_move.get_admin_display_title()), buttons=[ messages.button(reverse('wagtailadmin_pages:edit', args=(page_to_move.id,)), _('Edit')) ]) return redirect('wagtailadmin_explore', destination.id) return render(request, 'wagtailadmin/pages/confirm_move.html', { 'page_to_move': page_to_move, 'destination': destination, }) def set_page_position(request, page_to_move_id): page_to_move = get_object_or_404(Page, id=page_to_move_id) parent_page = page_to_move.get_parent() if not parent_page.permissions_for_user(request.user).can_reorder_children(): raise PermissionDenied if request.method == 'POST': # Get 
position parameter position = request.GET.get('position', None) # Find page thats already in this position position_page = None if position is not None: try: position_page = parent_page.get_children()[int(position)] except IndexError: pass # No page in this position # Move page # any invalid moves *should* be caught by the permission check above, # so don't bother to catch InvalidMoveToDescendant if position_page: # If the page has been moved to the right, insert it to the # right. If left, then left. old_position = list(parent_page.get_children()).index(page_to_move) if int(position) < old_position: page_to_move.move(position_page, pos='left') elif int(position) > old_position: page_to_move.move(position_page, pos='right') else: # Move page to end page_to_move.move(parent_page, pos='last-child') return HttpResponse('') @user_passes_test(user_has_any_page_permission) def copy(request, page_id): page = Page.objects.get(id=page_id) # Parent page defaults to parent of source page parent_page = page.get_parent() # Check if the user has permission to publish subpages on the parent can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage() # Create the form form = CopyForm(request.POST or None, user=request.user, page=page, can_publish=can_publish) next_url = get_valid_next_url_from_request(request) for fn in hooks.get_hooks('before_copy_page'): result = fn(request, page) if hasattr(result, 'status_code'): return result # Check if user is submitting if request.method == 'POST': # Prefill parent_page in case the form is invalid (as prepopulated value for the form field, # because ModelChoiceField seems to not fall back to the user given value) parent_page = Page.objects.get(id=request.POST['new_parent_page']) if form.is_valid(): # Receive the parent page (this should never be empty) if form.cleaned_data['new_parent_page']: parent_page = form.cleaned_data['new_parent_page'] if not page.permissions_for_user(request.user).can_copy_to(parent_page, 
form.cleaned_data.get('copy_subpages')): raise PermissionDenied # Re-check if the user has permission to publish subpages on the new parent can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage() # Copy the page new_page = page.copy( recursive=form.cleaned_data.get('copy_subpages'), to=parent_page, update_attrs={ 'title': form.cleaned_data['new_title'], 'slug': form.cleaned_data['new_slug'], }, keep_live=(can_publish and form.cleaned_data.get('publish_copies')), user=request.user, ) # Give a success message back to the user if form.cleaned_data.get('copy_subpages'): messages.success( request, _("Page '{0}' and {1} subpages copied.").format(page.get_admin_display_title(), new_page.get_descendants().count()) ) else: messages.success(request, _("Page '{0}' copied.").format(page.get_admin_display_title())) for fn in hooks.get_hooks('after_copy_page'): result = fn(request, page, new_page) if hasattr(result, 'status_code'): return result # Redirect to explore of parent page if next_url: return redirect(next_url) return redirect('wagtailadmin_explore', parent_page.id) return render(request, 'wagtailadmin/pages/copy.html', { 'page': page, 'form': form, 'next': next_url, }) @vary_on_headers('X-Requested-With') @user_passes_test(user_has_any_page_permission) def search(request): pages = [] q = None if 'q' in request.GET: form = SearchForm(request.GET) if form.is_valid(): q = form.cleaned_data['q'] pages = Page.objects.all().prefetch_related('content_type').search(q) paginator, pages = paginate(request, pages) else: form = SearchForm() if request.is_ajax(): return render(request, "wagtailadmin/pages/search_results.html", { 'pages': pages, 'query_string': q, 'pagination_query_params': ('q=%s' % q) if q else '' }) else: return render(request, "wagtailadmin/pages/search.html", { 'search_form': form, 'pages': pages, 'query_string': q, 'pagination_query_params': ('q=%s' % q) if q else '' }) def approve_moderation(request, revision_id): revision = 
get_object_or_404(PageRevision, id=revision_id) if not revision.page.permissions_for_user(request.user).can_publish(): raise PermissionDenied if not revision.submitted_for_moderation: messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title())) return redirect('wagtailadmin_home') if request.method == 'POST': revision.approve_moderation() messages.success(request, _("Page '{0}' published.").format(revision.page.get_admin_display_title()), buttons=[ messages.button(revision.page.url, _('View live'), new_window=True), messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit')) ]) if not send_notification(revision.id, 'approved', request.user.pk): messages.error(request, _("Failed to send approval notifications")) return redirect('wagtailadmin_home') def reject_moderation(request, revision_id): revision = get_object_or_404(PageRevision, id=revision_id) if not revision.page.permissions_for_user(request.user).can_publish(): raise PermissionDenied if not revision.submitted_for_moderation: messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title())) return redirect('wagtailadmin_home') if request.method == 'POST': revision.reject_moderation() messages.success(request, _("Page '{0}' rejected for publication.").format(revision.page.get_admin_display_title()), buttons=[ messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit')) ]) if not send_notification(revision.id, 'rejected', request.user.pk): messages.error(request, _("Failed to send rejection notifications")) return redirect('wagtailadmin_home') @require_GET def preview_for_moderation(request, revision_id): revision = get_object_or_404(PageRevision, id=revision_id) if not revision.page.permissions_for_user(request.user).can_publish(): raise PermissionDenied if not revision.submitted_for_moderation: messages.error(request, 
_("The page '{0}' is not currently awaiting moderation.").format(revision.page.get_admin_display_title())) return redirect('wagtailadmin_home') page = revision.as_page_object() request.revision_id = revision_id # pass in the real user request rather than page.dummy_request(), so that request.user # and request.revision_id will be picked up by the wagtail user bar return page.serve_preview(request, page.default_preview_mode) @require_POST def lock(request, page_id): # Get the page page = get_object_or_404(Page, id=page_id).specific # Check permissions if not page.permissions_for_user(request.user).can_lock(): raise PermissionDenied # Lock the page if not page.locked: page.locked = True page.save() messages.success(request, _("Page '{0}' is now locked.").format(page.get_admin_display_title())) # Redirect redirect_to = request.POST.get('next', None) if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()): return redirect(redirect_to) else: return redirect('wagtailadmin_explore', page.get_parent().id) @require_POST def unlock(request, page_id): # Get the page page = get_object_or_404(Page, id=page_id).specific # Check permissions if not page.permissions_for_user(request.user).can_lock(): raise PermissionDenied # Unlock the page if page.locked: page.locked = False page.save() messages.success(request, _("Page '{0}' is now unlocked.").format(page.get_admin_display_title())) # Redirect redirect_to = request.POST.get('next', None) if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()): return redirect(redirect_to) else: return redirect('wagtailadmin_explore', page.get_parent().id) @user_passes_test(user_has_any_page_permission) def revisions_index(request, page_id): page = get_object_or_404(Page, id=page_id).specific # Get page ordering ordering = request.GET.get('ordering', '-created_at') if ordering not in ['created_at', '-created_at', ]: ordering = '-created_at' revisions = page.revisions.order_by(ordering) paginator, revisions = 
paginate(request, revisions) return render(request, 'wagtailadmin/pages/revisions/index.html', { 'page': page, 'ordering': ordering, 'pagination_query_params': "ordering=%s" % ordering, 'revisions': revisions, }) def revisions_revert(request, page_id, revision_id): page = get_object_or_404(Page, id=page_id).specific page_perms = page.permissions_for_user(request.user) if not page_perms.can_edit(): raise PermissionDenied revision = get_object_or_404(page.revisions, id=revision_id) revision_page = revision.as_page_object() content_type = ContentType.objects.get_for_model(page) page_class = content_type.model_class() edit_handler_class = page_class.get_edit_handler() form_class = edit_handler_class.get_form_class(page_class) form = form_class(instance=revision_page) edit_handler = edit_handler_class(instance=revision_page, form=form) user_avatar = render_to_string('wagtailadmin/shared/user_avatar.html', {'user': revision.user}) messages.warning(request, mark_safe( _("You are viewing a previous revision of this page from <b>%(created_at)s</b> by %(user)s") % { 'created_at': revision.created_at.strftime("%d %b %Y %H:%M"), 'user': user_avatar, } )) return render(request, 'wagtailadmin/pages/edit.html', { 'page': page, 'revision': revision, 'is_revision': True, 'content_type': content_type, 'edit_handler': edit_handler, 'errors_debug': None, 'preview_modes': page.preview_modes, 'form': form, # Used in unit tests }) @user_passes_test(user_has_any_page_permission) def revisions_view(request, page_id, revision_id): page = get_object_or_404(Page, id=page_id).specific revision = get_object_or_404(page.revisions, id=revision_id) revision_page = revision.as_page_object() return revision_page.serve_preview(page.dummy_request(request), page.default_preview_mode) def revisions_compare(request, page_id, revision_id_a, revision_id_b): page = get_object_or_404(Page, id=page_id).specific # Get revision to compare from if revision_id_a == 'live': if not page.live: raise Http404 
revision_a = page revision_a_heading = _("Live") elif revision_id_a == 'earliest': revision_a = page.revisions.order_by('created_at', 'id').first() if revision_a: revision_a = revision_a.as_page_object() revision_a_heading = _("Earliest") else: raise Http404 else: revision_a = get_object_or_404(page.revisions, id=revision_id_a).as_page_object() revision_a_heading = str(get_object_or_404(page.revisions, id=revision_id_a).created_at) # Get revision to compare to if revision_id_b == 'live': if not page.live: raise Http404 revision_b = page revision_b_heading = _("Live") elif revision_id_b == 'latest': revision_b = page.revisions.order_by('created_at', 'id').last() if revision_b: revision_b = revision_b.as_page_object() revision_b_heading = _("Latest") else: raise Http404 else: revision_b = get_object_or_404(page.revisions, id=revision_id_b).as_page_object() revision_b_heading = str(get_object_or_404(page.revisions, id=revision_id_b).created_at) comparison = page.get_edit_handler().get_comparison() comparison = [comp(revision_a, revision_b) for comp in comparison] comparison = [comp for comp in comparison if comp.has_changed()] return render(request, 'wagtailadmin/pages/revisions/compare.html', { 'page': page, 'revision_a_heading': revision_a_heading, 'revision_a': revision_a, 'revision_b_heading': revision_b_heading, 'revision_b': revision_b, 'comparison': comparison, })
{ "content_hash": "b66c6b4ebacd589e6dca12f63e4c26a7", "timestamp": "", "source": "github", "line_count": 1124, "max_line_length": 149, "avg_line_length": 38.35231316725979, "alnum_prop": 0.594182054375058, "repo_name": "iansprice/wagtail", "id": "b66efe5a1f582a3943704e5287360242bf8aae6d", "size": "43108", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wagtail/wagtailadmin/views/pages.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "166081" }, { "name": "HTML", "bytes": "325248" }, { "name": "JavaScript", "bytes": "177341" }, { "name": "Makefile", "bytes": "720" }, { "name": "Python", "bytes": "3102671" }, { "name": "Shell", "bytes": "7871" } ], "symlink_target": "" }
from __future__ import unicode_literals from __future__ import absolute_import import logging from .service import Service from .container import Container from .packages.docker.errors import APIError log = logging.getLogger(__name__) def sort_service_dicts(services): # Topological sort (Cormen/Tarjan algorithm). unmarked = services[:] temporary_marked = set() sorted_services = [] get_service_names = lambda links: [link.split(':')[0] for link in links] def visit(n): if n['name'] in temporary_marked: if n['name'] in get_service_names(n.get('links', [])): raise DependencyError('A service can not link to itself: %s' % n['name']) if n['name'] in n.get('volumes_from', []): raise DependencyError('A service can not mount itself as volume: %s' % n['name']) else: raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked)) if n in unmarked: temporary_marked.add(n['name']) dependents = [m for m in services if (n['name'] in get_service_names(m.get('links', []))) or (n['name'] in m.get('volumes_from', []))] for m in dependents: visit(m) temporary_marked.remove(n['name']) unmarked.remove(n) sorted_services.insert(0, n) while unmarked: visit(unmarked[-1]) return sorted_services class Project(object): """ A collection of services. """ def __init__(self, name, services, client): self.name = name self.services = services self.client = client @classmethod def from_dicts(cls, name, service_dicts, client): """ Construct a ServiceCollection from a list of dicts representing services. 
""" project = cls(name, [], client) for service_dict in sort_service_dicts(service_dicts): links = project.get_links(service_dict) volumes_from = project.get_volumes_from(service_dict) project.services.append(Service(client=client, project=name, links=links, volumes_from=volumes_from, **service_dict)) return project @classmethod def from_config(cls, name, config, client): dicts = [] for service_name, service in list(config.items()): if not isinstance(service, dict): raise ConfigurationError('Service "%s" doesn\'t have any configuration options. All top level keys in your fig.yml must map to a dictionary of configuration options.') service['name'] = service_name dicts.append(service) return cls.from_dicts(name, dicts, client) def get_service(self, name): """ Retrieve a service by name. Raises NoSuchService if the named service does not exist. """ for service in self.services: if service.name == name: return service raise NoSuchService(name) def get_services(self, service_names=None, include_links=False): """ Returns a list of this project's services filtered by the provided list of names, or all services if service_names is None or []. If include_links is specified, returns a list including the links for service_names, in order of dependency. Preserves the original order of self.services where possible, reordering as needed to resolve links. Raises NoSuchService if any of the named services do not exist. 
""" if service_names is None or len(service_names) == 0: return self.get_services( service_names=[s.name for s in self.services], include_links=include_links ) else: unsorted = [self.get_service(name) for name in service_names] services = [s for s in self.services if s in unsorted] if include_links: services = reduce(self._inject_links, services, []) uniques = [] [uniques.append(s) for s in services if s not in uniques] return uniques def get_links(self, service_dict): links = [] if 'links' in service_dict: for link in service_dict.get('links', []): if ':' in link: service_name, link_name = link.split(':', 1) else: service_name, link_name = link, None try: links.append((self.get_service(service_name), link_name)) except NoSuchService: raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name)) del service_dict['links'] return links def get_volumes_from(self, service_dict): volumes_from = [] if 'volumes_from' in service_dict: for volume_name in service_dict.get('volumes_from', []): try: service = self.get_service(volume_name) volumes_from.append(service) except NoSuchService: try: container = Container.from_id(self.client, volume_name) volumes_from.append(container) except APIError: raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' 
% (service_dict['name'], volume_name)) del service_dict['volumes_from'] return volumes_from def start(self, service_names=None, **options): for service in self.get_services(service_names): service.start(**options) def stop(self, service_names=None, **options): for service in reversed(self.get_services(service_names)): service.stop(**options) def kill(self, service_names=None, **options): for service in reversed(self.get_services(service_names)): service.kill(**options) def build(self, service_names=None, no_cache=False): for service in self.get_services(service_names): if service.can_be_built(): service.build(no_cache) else: log.info('%s uses an image, skipping' % service.name) def up(self, service_names=None, start_links=True, recreate=True): running_containers = [] for service in self.get_services(service_names, include_links=start_links): if recreate: for (_, container) in service.recreate_containers(): running_containers.append(container) else: for container in service.start_or_create_containers(): running_containers.append(container) return running_containers def remove_stopped(self, service_names=None, **options): for service in self.get_services(service_names): service.remove_stopped(**options) def containers(self, service_names=None, stopped=False, one_off=False): return [Container.from_ps(self.client, container) for container in self.client.containers(all=stopped) for service in self.get_services(service_names) if service.has_container(container, one_off=one_off)] def _inject_links(self, acc, service): linked_names = service.get_linked_names() if len(linked_names) > 0: linked_services = self.get_services( service_names=linked_names, include_links=True ) else: linked_services = [] linked_services.append(service) return acc + linked_services class NoSuchService(Exception): def __init__(self, name): self.name = name self.msg = "No such service: %s" % self.name def __str__(self): return self.msg class ConfigurationError(Exception): def __init__(self, msg): 
self.msg = msg def __str__(self): return self.msg class DependencyError(ConfigurationError): pass
{ "content_hash": "a9ae0550059e769e7065e244e2446f3c", "timestamp": "", "source": "github", "line_count": 222, "max_line_length": 183, "avg_line_length": 36.851351351351354, "alnum_prop": 0.5889255592225889, "repo_name": "waynedovey/fig", "id": "d0c556c487c9e149121450f1929dffb3482ef84e", "size": "8181", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fig/project.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
import numpy as np from openpnm.utils import PrintableList __all__ = [ 'ParserMixin', 'LabelMixin', ] class ParserMixin: def _parse_indices(self, indices): r""" This private method accepts a list of pores or throats and returns a properly structured Numpy array of indices. Parameters ---------- indices : int or array_like This argument can accept numerous different data types including boolean masks, integers and arrays. Returns ------- A Numpy array of indices. Notes ----- This method should only be called by the method that is actually using the locations, to avoid calling it multiple times. """ if indices is None: indices = np.array([], ndmin=1, dtype=int) locs = np.array(indices, ndmin=1) # If boolean array, convert to indices if locs.dtype == bool: if np.size(locs) == self.Np: locs = self.Ps[locs] elif np.size(locs) == self.Nt: locs = self.Ts[locs] else: raise Exception('Mask of locations must be either ' + 'Np nor Nt long') locs = locs.astype(dtype=int) return locs def _parse_element(self, element, single=False): r""" This private method is used to parse the keyword \'element\' in many of the above methods. Parameters ---------- element : str or List[str] The element argument to check. If is None is recieved, then a list containing both \'pore\' and \'throat\' is returned. single : bool (default is False) When set to True only a single element is allowed and it will also return a string containing the element. Returns ------- When ``single`` is ``False`` (default) a list containing the element(s) is returned. When ``single`` is ``True`` a bare string containing the element is returned. 
""" if element is None: element = ['pore', 'throat'] # Convert element to a list for subsequent processing if isinstance(element, str): element = [element] # Convert 'pore.prop' and 'throat.prop' into just 'pore' and 'throat' element = [item.split('.', 1)[0] for item in element] # Make sure all are lowercase element = [item.lower() for item in element] # Deal with an plurals element = [item.rsplit('s', maxsplit=1)[0] for item in element] for item in element: if item not in ['pore', 'throat']: raise Exception('All keys must start with either pore or throat') # Remove duplicates if any _ = [element.remove(L) for L in element if element.count(L) > 1] if single: if len(element) > 1: raise Exception('Both elements recieved when single element ' + 'allowed') element = element[0] return element def _parse_labels(self, labels, element): r""" This private method is used for converting \'labels\' to a proper format, including dealing with wildcards (\*). Parameters ---------- labels : str or List[str] The label or list of labels to be parsed. Note that the \* can be used as a wildcard. Returns ------- A list of label strings, with all wildcard matches included if applicable. 
""" if labels is None: raise Exception('Labels cannot be None') if isinstance(labels, str): labels = [labels] # Parse the labels list parsed_labels = [] for label in labels: # Remove element from label, if present if element in label: label = label.split('.', 1)[-1] # Deal with wildcards if '*' in label: Ls = [L.split('.', 1)[-1] for L in self.labels(element=element)] if label.startswith('*'): temp = [L for L in Ls if L.endswith(label.strip('*'))] if label.endswith('*'): temp = [L for L in Ls if L.startswith(label.strip('*'))] temp = [element+'.'+L for L in temp] elif element+'.'+label in self.keys(): temp = [element+'.'+label] else: temp = [element+'.'+label] parsed_labels.extend(temp) # Remove duplicates if any _ = [parsed_labels.remove(L) for L in parsed_labels if parsed_labels.count(L) > 1] return parsed_labels def _parse_mode(self, mode, allowed=None, single=False): r""" This private method is for checking the \'mode\' used in the calling method. Parameters ---------- mode : str or List[str] The mode(s) to be parsed allowed : List[str] A list containing the allowed modes. This list is defined by the calling method. If any of the received modes are not in the allowed list an exception is raised. single : bool (default is False) Indicates if only a single mode is allowed. If this argument is True than a string is returned rather than a list of strings, which makes it easier to work with in the caller method. Returns ------- A list containing the received modes as strings, checked to ensure they are all within the allowed set (if provoided). Also, if the ``single`` argument was True, then a string is returned. 
""" if isinstance(mode, str): mode = [mode] for item in mode: if (allowed is not None) and (item not in allowed): raise Exception('\'mode\' must be one of the following: ' + allowed.__str__()) # Remove duplicates, if any _ = [mode.remove(L) for L in mode if mode.count(L) > 1] if single: if len(mode) > 1: raise Exception('Multiple modes received when only one mode ' + 'is allowed by this method') mode = mode[0] return mode def _parse_prop(self, propname, element): element = self._parse_element(element, single=True) if propname.split('.', 1)[0] in ['pore', 'throat']: propname = propname.split('.', 1)[-1] return element + '.' + propname class LabelMixin: """r This mixin adds functionality to the Base2 class so that boolean arrays are treated as labels """ def _get_labels(self, element, locations, mode): r""" This is the actual label getter method, but it should not be called directly. Use ``labels`` instead. """ # Parse inputs locations = self._parse_indices(locations) element = self._parse_element(element=element) # Collect list of all pore OR throat labels labels = [i for i in self.keys(mode='labels') if i.split('.', 1)[0] in element] labels.sort() labels = np.array(labels) # Convert to ndarray for following checks # Make an 2D array with locations in rows and labels in cols arr = np.vstack([self[item][locations] for item in labels]).T num_hits = np.sum(arr, axis=0) # Number of locations with each label if mode in ['or', 'union', 'any']: temp = labels[num_hits > 0] elif mode in ['and', 'intersection']: temp = labels[num_hits == locations.size] elif mode in ['xor', 'exclusive_or']: temp = labels[num_hits == 1] elif mode in ['nor', 'not', 'none']: temp = labels[num_hits == 0] elif mode in ['nand']: temp = labels[num_hits == (locations.size - 1)] elif mode in ['xnor', 'nxor']: temp = labels[num_hits > 1] else: raise Exception('Unrecognized mode:'+str(mode)) return PrintableList(temp) def labels(self, pores=[], throats=[], element=None, mode='union'): r""" Returns a 
list of labels present on the object Additionally, this function can return labels applied to a specified set of pores or throats Parameters ---------- element : str Controls whether pore or throat labels are returned. If empty then both are returned (default). pores (or throats) : array_like The pores (or throats) whose labels are sought. If left empty a list containing all pore and throat labels is returned. mode : str, optional Controls how the query should be performed. Only applicable when ``pores`` or ``throats`` are specified: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== Returns ------- A list containing the labels on the object. If ``pores`` or ``throats`` are given, the results are filtered according to the specified ``mode``. See Also -------- props keys Notes ----- Technically, *'nand'* and *'xnor'* should also return pores with *none* of the labels but these are not included. This makes the returned list more useful. 
""" # Short-circuit query when no pores or throats are given if (np.size(pores) == 0) and (np.size(throats) == 0): if element is None: element = ['pore', 'throat'] if isinstance(element, str): element = [element] labels = PrintableList() for k, v in self.items(): el, prop = k.split('.', 1) if (el in element) and (v.dtype == bool) and not prop.startswith('_'): labels.append(k) elif (np.size(pores) > 0) and (np.size(throats) > 0): raise Exception('Cannot perform label query on pores and ' + 'throats simultaneously') elif np.size(pores) > 0: labels = self._get_labels(element='pore', locations=pores, mode=mode) elif np.size(throats) > 0: labels = self._get_labels(element='throat', locations=throats, mode=mode) return sorted(labels) def set_label(self, label, pores=None, throats=None, mode='add'): r""" Creates or updates a label array Parameters ---------- label : str The label to apply to the specified locations pores : array_like A list of pore indices or a boolean mask of where given label should be added or removed (see ``mode``) throats : array_like A list of throat indices or a boolean mask of where given label should be added or removed (see ``mode``) mode : str Controls how the labels are handled. Options are: =========== ====================================================== mode description =========== ====================================================== 'add' (default) Adds the given label to the specified locations while keeping existing labels 'overwrite' Removes existing label from all locations before adding the label in the specified locations 'remove' Removes the given label from the specified locations leaving the remainder intact 'purge' Removes the specified label from the object completely. This ignores the ``pores`` and ``throats`` arguments. 
'clear' Sets all the labels to ``False`` but does not remove the label array =========== ====================================================== """ self._parse_mode(mode=mode, allowed=['add', 'overwrite', 'remove', 'purge', 'clear']) if label.split('.', 1)[0] in ['pore', 'throat']: label = label.split('.', 1)[1] if (pores is not None) and (throats is not None): self.set_label(label=label, pores=pores, mode=mode) self.set_label(label=label, throats=throats, mode=mode) return elif pores is not None: locs = self._parse_indices(pores) element = 'pore' elif throats is not None: locs = self._parse_indices(throats) element = 'throat' if mode == 'add': if element + '.' + label not in self.keys(): self[element + '.' + label] = False self[element + '.' + label][locs] = True if mode == 'overwrite': self[element + '.' + label] = False self[element + '.' + label][locs] = True if mode == 'remove': self[element + '.' + label][locs] = False if mode == 'clear': self['pore' + '.' + label] = False self['throat' + '.' + label] = False if mode == 'purge': _ = self.pop('pore.' + label, None) _ = self.pop('throat.' + label, None) def _get_indices(self, element, labels, mode='or'): r""" This is the actual method for getting indices, but should not be called directly. Use ``pores`` or ``throats`` instead. """ # Parse and validate all input values. 
element = self._parse_element(element, single=True) labels = self._parse_labels(labels=labels, element=element) # Begin computing label array if mode in ['or', 'any', 'union']: union = np.zeros([self._count(element), ], dtype=bool) for item in labels: # Iterate over labels and collect all indices union = union + self[element+'.'+item.split('.', 1)[-1]] ind = union elif mode in ['and', 'all', 'intersection']: intersect = np.ones([self._count(element), ], dtype=bool) for item in labels: # Iterate over labels and collect all indices intersect = intersect*self[element+'.'+item.split('.', 1)[-1]] ind = intersect elif mode in ['xor', 'exclusive_or']: xor = np.zeros([self._count(element), ], dtype=int) for item in labels: # Iterate over labels and collect all indices info = self[element+'.'+item.split('.', 1)[-1]] xor = xor + np.int8(info) ind = (xor == 1) elif mode in ['nor', 'not', 'none']: nor = np.zeros([self._count(element), ], dtype=int) for item in labels: # Iterate over labels and collect all indices info = self[element+'.'+item.split('.', 1)[-1]] nor = nor + np.int8(info) ind = (nor == 0) elif mode in ['nand']: nand = np.zeros([self._count(element), ], dtype=int) for item in labels: # Iterate over labels and collect all indices info = self[element+'.'+item.split('.', 1)[-1]] nand = nand + np.int8(info) ind = (nand < len(labels)) * (nand > 0) elif mode in ['xnor', 'nxor']: xnor = np.zeros([self._count(element), ], dtype=int) for item in labels: # Iterate over labels and collect all indices info = self[element+'.'+item.split('.', 1)[-1]] xnor = xnor + np.int8(info) ind = (xnor > 1) else: raise Exception('Unsupported mode: '+mode) # Extract indices from boolean mask ind = np.where(ind)[0] ind = ind.astype(dtype=int) return ind def pores(self, labels=None, mode='or', asmask=False): r""" Returns pore indicies where given labels exist, according to the logic specified by the ``mode`` argument. 
Parameters ---------- labels : str or list[str] The label(s) whose pores locations are requested. This argument also accepts '*' for wildcard searches. mode : str Specifies how the query should be performed. The options are: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== asmask : bool If ``True`` then a boolean array of length Np is returned with ``True`` values indicating the pores that satisfy the query. Returns ------- A Numpy array containing pore indices filtered by the logic specified in ``mode``. See Also -------- throats Notes ----- Technically, *nand* and *xnor* should also return pores with *none* of the labels but these are not included. This makes the returned list more useful. To perform more complex or compound queries, you can opt to receive the result a a boolean mask (``asmask=True``), then manipulate the arrays manually. """ if labels is None: labels = self.name ind = self._get_indices(element='pore', labels=labels, mode=mode) if asmask: ind = self.to_mask(pores=ind) return ind def throats(self, labels=None, mode='or', asmask=False): r""" Returns throat locations where given labels exist, according to the logic specified by the ``mode`` argument. Parameters ---------- labels : str or list[str] The throat label(s) whose locations are requested. 
If omitted, 'all' throat inidices are returned. This argument also accepts '*' for wildcard searches. mode : str Specifies how the query should be performed. The options are: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== asmask : bool If ``True`` then a boolean array of length Nt is returned with ``True`` values indicating the throats that satisfy the query. Returns ------- A Numpy array containing throat indices filtered by the logic specified in ``mode``. See Also -------- pores """ if labels is None: labels = self.name ind = self._get_indices(element='throat', labels=labels, mode=mode) if asmask: ind = self.to_mask(throats=ind) return ind def filter_by_label(self, pores=[], throats=[], labels=None, mode='or'): r""" Returns which of the supplied pores (or throats) has the specified label(s) Parameters ---------- pores, or throats : array_like List of pores or throats to be filtered labels : list of strings The labels to apply as a filter mode : str Controls how the filter is applied. The default value is 'or'. Options include: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. 
Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== Returns ------- A list of pores (or throats) that have been filtered according the given criteria. The returned list is a subset of the received list of pores (or throats). See Also -------- pores throats """ # Convert inputs to locations and element if (np.size(throats) > 0) and (np.size(pores) > 0): raise Exception('Can only filter either pores OR labels') if np.size(pores) > 0: element = 'pore' locations = self._parse_indices(pores) elif np.size(throats) > 0: element = 'throat' locations = self._parse_indices(throats) else: return np.array([], dtype=int) labels = self._parse_labels(labels=labels, element=element) labels = [element+'.'+item.split('.', 1)[-1] for item in labels] all_locs = self._get_indices(element=element, labels=labels, mode=mode) mask = self._tomask(indices=all_locs, element=element) ind = mask[locations] return locations[ind] def num_pores(self, labels='all', mode='or'): r""" Returns the number of pores of the specified labels Parameters ---------- labels : list of strings, optional The pore labels that should be included in the count. If not supplied, all pores are counted. labels : list of strings Label of pores to be returned mode : str, optional Specifies how the count should be performed. 
The options are: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== Returns ------- Np : int Number of pores with the specified labels See Also -------- num_throats count Notes ----- Technically, *'nand'* and *'xnor'* should also count pores with *none* of the labels, however, to make the count more useful these are not included. """ # Count number of pores of specified type Ps = self._get_indices(labels=labels, mode=mode, element='pore') Np = np.shape(Ps)[0] return Np def num_throats(self, labels='all', mode='union'): r""" Return the number of throats of the specified labels Parameters ---------- labels : list of strings, optional The throat labels that should be included in the count. If not supplied, all throats are counted. mode : str, optional Specifies how the count should be performed. The options are: ============== =================================================== mode meaning ============== =================================================== 'or' Returns the labels that are assigned to *any* of the given locations. Also accepts 'union' and 'any' 'and' Labels that are present on all the given locations. 
also accepts 'intersection' and 'all' 'xor' Labels that are present on *only one* of the given locations.Also accepts 'exclusive_or' 'nor' Labels that are *not* present on any of the given locations. Also accepts 'not' and 'none' 'nand' Labels that are present on *all but one* of the given locations 'xnor' Labels that are present on *more than one* of the given locations. ============== =================================================== Returns ------- Nt : int Number of throats with the specified labels See Also -------- num_pores count Notes ----- Technically, *'nand'* and *'xnor'* should also count throats with *none* of the labels, however, to make the count more useful these are not included. """ # Count number of pores of specified type Ts = self._get_indices(labels=labels, mode=mode, element='throat') Nt = np.shape(Ts)[0] return Nt
{ "content_hash": "bfa5cbd78a5d6bf804bdcefb8ffbac55", "timestamp": "", "source": "github", "line_count": 697, "max_line_length": 87, "avg_line_length": 41.67144906743185, "alnum_prop": 0.4996040626613875, "repo_name": "PMEAL/OpenPNM", "id": "d77f1ed5fb848a4ddbc9977b61f43dd93e830c47", "size": "29045", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "openpnm/core/_mixins.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "375" }, { "name": "Python", "bytes": "1437146" } ], "symlink_target": "" }
import commands import os import random import sys import unittest from nova.adminclient import NovaAdminClient from nova.smoketests import flags from nova import vendor import paramiko nova_admin = NovaAdminClient(access_key=flags.admin_access_key, secret_key=flags.admin_secret_key, clc_ip=host) class NovaTestCase(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def connect_ssh(self, ip, key_name): # TODO(devcamcar): set a more reasonable connection timeout time key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.WarningPolicy()) client.connect(ip, username='root', pkey=key) stdin, stdout, stderr = client.exec_command('uptime') print 'uptime: ', stdout.read() return client def can_ping(self, ip): return commands.getstatusoutput('ping -c 1 %s' % ip)[0] == 0 @property def admin(self): return nova_admin.connection_for('admin') def connection_for(self, username): return nova_admin.connection_for(username) def create_user(self, username): return nova_admin.create_user(username) def get_user(self, username): return nova_admin.get_user(username) def delete_user(self, username): return nova_admin.delete_user(username) def get_signed_zip(self, username): return nova_admin.get_zip(username) def create_key_pair(self, conn, key_name): try: os.remove('/tmp/%s.pem' % key_name) except: pass key = conn.create_key_pair(key_name) key.save('/tmp/') return key def delete_key_pair(self, conn, key_name): conn.delete_key_pair(key_name) try: os.remove('/tmp/%s.pem' % key_name) except: pass def bundle_image(self, image, kernel=False): cmd = 'euca-bundle-image -i %s' % image if kernel: cmd += ' --kernel true' status, output = commands.getstatusoutput(cmd) if status != 0: print '%s -> \n %s' % (cmd, output) raise Exception(output) return True def upload_image(self, bucket_name, image): cmd = 'euca-upload-bundle -b %s -m /tmp/%s.manifest.xml' % 
(bucket_name, image) status, output = commands.getstatusoutput(cmd) if status != 0: print '%s -> \n %s' % (cmd, output) raise Exception(output) return True def delete_bundle_bucket(self, bucket_name): cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name) status, output = commands.getstatusoutput(cmd) if status != 0: print '%s -> \n%s' % (cmd, output) raise Exception(output) return True def register_image(self, bucket_name, manifest): conn = nova_admin.connection_for('admin') return conn.register_image("%s/%s.manifest.xml" % (bucket_name, manifest)) def setUp_test_image(self, image, kernel=False): self.bundle_image(image, kernel=kernel) bucket = "auto_test_%s" % int(random.random() * 1000000) self.upload_image(bucket, image) return self.register_image(bucket, image) def tearDown_test_image(self, conn, image_id): conn.deregister_image(image_id)
{ "content_hash": "6a12929520387810ea15e1da03e6b003", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 111, "avg_line_length": 31.90740740740741, "alnum_prop": 0.6137550783517122, "repo_name": "sorenh/cc", "id": "869e3194603e57dc2d3ef3802cc13de8a4a00407", "size": "4257", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "smoketests/novatestcase.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PHP", "bytes": "707" }, { "name": "Python", "bytes": "398663" }, { "name": "Shell", "bytes": "12374" } ], "symlink_target": "" }
from uzentropi import Agent, asyncio, on_timer, on_event, random_id class EventDataExample(Agent): @on_timer(2) async def send_random_data(self): await self.emit('some_stuff', value=random_id()) await asyncio.sleep(1) await self.emit('more_stuff', value=random_id()) @on_event('some_stuff') async def on_some_stuff(self, frame): print(frame.name, frame.data['value']) @on_event('more_stuff', expects={'value': (str, '')}) async def on_more_stuff(self, value): print('more_stuff', value) agent = EventDataExample() agent.run()
{ "content_hash": "1cb95ba38294235bf6f1b002fb151123", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 67, "avg_line_length": 28.38095238095238, "alnum_prop": 0.6375838926174496, "repo_name": "zentropi/python-uzentropi", "id": "4aa4e9415656aadfdd1efeda5a75c78150473a16", "size": "612", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/03-event-data/event_data.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "49441" }, { "name": "Shell", "bytes": "1979" } ], "symlink_target": "" }
import os import uuid import create_data_labeling_job_specialist_pool_sample import pytest import helpers API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT") PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT") LOCATION = "us-central1" DATASET_ID = "1905673553261363200" SPECIALIST_POOL_ID = "5898026661995085824" INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml" DISPLAY_NAME = f"temp_create_data_labeling_job_specialist_pool_test_{uuid.uuid4()}" INSTRUCTIONS_GCS_URI = ( "gs://ucaip-sample-resources/images/datalabeling_instructions.pdf" ) ANNOTATION_SPEC = "rose" @pytest.fixture(scope="function", autouse=True) def teardown(teardown_data_labeling_job): yield # Creating a data labeling job for images @pytest.mark.skip(reason="Flaky job state.") def test_create_data_labeling_job_specialist_pool_sample(capsys, shared_state): dataset = f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}" specialist_pool = f"projects/{PROJECT_ID}/locations/{LOCATION}/specialistPools/{SPECIALIST_POOL_ID}" create_data_labeling_job_specialist_pool_sample.create_data_labeling_job_specialist_pool_sample( project=PROJECT_ID, display_name=DISPLAY_NAME, dataset=dataset, specialist_pool=specialist_pool, instruction_uri=INSTRUCTIONS_GCS_URI, inputs_schema_uri=INPUTS_SCHEMA_URI, annotation_spec=ANNOTATION_SPEC, api_endpoint=API_ENDPOINT, ) out, _ = capsys.readouterr() # Save resource name of the newly created data labeing job shared_state["data_labeling_job_name"] = helpers.get_name(out)
{ "content_hash": "b37b55cceeffdaa9e566ac3a73a3e50e", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 112, "avg_line_length": 34.12244897959184, "alnum_prop": 0.7386363636363636, "repo_name": "googleapis/python-aiplatform", "id": "7ae76a05505577a841a87d66ce0d0bdebe58de52", "size": "2248", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "samples/snippets/job_service/create_data_labeling_job_specialist_pool_sample_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "23977004" }, { "name": "Shell", "bytes": "30668" } ], "symlink_target": "" }
from django.conf.urls import url from .views import sendmail_list, sendmail_send, sendmail_view urlpatterns = [ url(r'^$', sendmail_list, name='sendmail_list'), url(r'^view/(?P<mail_uuid>[^/]+)/$', sendmail_view, name='sendmail_view'), url(r'^send/$', sendmail_send, name='sendmail_send'), ]
{ "content_hash": "bfeb2ff0e653e797b14f617cde5ac3fe", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 78, "avg_line_length": 30.7, "alnum_prop": 0.6579804560260586, "repo_name": "CasualGaming/studlan", "id": "83b2e86b3c79e44f65bc25de6b072782b4c9ed26", "size": "333", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "apps/sendmail/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "9222" }, { "name": "Dockerfile", "bytes": "899" }, { "name": "HTML", "bytes": "144147" }, { "name": "JavaScript", "bytes": "18344" }, { "name": "Python", "bytes": "342146" }, { "name": "Shell", "bytes": "19209" } ], "symlink_target": "" }
""" Module containing weather forecast classes and data structures. """ import json import xml.etree.ElementTree as ET from pyowm.webapi25.xsd.xmlnsconfig import ( FORECAST_XMLNS_PREFIX, FORECAST_XMLNS_URL) from pyowm.utils import timeformatutils, xmlutils class ForecastIterator(object): """ Iterator over the list of *Weather* objects encapsulated in a *Forecast* class instance :param obj: the iterable object :type obj: object :returns: a *ForecastIterator* instance """ def __init__(self, obj): self._obj = obj self._cnt = 0 def next(self): """ Compatibility for Python 2.x, delegates to function: `__next__()` Returns the next *Weather* item :returns: the next *Weather* item """ return self.__next__() def __next__(self): """ Returns the next *Weather* item :returns: the next *Weather* item """ try: result = self._obj.get(self._cnt) self._cnt += 1 return result except IndexError: raise StopIteration class Forecast(object): """ A class encapsulating weather forecast data for a certain location and relative to a specific time interval (forecast for every three hours or for every day) :param interval: the time granularity of the forecast. 
May be: *'3h'* for three hours forecast or *'daily'* for daily ones :type interval: str :param reception_time: GMT UNIXtime of the forecast reception from the OWM web API :type reception_time: int :param location: the *Location* object relative to the forecast :type location: Location :param weathers: the list of *Weather* objects composing the forecast :type weathers: list :returns: a *Forecast* instance :raises: *ValueError* when negative values are provided """ def __init__(self, interval, reception_time, location, weathers): self._interval = interval if reception_time < 0: raise ValueError("'reception_time' must be greater than 0") self._reception_time = reception_time self._location = location self._weathers = weathers def __iter__(self): """ Creates a *ForecastIterator* instance :returns: a *ForecastIterator* instance """ return ForecastIterator(self) def get(self, index): """ Lookups up into the *Weather* items list for the item at the specified index :param index: the index of the *Weather* object in the list :type index: int :returns: a *Weather* object """ return self._weathers[index] def get_interval(self): """ Returns the time granularity of the forecast :returns: str """ return self._interval def set_interval(self, interval): """ Sets the time granularity of the forecast :param interval: the time granularity of the forecast, may be "3h" or "daily" :type interval: str """ self._interval = interval def get_reception_time(self, timeformat='unix'): """Returns the GMT time telling when the forecast was received from the OWM web API :param timeformat: the format for the time value. 
May be: '*unix*' (default) for UNIX time '*iso*' for ISO8601-formatted string in the format ``YYYY-MM-DD HH:MM:SS+00`` '*date* for ``datetime.datetime`` object instance :type timeformat: str :returns: an int or a str :raises: ValueError """ return timeformatutils.timeformat(self._reception_time, timeformat) def get_location(self): """ Returns the Location object relative to the forecast :returns: a *Location* object """ return self._location def get_weathers(self): """ Returns a copy of the *Weather* objects list composing the forecast :returns: a list of *Weather* objects """ return list(self._weathers) def count_weathers(self): """ Tells how many *Weather* items compose the forecast :returns: the *Weather* objects total """ return len(self._weathers) def to_JSON(self): """Dumps object fields into a JSON formatted string :returns: the JSON string """ return json.dumps({"interval": self._interval, "reception_time": self._reception_time, "Location": json.loads(self._location.to_JSON()), "weathers": json.loads("[" + \ ",".join([w.to_JSON() for w in self]) + "]") }) def to_XML(self, xml_declaration=True, xmlns=True): """ Dumps object fields to an XML-formatted string. The 'xml_declaration' switch enables printing of a leading standard XML line containing XML version and encoding. The 'xmlns' switch enables printing of qualified XMLNS prefixes. :param XML_declaration: if ``True`` (default) prints a leading XML declaration line :type XML_declaration: bool :param xmlns: if ``True`` (default) prints full XMLNS prefixes :type xmlns: bool :returns: an XML-formatted string """ root_node = self._to_DOM() if xmlns: xmlutils.annotate_with_XMLNS(root_node, FORECAST_XMLNS_PREFIX, FORECAST_XMLNS_URL) return xmlutils.DOM_node_to_XML(root_node, xml_declaration) def _to_DOM(self): """ Dumps object data to a fully traversable DOM representation of the object. 
:returns: a ``xml.etree.Element`` object """ root_node = ET.Element("forecast") interval_node = ET.SubElement(root_node, "interval") interval_node.text = self._interval reception_time_node = ET.SubElement(root_node, "reception_time") reception_time_node.text = str(self._reception_time) root_node.append(self._location._to_DOM()) weathers_node = ET.SubElement(root_node, "weathers") for weather in self: weathers_node.append(weather._to_DOM()) return root_node def __len__(self): """Redefine __len__ hook""" return self.count_weathers() def __repr__(self): return "<%s.%s - reception time=%s, interval=%s>" % (__name__, \ self.__class__.__name__, self.get_reception_time('iso'), self._interval)
{ "content_hash": "5446801702ecdbe5d1980fbc3e4ace9e", "timestamp": "", "source": "github", "line_count": 222, "max_line_length": 89, "avg_line_length": 30.486486486486488, "alnum_prop": 0.5849586288416075, "repo_name": "mpvoss/RickAndMortyWeatherTweets", "id": "52f35dede99a439ee441c6d60093b8323c789fe5", "size": "6768", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "env/lib/python3.5/site-packages/pyowm/webapi25/forecast.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13428" } ], "symlink_target": "" }
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    # South schema migration: adds a plain 'username' CharField to
    # users.UserProfile.  The `models` dict below is an auto-generated
    # frozen snapshot of the ORM at migration time - do not edit by hand.

    def forwards(self, orm):
        # Adding field 'UserProfile.username'
        # NOTE(review): default=0 on a CharField - South coerces it to fill
        # existing rows; presumably "0" was an arbitrary backfill value.
        db.add_column(u'users_userprofile', 'username',
                      self.gf('django.db.models.fields.CharField')(default=0, max_length=30),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'UserProfile.username'
        db.delete_column(u'users_userprofile', 'username')

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'users.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'college': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'grade': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'major': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'nickname': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'school_id': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        }
    }

    complete_apps = ['users']
{ "content_hash": "b4bad6e5d8d362ec9fdad2089b31c06d", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 195, "avg_line_length": 65.79452054794521, "alnum_prop": 0.5565271705184259, "repo_name": "KarryWang/SHTP", "id": "e80dbed12a7e144ed10f9dd20f1a84ea20fefe07", "size": "4827", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "users/migrations/0002_auto__add_field_userprofile_username.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "32941" }, { "name": "HTML", "bytes": "5094" }, { "name": "Python", "bytes": "33919" } ], "symlink_target": "" }
"""Integration platform for recorder.""" from __future__ import annotations from homeassistant.const import ATTR_EDITABLE from homeassistant.core import HomeAssistant, callback from . import ATTR_MAX, ATTR_MIN, ATTR_MODE, ATTR_STEP @callback def exclude_attributes(hass: HomeAssistant) -> set[str]: """Exclude editable hint from being recorded in the database.""" return { ATTR_EDITABLE, ATTR_MAX, ATTR_MIN, ATTR_MODE, ATTR_STEP, }
{ "content_hash": "4e68ce503804bfd9e1ab64ada14379fb", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 68, "avg_line_length": 25.68421052631579, "alnum_prop": 0.6823770491803278, "repo_name": "w1ll1am23/home-assistant", "id": "05a5023be0b19322826907f5d06f033a282a8760", "size": "488", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "homeassistant/components/input_number/recorder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "52277012" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
import coverage from django.core.exceptions import ImproperlyConfigured try: from discover_runner import DiscoverRunner except (ImportError, ImproperlyConfigured): from django.test.runner import DiscoverRunner from discoverage.settings import (COVERAGE_OMIT_MODULES, COVERAGE_EXCLUDE_PATTERNS) from discoverage.utils import find_coverage_apps, get_all_modules class DiscoverageRunner(DiscoverRunner): def __init__(self, no_coverage=False, **kwargs): self.no_coverage = no_coverage super(DiscoverageRunner, self).__init__(**kwargs) def build_suite(self, *args, **kwargs): if not hasattr(self, '_suite'): self._suite = super(DiscoverageRunner, self).build_suite( *args, **kwargs) return self._suite def run_tests(self, test_labels, extra_tests=None, **kwargs): if self.no_coverage: return super(DiscoverageRunner, self).run_tests( test_labels, extra_tests=extra_tests, **kwargs) cov = coverage.coverage(omit=COVERAGE_OMIT_MODULES) for pattern in COVERAGE_EXCLUDE_PATTERNS: cov.exclude(pattern) cov.start() result = super(DiscoverageRunner, self).run_tests( test_labels, extra_tests, **kwargs) cov.stop() suite = self.build_suite(test_labels, extra_tests) apps = find_coverage_apps(suite) app_modules = get_all_modules(apps) if app_modules: print cov.report(app_modules) return result
{ "content_hash": "272fc2053a384d95e59581a575fdd77c", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 69, "avg_line_length": 31.48, "alnum_prop": 0.6385006353240152, "repo_name": "ryankask/django-discoverage", "id": "372edd37102195946be37d26bd4617ca5eb54c72", "size": "1574", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "discoverage/runner.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "10785" }, { "name": "Shell", "bytes": "446" } ], "symlink_target": "" }
""" Utility functions used in other test files """ from __future__ import absolute_import import os import unittest from digits.config import config_value def skipIfNotFramework(framework): """ Raises SkipTest if DIGITS_TEST_FRAMEWORK is set to something other than framework """ key = 'DIGITS_TEST_FRAMEWORK' if (key in os.environ and os.environ[key] != framework): raise unittest.SkipTest( 'Skipping because %s is "%s" and not "%s"' % (key, os.environ[key], framework)) class DatasetMixin(object): """ Mixin for dataset tests - skip if framework is not "none" """ @classmethod def setUpClass(cls): skipIfNotFramework('none') # Call super.setUpClass() unless we're the last in the class hierarchy supercls = super(DatasetMixin, cls) if hasattr(supercls, 'setUpClass'): supercls.setUpClass() class CaffeMixin(object): """ Mixin for caffe tests """ FRAMEWORK = 'caffe' @classmethod def setUpClass(cls): skipIfNotFramework('caffe') # Call super.setUpClass() unless we're the last in the class hierarchy supercls = super(CaffeMixin, cls) if hasattr(supercls, 'setUpClass'): supercls.setUpClass() class TorchMixin(object): """ Mixin for torch tests """ FRAMEWORK = 'torch' @classmethod def setUpClass(cls): skipIfNotFramework('torch') if cls.FRAMEWORK == 'torch' and not config_value('torch')['enabled']: raise unittest.SkipTest('Torch not found') # Call super.setUpClass() unless we're the last in the class hierarchy supercls = super(TorchMixin, cls) if hasattr(supercls, 'setUpClass'): supercls.setUpClass()
{ "content_hash": "9f7b489d2592ebe22e1320ebdc1811e5", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 78, "avg_line_length": 26.057971014492754, "alnum_prop": 0.6323692992213571, "repo_name": "TimZaman/DIGITS", "id": "fce564ceeebea4dd8c3e852825d9b7d07d86a02d", "size": "1862", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "digits/test_utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "4032" }, { "name": "HTML", "bytes": "285736" }, { "name": "JavaScript", "bytes": "45826" }, { "name": "Lua", "bytes": "110640" }, { "name": "Makefile", "bytes": "87" }, { "name": "Protocol Buffer", "bytes": "384" }, { "name": "Python", "bytes": "933415" }, { "name": "Shell", "bytes": "12431" } ], "symlink_target": "" }
import Gaffer
import GafferArnold

# Registers UI metadata (descriptions, widget/nodule layout hints) for the
# ArnoldDisplacement node.  Everything below is declarative: string keys and
# values consumed by Gaffer's Metadata system, not executable logic.
# NOTE(review): original line wrapping of the description strings was lost in
# extraction; the text content below is unchanged.
Gaffer.Metadata.registerNode(

    GafferArnold.ArnoldDisplacement,

    "description",
    """
    Creates displacements to be applied to meshes for
    rendering in Arnold. A displacement consists of a
    shader to provide the displacement map and several
    attributes to control the height and other displacement
    properties.

    Use an ArnoldAttributes node to control the subdivision
    settings of the mesh, which in turn controls the detail
    of the displacement. Use a ShaderAssignment node to assign
    the ArnoldDisplacement to specific objects.
    """,

    # Activator used below to hide the autoBump visibility control
    # whenever autoBump itself is at its default value.
    "layout:activator:autoBumpVisibility", lambda node : not node["autoBump"].isSetToDefault(),

    plugs = {

        "name" : [

            # The `name` plug is inherited from Shader, but unused by ArnoldDisplacement.
            # Hide it to avoid confusion. See comments in ArnoldDisplacement.h.
            "plugValueWidget:type", "",

        ],

        "map" : [

            "description",
            """
            The Arnold shader that provides the displacement map.
            Connect a float or colour input to displace along the
            object normals or a vector input to displace in a
            specific direction.
            """,

            "nodule:type", "GafferUI::StandardNodule",
            "noduleLayout:section", "left",

        ],

        "height" : [

            "description",
            """
            Controls the amount of displacement. Only used when
            performing displacement along the normal.
            """,

            "nodule:type", "",

        ],

        "padding" : [

            "description",
            """
            Padding added to an object's bounding box to take
            into account displacement. Arnold will subdivide
            and displace an object the first time a ray intersects
            its bounding box, so if the padding is too small,
            parts of the object will be clipped. If the padding
            is too large, rendertime will suffer and Arnold will
            emit a warning message.
            """,

            "nodule:type", "",

        ],

        "zeroValue" : [

            "description",
            """
            Defines a value that will cause no displacement to
            occur. For instance, if the displacement map contains
            a greyscale noise between 0 and 1, a zero value of 0.5
            will mean that the displacement pushes into the object
            in some places and out in others.
            """,

            "nodule:type", "",

        ],

        "autoBump" : [

            "description",
            """
            Automatically turns the details of the displacement map
            into bump, wherever the mesh is not subdivided enough
            to properly capture them.
            """,

            "nodule:type", "",
            "layout:visibilityActivator", "autoBumpVisibility",

        ],

    }

)
{ "content_hash": "3fb55fa8ba7e2c42137f2f4144e4dc1e", "timestamp": "", "source": "github", "line_count": 109, "max_line_length": 92, "avg_line_length": 22.513761467889907, "alnum_prop": 0.69559902200489, "repo_name": "johnhaddon/gaffer", "id": "cefeba07f09e93b436d2289cc43f81248e11d244", "size": "4257", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "python/GafferArnoldUI/ArnoldDisplacementUI.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5790" }, { "name": "C", "bytes": "61993" }, { "name": "C++", "bytes": "9571062" }, { "name": "CMake", "bytes": "85201" }, { "name": "GLSL", "bytes": "6208" }, { "name": "Python", "bytes": "10271481" }, { "name": "Ruby", "bytes": "419" }, { "name": "Shell", "bytes": "14389" } ], "symlink_target": "" }