text
stringlengths
4
1.02M
meta
dict
'''
Local settings

- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''

from .common import *  # noqa

# DEBUG
# ------------------------------------------------------------------------------
# Default to DEBUG=True for local development; override via env var.
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='(zsro69s2(895gxs0t1&orpw&mya@q(bb28b@8^^$5yx_ej^%m')

# Mail settings
# ------------------------------------------------------------------------------
# Point at a local debugging SMTP server (e.g. `python -m smtpd -n -c DebuggingServer`).
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025

# CACHING
# ------------------------------------------------------------------------------
# In-process memory cache; fine for a single dev process, not shared between workers.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

# 10.0.2.2 is the host machine as seen from a VirtualBox/Vagrant guest.
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

########## CELERY
# In development, all tasks will be executed locally by blocking until the task returns
CELERY_ALWAYS_EAGER = True
########## END CELERY

# Your local stuff: Below this line define 3rd party library settings
{ "content_hash": "b69eab83672ad281dc4136543a861735", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 99, "avg_line_length": 30.16923076923077, "alnum_prop": 0.4915859255481897, "repo_name": "RohitRepo/onepunch", "id": "f3366f5aa3ad3737375c09998b08bdf827de31ef", "size": "1985", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "config/settings/local.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1768" }, { "name": "HTML", "bytes": "20178" }, { "name": "JavaScript", "bytes": "3210" }, { "name": "Nginx", "bytes": "1095" }, { "name": "Python", "bytes": "41252" }, { "name": "Shell", "bytes": "4572" } ], "symlink_target": "" }
from rest_framework import serializers

from text.models import Text


class AnalyzeSerializer(serializers.Serializer):
    """Validates the payload of an analyze request: a single required URL."""
    url = serializers.CharField(required=True)


class TextSerializer(serializers.ModelSerializer):
    """Serializes a ``Text`` model instance.

    The internal ``task_id`` field is exposed to clients under the name
    ``id``; the real ``user`` and ``task_id`` fields are excluded from
    the output.
    """
    # Read-only computed field backed by get_id() below.
    id = serializers.SerializerMethodField('get_id')

    def get_id(self, obj):
        """Return the object's ``task_id`` as the public identifier."""
        return obj.task_id

    class Meta:
        model = Text
        exclude = ('user', 'task_id')


class ResultSerializer(serializers.Serializer):
    """Serializes a bare success/failure flag."""
    success = serializers.BooleanField(required=True)
{ "content_hash": "996597f3f8e06d803261902a6ea31f83", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 53, "avg_line_length": 21.25, "alnum_prop": 0.7235294117647059, "repo_name": "blumug/texapi", "id": "239adda1946ea993212b09f93252d2a8bacad36a", "size": "510", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "text/api/serializers.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2709" }, { "name": "HTML", "bytes": "640044" }, { "name": "JavaScript", "bytes": "45" }, { "name": "Python", "bytes": "108002" }, { "name": "Ruby", "bytes": "4967" }, { "name": "Shell", "bytes": "3761" } ], "symlink_target": "" }
from struct import unpack

# custom import
from .DataTypeConverters import readBew, readVar, varLen


class RawInstreamFile:
    """
    Parses and reads data from an input file.

    Takes care of big endianness and keeps track of the cursor position.
    The midi parser only reads from this object, never directly from the
    file.
    """

    def __init__(self, infile=''):
        """
        If 'infile' is a string we assume it is a path and read from that
        file. If it is a file-like object we read from it, but we don't
        close it. Midi files are usually pretty small, so it should be
        safe to copy them into memory.
        """
        if infile:
            if isinstance(infile, str):
                infile = open(infile, 'rb')
                self.data = infile.read()
                infile.close()
            else:
                # don't close the file object -- the caller owns it
                self.data = infile.read()
        else:
            # Files are read in binary mode, so the empty default must be
            # bytes as well; an empty str would break slicing and
            # comparisons under Python 3.
            self.data = b''
        # start at beginning ;-)
        self.cursor = 0

    # setting up data manually

    def setData(self, data=b''):
        "Sets the raw data from a bytes object."
        self.data = data

    # cursor operations

    def setCursor(self, position=0):
        "Sets the absolute position of the cursor"
        self.cursor = position

    def getCursor(self):
        "Returns the value of the cursor"
        return self.cursor

    def moveCursor(self, relative_position=0):
        "Moves the cursor to a new relative position"
        self.cursor += relative_position

    # native data reading functions

    def nextSlice(self, length, move_cursor=1):
        "Reads the next slice of 'length' bytes from the raw data."
        c = self.cursor
        slc = self.data[c:c + length]
        if move_cursor:
            self.moveCursor(length)
        return slc

    def readBew(self, n_bytes=1, move_cursor=1):
        """
        Reads n bytes of data (big-endian word) from the current cursor
        position. Moves the cursor if move_cursor is true.
        """
        return readBew(self.nextSlice(n_bytes, move_cursor))

    def readVarLen(self):
        """
        Reads a variable-length value from the current cursor position
        and advances the cursor past the bytes that actually encode it.
        """
        MAX_VARLEN = 4  # Max number of bytes a varlen value can occupy
        var = readVar(self.nextSlice(MAX_VARLEN, 0))
        # only move cursor the actual bytes in varlen
        self.moveCursor(varLen(var))
        return var


if __name__ == '__main__':

    test_file = 'test/midifiles/minimal.mid'
    fis = RawInstreamFile(test_file)
    print(fis.nextSlice(len(fis.data)))

    test_file = 'test/midifiles/cubase-minimal.mid'
    cubase_minimal = open(test_file, 'rb')
    fis2 = RawInstreamFile(cubase_minimal)
    print(fis2.nextSlice(len(fis2.data)))
    cubase_minimal.close()
{ "content_hash": "7d7edfba4ae92b816fcd19d0f222368f", "timestamp": "", "source": "github", "line_count": 104, "max_line_length": 71, "avg_line_length": 28.048076923076923, "alnum_prop": 0.5831333561878642, "repo_name": "hgijeon/the_PLAY", "id": "fdde72591bcf06b60ab56b5f16b6844454c393b8", "size": "2997", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Structure/Middle/MIDI/RawInstreamFile.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "118916" } ], "symlink_target": "" }
import logging
import redis
import json
import re
import os

from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pylons.decorators import jsonify

from mapofinnovation.lib.base import BaseController, render
from datetime import datetime

log = logging.getLogger(__name__)


def _get_redis():
    """Return a redis client for $REDIS_URL, defaulting to localhost.

    Centralizes the connection boilerplate that was previously duplicated
    in every controller action.
    """
    redis_url = os.environ.get("REDIS_URL") or "localhost"
    return redis.from_url(redis_url)


class BaseapiController(BaseController):

    @jsonify
    def getAllSpaces(self):
        """Return all stored spaces as a JSON list of dicts.

        Internal-only fields (image_url, g_place_id) are stripped from
        each record before it is returned.
        """
        spaceslist = []
        r = _get_redis()
        for key in r.scan_iter():
            row = r.hgetall(key)
            space = {}
            for field in row:
                if field in ("image_url", "g_place_id"):
                    continue
                # Redis returns raw bytes/str; coerce defensively.
                space[field] = unicode(row[field], errors='replace')
            spaceslist.append(space)
        return spaceslist

    @jsonify
    def addSpace(self):
        """Add a space unless one with the same primary website exists.

        New spaces start unarchived and unverified. The record key is the
        space name concatenated with a timestamp, with spaces removed.
        """
        r = _get_redis()
        surl = request.params.get("primary_website")
        exists = False
        if surl is not None:
            exists = self._search_space(surl)
        if exists:
            # Previously fell through and returned None (JSON null);
            # report the duplicate explicitly instead.
            # NOTE: 'sucess' spelling kept for API backward compatibility.
            return {'sucess': 'false'}
        dparams = dict(request.params.items())
        dparams.update({'archived': False, 'verified': False})
        skey = request.params.get("name") + str(datetime.now())
        r.hmset(re.sub(' ', '', skey), dparams)
        return {'sucess': 'true'}

    def _search_space(self, surl):
        # TODO: implement duplicate-space search; currently always reports
        # that no matching space exists.
        return False

    @jsonify
    def changeSpace(self):
        """Overwrite the stored fields of an existing space."""
        # TODO: restrict changes once a space has been verified
        skey = request.params.get("id")
        r = _get_redis()
        dparams = dict(request.params.items())
        r.hmset(skey, dparams)
        return {'sucess': 'true'}

    @jsonify
    def archiveSpace(self):
        """Flag a space as archived without deleting its data."""
        skey = request.params.get("id")
        r = _get_redis()
        r.hset(skey, 'archived', True)
        return {'sucess': 'true'}
{ "content_hash": "4fb922e5c397d02271cd2e8fce035717", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 69, "avg_line_length": 25.06451612903226, "alnum_prop": 0.6748176748176749, "repo_name": "AnanseGroup/map-of-innovation", "id": "0d6059c220d30963501a77d0d4d9ade5182a044d", "size": "2331", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mapofinnovation/controllers/baseapi.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "29402" }, { "name": "HTML", "bytes": "26867" }, { "name": "JavaScript", "bytes": "4881" }, { "name": "Python", "bytes": "47944" }, { "name": "Shell", "bytes": "52" } ], "symlink_target": "" }
"""Post-processing model outputs to generate detection.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from six.moves import range import tensorflow.compat.v1 as tf from utils import box_utils def generate_detections_factory(params): """Factory to select function to generate detection.""" if params.use_batched_nms: raise ValueError('Batched NMS is not supported.') else: func = functools.partial( _generate_detections_v1, max_total_size=params.max_total_size, nms_iou_threshold=params.nms_iou_threshold, score_threshold=params.score_threshold, pre_nms_num_boxes=params.pre_nms_num_boxes) return func def _generate_detections_v1(boxes, scores, attributes, max_total_size=100, nms_iou_threshold=0.3, score_threshold=0.05, pre_nms_num_boxes=5000): """Generate the final detections given the model outputs. This uses batch unrolling, which is TPU compatible. Args: boxes: a tensor with shape [batch_size, N, num_classes, 4] or [batch_size, N, 1, 4], which box predictions on all feature levels. The N is the number of total anchors on all levels. scores: a tensor with shape [batch_size, N, num_classes], which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. attributes: a tensor with shape [batch_size, N, num_attributes], which stacks attribute probability on all feature levels. max_total_size: a scalar representing maximum number of boxes retained over all classes. nms_iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: a float representing the threshold for deciding when to remove boxes based on score. pre_nms_num_boxes: an int number of top candidate detections per class before NMS. 
Returns: nmsed_boxes: `float` Tensor of shape [batch_size, max_total_size, 4] representing top detected boxes in [y1, x1, y2, x2]. nmsed_scores: `float` Tensor of shape [batch_size, max_total_size] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nmsed_classes: `int` Tensor of shape [batch_size, max_total_size] representing classes for detected boxes. nmsed_attributes: `int` Tensor of shape [batch_size, max_total_size, num_attributes] representing attributes for detected boxes. valid_detections: `int` Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ with tf.name_scope('generate_detections'): batch_size = scores.get_shape().as_list()[0] nmsed_boxes = [] nmsed_classes = [] nmsed_attributes = [] nmsed_scores = [] valid_detections = [] for i in range(batch_size): (nmsed_boxes_i, nmsed_scores_i, nmsed_classes_i, nmsed_attributes_i, valid_detections_i) = _generate_detections_per_image( boxes[i], scores[i], attributes[i], max_total_size, nms_iou_threshold, score_threshold, pre_nms_num_boxes) nmsed_boxes.append(nmsed_boxes_i) nmsed_scores.append(nmsed_scores_i) nmsed_classes.append(nmsed_classes_i) nmsed_attributes.append(nmsed_attributes_i) valid_detections.append(valid_detections_i) nmsed_boxes = tf.stack(nmsed_boxes, axis=0) nmsed_scores = tf.stack(nmsed_scores, axis=0) nmsed_classes = tf.stack(nmsed_classes, axis=0) nmsed_attributes = tf.stack(nmsed_attributes, axis=0) valid_detections = tf.stack(valid_detections, axis=0) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_attributes, valid_detections) def _generate_detections_per_image(boxes, scores, attributes, max_total_size=100, nms_iou_threshold=0.3, score_threshold=0.05, pre_nms_num_boxes=5000): """Generate the final detections per image given the model outputs. Args: boxes: a tensor with shape [N, num_classes, 4] or [N, 1, 4], which box predictions on all feature levels. The N is the number of total anchors on all levels. 
scores: a tensor with shape [N, num_classes], which stacks class probability on all feature levels. The N is the number of total anchors on all levels. The num_classes is the number of classes predicted by the model. Note that the class_outputs here is the raw score. attributes: a tensor with shape [N, num_attributes], which stacks attribute probability on all feature levels. max_total_size: a scalar representing maximum number of boxes retained over all classes. nms_iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: a float representing the threshold for deciding when to remove boxes based on score. pre_nms_num_boxes: an int number of top candidate detections per class before NMS. Returns: nmsed_boxes: `float` Tensor of shape [max_total_size, 4] representing top detected boxes in [y1, x1, y2, x2]. nmsed_scores: `float` Tensor of shape [max_total_size] representing sorted confidence scores for detected boxes. The values are between [0, 1]. nmsed_classes: `int` Tensor of shape [max_total_size] representing classes for detected boxes. nmsed_attributes: `int` Tensor of shape [max_total_size, num_attributes] representing attributes for detected boxes. valid_detections: `int` Tensor of shape [1] only the top `valid_detections` boxes are valid detections. """ nmsed_boxes = [] nmsed_scores = [] nmsed_classes = [] nmsed_attributes = [] num_classes_for_box = boxes.get_shape().as_list()[1] num_classes = scores.get_shape().as_list()[1] for i in range(num_classes): boxes_i = boxes[:, min(num_classes_for_box - 1, i)] scores_i = scores[:, i] # Obtains pre_nms_num_boxes before running NMS. 
scores_i, indices = tf.nn.top_k( scores_i, k=tf.minimum(tf.shape(scores_i)[-1], pre_nms_num_boxes)) boxes_i = tf.gather(boxes_i, indices) attributes_i = tf.gather(attributes, indices) (nmsed_indices_i, nmsed_num_valid_i) = tf.image.non_max_suppression_padded( tf.cast(boxes_i, tf.float32), tf.cast(scores_i, tf.float32), max_total_size, iou_threshold=nms_iou_threshold, score_threshold=score_threshold, pad_to_max_output_size=True, name='nms_detections_' + str(i)) nmsed_boxes_i = tf.gather(boxes_i, nmsed_indices_i) nmsed_scores_i = tf.gather(scores_i, nmsed_indices_i) nmsed_attributes_i = tf.gather(attributes_i, nmsed_indices_i) # Sets scores of invalid boxes to -1. nmsed_scores_i = tf.where( tf.less(tf.range(max_total_size), [nmsed_num_valid_i]), nmsed_scores_i, -tf.ones_like(nmsed_scores_i)) nmsed_classes_i = tf.fill([max_total_size], i) nmsed_boxes.append(nmsed_boxes_i) nmsed_scores.append(nmsed_scores_i) nmsed_classes.append(nmsed_classes_i) nmsed_attributes.append(nmsed_attributes_i) # Concats results from all classes and sort them. 
nmsed_boxes = tf.concat(nmsed_boxes, axis=0) nmsed_scores = tf.concat(nmsed_scores, axis=0) nmsed_classes = tf.concat(nmsed_classes, axis=0) nmsed_attributes = tf.concat(nmsed_attributes, axis=0) nmsed_scores, indices = tf.nn.top_k( nmsed_scores, k=max_total_size, sorted=True) nmsed_boxes = tf.gather(nmsed_boxes, indices) nmsed_classes = tf.gather(nmsed_classes, indices) nmsed_attributes = tf.gather(nmsed_attributes, indices) valid_detections = tf.reduce_sum( tf.cast(tf.greater(nmsed_scores, -1), tf.int32)) return (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_attributes, valid_detections) class GenericDetectionGenerator(object): """Generates the final detected boxes with scores and classes.""" def __init__(self, params): self._apply_nms = params.apply_nms self._generate_detections = generate_detections_factory(params) def __call__(self, box_outputs, class_outputs, attribute_outputs, anchor_boxes, image_shape): """Generate final detections. Args: box_outputs: a tensor of shape of [batch_size, K, num_classes * 4] representing the class-specific box coordinates relative to anchors. class_outputs: a tensor of shape of [batch_size, K, num_classes] representing the class logits before applying score activiation. attribute_outputs: a tensor of shape of [batch_size, K, num_attributes] representing the attribute logits before applying score activiation. anchor_boxes: a tensor of shape of [batch_size, K, 4] representing the corresponding anchor boxes w.r.t `box_outputs`. image_shape: a tensor of shape of [batch_size, 2] storing the image height and width w.r.t. the scaled image, i.e. the same image space as `box_outputs` and `anchor_boxes`. Returns: nmsed_boxes: `float` Tensor of shape [batch_size, max_total_size, 4] representing top detected boxes in [y1, x1, y2, x2]. nmsed_scores: `float` Tensor of shape [batch_size, max_total_size] representing sorted confidence scores for detected boxes. The values are between [0, 1]. 
nmsed_classes: `int` Tensor of shape [batch_size, max_total_size] representing classes for detected boxes. nmsed_attributes: `int` Tensor of shape [batch_size, max_total_size, num_attributes] representing attributes for detected boxes. valid_detections: `int` Tensor of shape [batch_size] only the top `valid_detections` boxes are valid detections. """ class_outputs = tf.nn.softmax(class_outputs, axis=-1) attribute_outputs = tf.math.sigmoid(attribute_outputs) # Removes the background class. class_outputs_shape = tf.shape(class_outputs) batch_size = class_outputs_shape[0] num_locations = class_outputs_shape[1] num_classes = class_outputs_shape[-1] num_detections = num_locations * (num_classes - 1) class_outputs = tf.slice(class_outputs, [0, 0, 1], [-1, -1, -1]) box_outputs = tf.reshape( box_outputs, tf.stack([batch_size, num_locations, num_classes, 4], axis=-1)) box_outputs = tf.slice( box_outputs, [0, 0, 1, 0], [-1, -1, -1, -1]) anchor_boxes = tf.tile( tf.expand_dims(anchor_boxes, axis=2), [1, 1, num_classes - 1, 1]) box_outputs = tf.reshape( box_outputs, tf.stack([batch_size, num_detections, 4], axis=-1)) anchor_boxes = tf.reshape( anchor_boxes, tf.stack([batch_size, num_detections, 4], axis=-1)) # Box decoding. decoded_boxes = box_utils.decode_boxes( box_outputs, anchor_boxes, weights=[10.0, 10.0, 5.0, 5.0]) # Box clipping decoded_boxes = box_utils.clip_boxes(decoded_boxes, image_shape) decoded_boxes = tf.reshape( decoded_boxes, tf.stack([batch_size, num_locations, num_classes - 1, 4], axis=-1)) if not self._apply_nms: return { 'raw_boxes': decoded_boxes, 'raw_scores': class_outputs, 'raw_attributes': attribute_outputs, } (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_attributes, valid_detections) = self._generate_detections( decoded_boxes, class_outputs, attribute_outputs) # Adds 1 to offset the background class which has index 0. 
nmsed_classes += 1 return { 'num_detections': valid_detections, 'detection_boxes': nmsed_boxes, 'detection_classes': nmsed_classes, 'detection_attributes': nmsed_attributes, 'detection_scores': nmsed_scores, }
{ "content_hash": "e2763419b0df3010224bd891288e5540", "timestamp": "", "source": "github", "line_count": 289, "max_line_length": 80, "avg_line_length": 42.622837370242216, "alnum_prop": 0.6583049196298101, "repo_name": "tensorflow/tpu", "id": "b3d327d886ee0d16e27150470b8ea810478ea96f", "size": "13007", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "models/official/detection/projects/fashionpedia/ops/postprocess_ops.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "754301" }, { "name": "Dockerfile", "bytes": "2734" }, { "name": "Go", "bytes": "226317" }, { "name": "Jupyter Notebook", "bytes": "56231509" }, { "name": "Makefile", "bytes": "2369" }, { "name": "Python", "bytes": "3444271" }, { "name": "Shell", "bytes": "21032" }, { "name": "Starlark", "bytes": "164" } ], "symlink_target": "" }
"""aniauth accounts app URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url import evexml.views as views urlpatterns = [ url(r'^add/$', views.AddAPIView.as_view(), name='eveapi_add'), url(r'^added', views.AddedAPIView.as_view(), name='eveapi_added'), ]
{ "content_hash": "04bd4d42a7bdb98ca71d47181ad21687", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 79, "avg_line_length": 34.26923076923077, "alnum_prop": 0.6823793490460157, "repo_name": "randomic/aniauth-tdd", "id": "83add2cdd8010e1e6369ce1dea7bd572dfba6453", "size": "891", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "evexml/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "6290" }, { "name": "Python", "bytes": "48237" } ], "symlink_target": "" }
""" TMVA factory runs with classifier and additional information """ from __future__ import division, print_function, absolute_import import sys import os from rootpy.io import root_open import ROOT from . import tmva from six.moves import cPickle as pickle __author__ = 'Tatiana Likhomanenko' def tmva_process(classifier, info): """ Create TMVA classification factory, train, test and evaluate all methods :param rep.estimators.tmva.TMVAClassifier | rep.estimators.tmva.TMVARegressor classifier: classifier to train :param rep.estimators.tmva._AdditionalInformation info: additional information """ ROOT.TMVA.Tools.Instance() file_out = ROOT.TFile(os.path.join(info.directory, info.tmva_root), "RECREATE") factory = ROOT.TMVA.Factory(info.tmva_job, file_out, classifier.factory_options) for var in info.features: factory.AddVariable(var) # Set data file_root = root_open(info.filename, mode='update') if info.model_type == 'classification': # signal must the first added tree, because rectangular cut optimization in another wat doesn't work factory.AddTree(file_root[info.treename], 'Signal', 1., ROOT.TCut("{column} == {label}".format(column=info.target_column, label=1)), 'Training') factory.AddTree(file_root[info.treename], 'Signal', 1., ROOT.TCut("{column} == {label}".format(column=info.target_column, label=1)), 'Testing') factory.AddTree(file_root[info.treename], 'Background', 1., ROOT.TCut("{column} == {label}".format(column=info.target_column, label=0)), 'Training') factory.AddTree(file_root[info.treename], 'Background', 1., ROOT.TCut("{column} == {label}".format(column=info.target_column, label=0)), 'Testing') factory.SetWeightExpression(info.weight_column) elif info.model_type == 'regression': factory.AddTarget(info.target_column) factory.AddTree(file_root[info.treename], 'Regression', 1., ROOT.TCut(""), "Training") factory.AddTree(file_root[info.treename], 'Regression', 1., ROOT.TCut(""), 'Testing') factory.SetWeightExpression(info.weight_column, "Regression") else: raise 
NotImplementedError("Doesn't support type {}".format(info.model_type)) # Set method parameters = ":".join( ["{key}={value}".format(key=key, value=value) for key, value in classifier.method_parameters.items()]) factory.BookMethod(ROOT.TMVA.Types.__getattribute__(ROOT.TMVA.Types, classifier.method), classifier._method_name, parameters) factory.TrainAllMethods() factory.TestAllMethods() factory.EvaluateAllMethods() file_out.Close() file_root.Close() def main(): # Reading the configuration from stdin classifier = pickle.load(sys.stdin) info = pickle.load(sys.stdin) assert isinstance(classifier, tmva.TMVAClassifier) or isinstance(classifier, tmva.TMVARegressor) assert isinstance(info, tmva._AdditionalInformation) tmva_process(classifier, info)
{ "content_hash": "393f8ad18c5b9fef8b94866d3ec6e973", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 117, "avg_line_length": 39.53086419753087, "alnum_prop": 0.6574016239850093, "repo_name": "Quadrocube/rep", "id": "eefa50a41be6e86584dc4722015bb6d8f23084e9", "size": "3202", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rep/estimators/_tmvaFactory.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "311732" }, { "name": "Shell", "bytes": "195" } ], "symlink_target": "" }
from __future__ import unicode_literals, print_function

from pprint import pformat

try:
    from collections import OrderedDict
    from argparse import ArgumentParser
except ImportError:
    # Python 2.6 stuff
    from tests.OrderedDict import OrderedDict
    from tests.argparse import ArgumentParser

from ashes import AshesEnv, Template
import tests
from tests import ALL_TEST_MODULES, OPS, AshesTest

import unittest

DEFAULT_WIDTH = 70

LEGEND = '. = passed, _ = skipped, X = failed, E = exception'

import sys
PY3 = (sys.version_info[0] == 3)
if PY3:
    # Restore the Python 2 aliases so the rest of the module can use them.
    unicode = str
    basestring = str


def get_line(title, items, twidth=20, talign='>', width=DEFAULT_WIDTH):
    """Format one grid row: a fixed-width title column followed by
    evenly spaced, centered cells, one per item."""
    if len(title) > twidth:
        # Truncate over-long titles with an ellipsis.
        title = title[:twidth - 3] + '...'
    rwidth = width - twidth
    items = list(items or [])
    pw = int(rwidth / len(items))
    item_tmpl = ''.join(['{' + str(i) + ':^{pw}}' for i in range(len(items))])
    tmpl = '{title:{talign}{twidth}}' + item_tmpl
    return tmpl.format(title=title, talign=talign, twidth=twidth, pw=pw, *items)


def get_unit_tests(module):
    """Return the unittest.TestCase subclasses defined in *module*."""
    tests = [t for t in module.__dict__.values()
             if isinstance(t, type)
             and issubclass(t, unittest.TestCase)
             and t is not unittest.TestCase]
    return tests


def get_sorted_tests(module):
    """Return the AshesTest subclasses in *module*, sorted by template
    length (shortest first) so simpler cases appear earlier in the grid."""
    tests = [t for t in module.__dict__.values()
             if hasattr(t, 'ast') and issubclass(t, AshesTest)
             and t is not AshesTest]
    return sorted(tests, key=lambda x: len(x.template or ''))


def get_test_results(test_cases, raise_on=None):
    """Register every test case's template in a shared environment, then
    run each case and collect its results.

    If *raise_on* matches a test's name, that test re-raises exceptions
    instead of recording them (used for --debug).
    """
    env = AshesEnv(keep_whitespace=False)
    ret = []
    # All templates are registered first so tests can reference each other.
    for tc in test_cases:
        if issubclass(tc, AshesTest):
            env.register(Template(tc.name, tc.template, env=env, lazy=True))
    for tc in test_cases:
        if issubclass(tc, AshesTest):
            raise_exc = (tc.name == raise_on)
            ret.append(tc.get_test_result(env, raise_exc=raise_exc))
    return ret


def get_grid(test_results, title, width=DEFAULT_WIDTH):
    """Render *test_results* as a text grid: one row per test, one column
    per operation, with per-column pass counts in the footer."""
    lines = ['', ' ' + LEGEND, '']
    if test_results:
        test_count = len(test_results)
        col_names = [dt.op_name for dt in OPS]
        headings = get_line(title, col_names, talign='^')
        lines.append(headings)
        rstripped_width = len(headings.rstrip())
        bar_str = '-' * (rstripped_width + 1)
        lines.append(bar_str)
        counters = OrderedDict([(cn, 0) for cn in col_names])
        for tres in test_results:
            lines.append(get_line(tres.name, tres.get_symbols()))
            for dtr in tres.results:
                if dtr.test_result is True:
                    counters[dtr.op_name] += 1
        lines.append(bar_str)
        lines.append(get_line('(%s total)' % test_count, counters.values()))
    else:
        lines.append('No tests found.')
    return '\n'.join(lines + [''])


def get_single_report(name, op=None, verbose=None, debug=None):
    """Build a detailed text report for the single test named *name*
    (optionally "module.testname"), showing reference vs. actual output
    for each failing operation (all operations with *verbose*).

    Returns the report string, or None if the test cannot be found.
    """
    raise_on = None
    mod_name, _, test_name = name.rpartition('.')
    # Default module when no "module." prefix is given.
    test_module = getattr(tests, mod_name or 'dust_site')
    # Case-insensitive lookup of the test class.
    lookup = dict([(k.lower(), v) for k, v in test_module.__dict__.items()])
    try:
        test = lookup[test_name.lower()]
    except KeyError:
        print('No test named: %r' % name)
        return
    if debug:
        raise_on = test.name
    try:
        tres = get_test_results([test], raise_on)[0]
    except Exception as e:
        # --debug drops into a post-mortem debugger on failure.
        print(e)
        import pdb
        pdb.post_mortem()
        raise
    lines = []
    for op_name, result, result_ref, test_result in tres.results:
        if not verbose and (test_result is True or test_result == 'skipped'):
            continue
        if op:
            if op_name != op:
                continue
        else:
            lines.append('')
            lines.append(' * %s %s reference:' % (name, op_name))
            if not isinstance(result_ref, basestring):
                result_ref = pformat(result_ref)
            lines.extend(['----', result_ref, '----', ''])
        lines.append(' * %s %s actual:' % (name, op_name))
        if not isinstance(result, basestring):
            result = pformat(result)
        lines.extend(['----', result, '----'])
    if not lines:
        lines = ['No results found for test: %r' % name]
    else:
        lines = ['', 'Test results for %r' % name] + lines
    return '\n'.join(lines)


def parse_args():
    """Define and parse the command-line interface."""
    prs = ArgumentParser(description="command central for developing and"
                         " testing Ashes.")
    prs.add_argument('--name', help='see results for this test case')
    prs.add_argument('--op', help='only see test result for this operation')
    prs.add_argument('--verbose', action='store_true',
                     help='also show results of passing ops')
    prs.add_argument('--debug', action='store_true',
                     help='pop a pdb.post_mortem() on exceptions')
    prs.add_argument('--benchmark', action='store_true',
                     help='run benchmarks')
    prs.add_argument('--run_unittests', action='store_true',
                     help='run unittests')
    prs.add_argument('--disable_core', action='store_true',
                     help='disable core tests')
    prs.add_argument('--benchtest', action='store_true',
                     help='run testing benchmark; disables everything else')
    return prs.parse_args()


def main(width=DEFAULT_WIDTH):
    """CLI entry point: dispatch to grids, single reports, unittests,
    benchmarks, or the profiling benchtest based on parsed arguments."""
    args = parse_args()
    name = args.name
    run_benchmarks = args.benchmark or False
    run_unittests = args.run_unittests or False
    disable_core = args.disable_core or False

    # if we're running the benchtest for profiling, thats it!
    run_benchtest = args.benchtest or False
    if run_benchtest:
        disable_core = True
        run_benchmarks = False
        run_unittests = False

    if not disable_core:
        if not name:
            # remember `tests` is a namespace. don't overwrite!
            for test_mod in ALL_TEST_MODULES:
                title = getattr(test_mod, 'heading', '')
                _tests = get_sorted_tests(test_mod)
                test_results = get_test_results(_tests)
                grid = get_grid(test_results, title)
                if grid:
                    print(test_mod)
                    print(grid)
        else:
            single_rep = get_single_report(name, args.op, args.verbose,
                                           args.debug)
            if single_rep:
                print(single_rep)

    # do we have unittests?
    if run_unittests:
        _unit_tests = []
        for test_mod in ALL_TEST_MODULES:
            _tests = get_unit_tests(test_mod)
            if _tests:
                _unit_tests.extend(_tests)
        if _unit_tests:
            loader = unittest.TestLoader()
            suites_list = []
            for _test in _unit_tests:
                suite = loader.loadTestsFromTestCase(_test)
                suites_list.append(suite)
            big_suite = unittest.TestSuite(suites_list)
            runner = unittest.TextTestRunner(verbosity=3)
            results = runner.run(big_suite)

    # toggled!
    if run_benchmarks:
        import tests
        tests.benchmarks.bench_render_repeat()
        tests.benchmarks.bench_render_reinit()
        tests.benchmarks.bench_cacheable_templates()

    if run_benchtest:
        import tests.utils_profiling
        import time
        filename_stats = 'stats-%s.csv' % time.time()
        tests.utils_profiling.profile_function(tests.benchmarks.bench_render_repeat,
                                               filename_stats)


if __name__ == '__main__':
    main()
{ "content_hash": "eb1bcf72ae70588e657d39a699225e40", "timestamp": "", "source": "github", "line_count": 222, "max_line_length": 100, "avg_line_length": 34.12162162162162, "alnum_prop": 0.5762376237623762, "repo_name": "jvanasco/ashes", "id": "8cbdf304464d0f91eb4c90f458f48d95b6e7c278", "size": "7597", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "run_tests.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "AGS Script", "bytes": "52" }, { "name": "HTML", "bytes": "7175" }, { "name": "Python", "bytes": "253092" } ], "symlink_target": "" }
from mock import Mock, patch import unittest from pin.lib import p from tests import fixtures class TestService(unittest.TestCase): def setUp(self): fixtures.reset() self.service = p.modes["service"] self.service.enable() def test_menu_next(self): p.events.post("switch_service_up") p.events.dispatch() self.assertEquals("Tests", self.service.title.style["text"]) def test_menu_previous(self): p.events.post("switch_service_down") p.events.dispatch() self.assertEquals("Utilities", self.service.title.style["text"]) def test_menu_next_suspended(self): self.service.suspend() p.events.post("switch_service_up") p.events.dispatch() self.assertEquals("Settings", self.service.title.style["text"]) self.service.resume() p.events.post("switch_service_up") p.events.dispatch() self.assertEquals("Tests", self.service.title.style["text"]) def test_menu_exit(self): p.events.post("switch_service_exit") p.events.dispatch() self.assertFalse(self.service.enabled) def test_menu_down(self): p.events.post("switch_service_enter") p.events.dispatch() self.assertEquals("General", self.service.title.style["text"]) def test_menu_cache(self): p.events.post("switch_service_up") # Go to Tests p.events.post("switch_service_enter") # Enter Tests p.events.post("switch_service_up") # Go to Coils p.events.post("switch_service_exit") # Back to main p.events.post("switch_service_enter") # Enter Tests p.events.dispatch() self.assertEquals("Coils", self.service.title.style["text"]) def test_option_select(self): p.events.post("switch_service_enter") # Enter Settings p.events.post("switch_service_down") # General -> Pricing p.events.post("switch_service_enter") # Enter Pricing p.events.post("switch_service_up") # Free Play -> Game Pricing p.events.post("switch_service_enter") # Select Game Pricing p.events.dispatch() self.assertEquals("1 for 0.50", self.service.value.style["text"]) def test_option_select_next(self): p.events.post("switch_service_enter") # Enter Settings p.events.post("switch_service_down") # 
General -> Pricing p.events.post("switch_service_enter") # Enter Pricing p.events.post("switch_service_up") # Free Play -> Game Pricing p.events.post("switch_service_enter") # Select Game Pricing p.events.post("switch_service_up") # Up to 1 for 0.75 p.events.dispatch() self.assertEquals("1 for 0.75", self.service.value.style["text"]) def test_option_select_previous(self): p.events.post("switch_service_enter") # Enter Settings p.events.post("switch_service_down") # General -> Pricing p.events.post("switch_service_enter") # Enter Pricing p.events.post("switch_service_up") # Free Play -> Game Pricing p.events.post("switch_service_enter") # Select Game Pricing p.events.post("switch_service_down") # Down to 1 for 0.25 p.events.dispatch() self.assertEquals("1 for 0.25", self.service.value.style["text"]) def test_data_display(self): p.data["earnings"] = 1.23 p.events.post("switch_service_up") # Tests p.events.post("switch_service_up") # Audits p.events.post("switch_service_enter") # Enter Audtis p.events.dispatch() self.assertEquals("1.23", self.service.value.style["text"]) def test_action(self): p.events.post("switch_service_down") # To Utilities p.events.post("switch_service_enter") # Enter Utilities p.events.post("switch_service_down") # To More... 
p.events.post("switch_service_down") # To Debug p.events.post("switch_service_down") # To Browsers p.events.post("switch_service_enter") # Enter Browsers p.events.post("switch_service_enter") # Select Music Browser p.events.dispatch() self.assertTrue(p.modes["music_browser"].enabled) def test_confirm_action(self): p.data["credits"] = 10 p.events.post("switch_service_down") # Utilities p.events.post("switch_service_enter") # Enter Utilities p.events.post("switch_service_up") # Clear p.events.post("switch_service_enter") # Enter Clear p.events.post("switch_service_enter") # Clear Credits p.events.post("switch_service_up") # Move to YES p.events.post("switch_service_enter") # Confirm p.events.dispatch() self.assertEquals(0, p.data["credits"]) def test_confirm_action_previous(self): p.data["credits"] = 10 p.events.post("switch_service_down") # Utilities p.events.post("switch_service_enter") # Enter Utilities p.events.post("switch_service_up") # Clear p.events.post("switch_service_enter") # Enter Clear p.events.post("switch_service_enter") # Clear Credits p.events.post("switch_service_down") # Move to YES p.events.post("switch_service_enter") # Confirm p.events.dispatch() self.assertEquals(0, p.data["credits"]) def test_cancel_action(self): p.data["credits"] = 10 p.events.post("switch_service_down") # Utilities p.events.post("switch_service_enter") # Enter Utilities p.events.post("switch_service_up") # Clear p.events.post("switch_service_enter") # Enter Clear p.events.post("switch_service_enter") # Clear Credits p.events.post("switch_service_enter") # Select NO p.events.dispatch() self.assertEquals(10, p.data["credits"]) def test_cancel_exit(self): p.data["credits"] = 10 p.events.post("switch_service_down") # Utilities p.events.post("switch_service_enter") # Enter Utilities p.events.post("switch_service_up") # Clear p.events.post("switch_service_enter") # Enter Clear p.events.post("switch_service_enter") # Clear Credits p.events.post("switch_service_exit") # Exit out 
p.events.dispatch() self.assertEquals(10, p.data["credits"]) def test_save(self): p.data["free_play"] = False p.events.post("switch_service_enter") # Enter Settings p.events.post("switch_service_up") # General -> Gameplay p.events.post("switch_service_up") # Gameplay -> Pricing p.events.post("switch_service_enter") # Enter Pricing p.events.post("switch_service_enter") # Enter Free Play p.events.post("switch_service_up") # Select YES p.events.post("switch_service_enter") # Save p.events.dispatch() self.assertTrue(p.data["free_play"]) p.now = 5 # Remove confirmation message p.timers.service() def test_no_change(self): p.data["free_play"] = False p.events.post("switch_service_enter") # Enter Settings p.events.post("switch_service_up") # General -> Gameplay p.events.post("switch_service_up") # Gameplay -> Pricing p.events.post("switch_service_enter") # Enter Pricing p.events.post("switch_service_enter") # Enter Free Play p.events.post("switch_service_enter") # No Change p.events.dispatch() self.assertFalse(p.data["free_play"]) class TestServiceActions(unittest.TestCase): def setUp(self): self.service = p.modes["service"] self.service.enable() def test_clear_credits(self): p.data["credits"] = 5 self.service.clear_credits() self.assertEquals(0, p.data["credits"]) def test_movie_browser(self): self.service.movie_browser() self.assertTrue(p.modes["movie_browser"].enabled) def test_music_browser(self): self.service.music_browser() self.assertTrue(p.modes["music_browser"].enabled) def test_sound_browser(self): self.service.sound_browser() self.assertTrue(p.modes["sound_browser"].enabled) def test_font_browser(self): self.service.font_browser() self.assertTrue(p.modes["font_browser"].enabled) def test_image_browser(self): self.service.image_browser() self.assertTrue(p.modes["image_browser"].enabled)
{ "content_hash": "4be95fcd23aeeb84bd7ac8a06a8e6447", "timestamp": "", "source": "github", "line_count": 205, "max_line_length": 79, "avg_line_length": 41.74634146341464, "alnum_prop": 0.6180182285580743, "repo_name": "town-hall-pinball/project-omega", "id": "88406e240af1b850161f33f081ea04ead7dd89fb", "size": "9664", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/service/test_service.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "151" }, { "name": "CSS", "bytes": "116351" }, { "name": "HTML", "bytes": "6419" }, { "name": "JavaScript", "bytes": "25150" }, { "name": "Python", "bytes": "539013" }, { "name": "Shell", "bytes": "2713" } ], "symlink_target": "" }
import romeo  # avoid import circularities; reference everything from the top object


@romeo.grammars.query(pattern="^select (?P<parameter>.+)",
                      form="select <key>",
                      help="returns a list of objects with the given key")
def select_parameter(parameter):
    """Yield every RomeoKeyValue object whose key matches *parameter*."""
    for match in romeo.foundation.RomeoKeyValue.search(parameter):
        yield match


@romeo.grammars.query(pattern="^select (?P<key>.+) where (?P<key2>.+) is (?P<value>.+)",
                      form="select key1 where key2 is value",
                      help="look for the object where a known key value pair exists")
def compound_select(key, key2, value):
    """Yield objects keyed *key* that are related to the (key2, value) pair.

    Produces nothing at all when no (key2, value) object exists.
    """
    if not romeo.foundation.RomeoKeyValue.exists(key2, value):
        return
    anchor = romeo.foundation.RomeoKeyValue(key2, value)
    for candidate in romeo.foundation.RomeoKeyValue.objects:
        if candidate.KEY == key and anchor.isRelated(candidate):
            yield candidate


@romeo.grammars.query(pattern="^my (?P<parameter>.+)",
                      form="my <key>",
                      help="return object matching the parameter belonging to %s" % (romeo.MYHOSTNAME,))
def my_parameter(parameter):
    """Yield the current host's child objects whose key matches *parameter*."""
    me = romeo.whoami()
    if not me:
        return
    for child in me.CHILDREN:
        if child.KEY == parameter:
            yield child
{ "content_hash": "73dafeba7eb0dbf9bb0344a948a617ce", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 88, "avg_line_length": 43.666666666666664, "alnum_prop": 0.6793893129770993, "repo_name": "OrbitzWorldwide/droned", "id": "ca4eac849546658c9b68634bb183b2ea32fa9e29", "size": "1962", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "romeo/lib/romeo/grammars/_builtins.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "760534" }, { "name": "Shell", "bytes": "1083" } ], "symlink_target": "" }
import cookielib import mechanize #import weakref import random import time import uuid import sys from loremipsum import get_sentence from birdie_settings import * from initialize_db import ( User, DBSession, ) class FakeUser(object): def __init__(self, browser): rand = random.randrange(0, MAX_USERS) row = DBSession.query(User.id, User.username, User.password)[rand] self.id = row.id self.username = row.username self.password = row.password self.br = browser self.logged_in = False def log_in(self): timer = () if not self.logged_in: timer = _login(self.br, self.username, self.password) if timer[0] == 'Login': self.logged_in = True return timer def post_chirp(self): latency=0 timer = () br = self.br _ = br.open(BASE_URL+'/'+self.username) # chirps_count=random.randrange(1, MAX_CHIRPS) # for i in range(chirps_count): br.select_form(nr=0) br.form[ 'chirp' ] = get_sentence() start_timer = time.time() resp = br.submit() resp.read() latency += time.time() - start_timer # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed' timer = 'Chirp', latency return timer def follow(self): br=self.br # randomly pick a friend - may be myself or a friend of mine, don't care friend = FakeUser(self.br) start_timer = time.time() resp = br.open(BASE_URL+'/'+friend.username+'/follow') resp.read() latency = time.time() - start_timer # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed' timer = 'Follow', latency return timer def unfollow(self): br=self.br # randomly pick a friend - may be myself or not an actual friend of mine, don't care old_friend = FakeUser(self.br) start_timer = time.time() resp = br.open(BASE_URL+'/'+old_friend.username+'/unfollow') resp.read() latency = time.time() - start_timer # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % 
resp.code # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed' timer = 'Unfollow', latency return timer def view(self): br=self.br # randomly pick a user - may be myself or a friend of mine, don't care buddy = FakeUser(self.br) start_timer = time.time() resp = br.open(BASE_URL+'/'+buddy.username+'/view') resp.read() latency = time.time() - start_timer # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed' timer = 'View_profile', latency return timer def __str__(self): return "FakeUser<user=%s,logged_in=%s>" % (self.username, self.logged_in) # utility functions def init_browser(): """Returns an initialized browser and associated cookie jar.""" br = mechanize.Browser() br.set_handle_equiv(True) # br.set_handle_gzip(True) br.set_handle_redirect(True) br.set_handle_referer(True) br.set_handle_robots(False) # add a custom header to declare "Believe me, I am not a robot" br.addheaders = [('User-agent', 'Mozilla/5.0')] br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) return br def _login(br, u, p): timer = () _ = br.open(BASE_URL+'/login') br.select_form(nr=0) br.form[ 'login' ] = u br.form[ 'password' ] = p start_timer = time.time() resp = br.submit() resp.read() latency = time.time() - start_timer assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code if 'Failed login' in resp.get_data(): timer= 'Login_failed', latency else: timer = 'Login', latency return timer def add_user(br): timer=() # build a brand new fake user random_uid = str(uuid.uuid4()) fullname = random_uid username = BASE_USERNAME+random_uid[:8] password = random_uid[:8] _ = br.open(BASE_URL+'/join') br.select_form(nr=0) br.form[ 'fullname' ] = fullname br.form[ 'username' ] = username br.form[ 'password' ] = password br.form[ 'confirm' ] = password br.form[ 'about' ] = ABOUT start_time = time.time() resp = br.submit() resp.read() latency = time.time() - 
start_time # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code if resp.geturl() == BASE_URL+'/join': timer = 'Failed_registration', latency else: timer = 'Register_new_user', latency # add user in the local db (for future retrieval) DBSession.add( User (username=username, password=password) ) # logout and reset cookie resp = br.open(BASE_URL+'/logout') # resp.read() # verify responses are valid assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code assert ('Public Timeline' in resp.get_data()), 'Text Assertion Failed' return timer def populate_db(br=None, size=MAX_USERS): if not br: br = init_browser() print '' for index in range(size): sys.stdout.write('\rPopulating the database with {} new users over {}'.format(index+1, MAX_USERS)) sys.stdout.flush() add_user(br) DBSession.commit() print ''
{ "content_hash": "dc276642e86c81dc06f957289152682c", "timestamp": "", "source": "github", "line_count": 214, "max_line_length": 106, "avg_line_length": 28.38317757009346, "alnum_prop": 0.563549555482384, "repo_name": "anhaflint/RedisBirdie", "id": "c1a648da37513bad794b17a04f971f74675ae92c", "size": "6074", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "birdie-stress/test_scripts/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "891" }, { "name": "HTML", "bytes": "32049" }, { "name": "Python", "bytes": "30801" } ], "symlink_target": "" }
import time import logging import select import gevent.queue as queue from gevent.lock import RLock class ConnectionWrapper(object): """ConnectionWrapper """ def __init__(self,pool,connection): self.pool = pool self.connection = connection def getConnection(self): return self.connection def __getattr__(self, name): return getattr(self.connection, name) def __getitem__(self, name): return self.connection[name] def __del__(self): self.pool.release(self.connection) del self.pool del self.connection class ConnectionPool(object): params = [] """ Some server have timeouts for their client connections therefore we will need to revalidate the connection after this expiration time. """ expiration = 60 # time in seconds overflow = 2 # hardLimit = maxSize * <the-value-of-overflow> """Connection Pool Shared Across Greenlets/Threads""" def __new__(cls, minSize=5, maxSize=20, timeout=10, *args, **kwargs): """ @param minSize -- pool minimum size @param maxSize -- the maximum size of the pool @param timeout -- time to wait in order to get an element from the pool """ """ @todo: [+]Add Pool size (maxSize) and initial connections(minSize) to create(as init argument) Remove broken/closed connections from the pool. Slowly increase or decrease the pool to its max or min size based on the demand. """ if not hasattr(cls,'_instance'): cls._instance = object.__new__(cls) cls._instance.createdConnections = 0 cls._instance.queue = queue.Queue(maxSize) cls._instance.minSize = minSize cls._instance.maxSize = maxSize cls._instance.timeout = timeout cls._instance.params = [args,kwargs] cls._instance.lock = RLock() hardLimit = maxSize* cls.overflow for x in xrange(minSize): cls._instance.queue.put([time.time(), cls.create(*args,**kwargs) ] ) cls._instance.createdConnections += 1 logging.getLogger().debug('Pool: Created new connection. 
%s (in queue: %s, created: %s, maxSize: %s, hardMaxSize: %s)' % \ (cls._instance.__class__, cls._instance.queue.qsize(), cls._instance.createdConnections, cls._instance.maxSize, hardLimit)) return cls._instance @classmethod def create(cls, *args, **kwargs): """ Makes the real connection to the data source and returns it. Notice: Implement the creation of the connection in the child class @return object """ raise Exception('Not Implemented') def _getSocket(self, client): """ Gets the transport socket from the client Notice: Implement this method in the child class @return socket """ raise Exception('Implement in child class') def ping(self, client): """ Checks if the connection is still alive. Override this method in the child class if needed @return boolean """ sock = self._getSocket(client) if not sock: return False rlist,wlist,xlist = select.select([sock], [], [], 0) if rlist: # the socket is readable, meaning there is either data from a previous call # (i.e our protocol is out of sync), or the connection was shut down on the # remote side. Either way discard this connection return False return True def get(self): """ Get connection from the pool """ client = None logging.getLogger().debug('Pool: Size: %d, For: %s' % (self.queue.qsize(), self.__class__)) try: block = self.createdConnections > self.maxSize (accessed, client) = self.queue.get(block, self.timeout) except queue.Empty: logging.getLogger().warn('Pool: No connection in pool: %s' % self.__class__) if client is not None and accessed + self.expiration < time.time() : # check if the client is still valid logging.getLogger().debug('Pool: Pinging client: %s' % client) if not self.ping(client): logging.getLogger().debug('Pool: Discarding Invalid Connection: %s' % client) try: self.close(client) except Exception, ex: logging.getLogger().warn('Pool: Unable to close client. 
Got exception: %s' % ex) client = None self.createdConnections -= 1 if not client: if self.queue.qsize() > self.minSize: logging.getLogger().debug('Pool: Waiting for connection. %s (in queue: %s, created: %s, maxSize: %s)' % \ (self.__class__, self.queue.qsize(), self.createdConnections, self.maxSize)) return self.get() self.lock.acquire() try: hardLimit = self.maxSize*self.overflow if self.createdConnections < hardLimit: logging.getLogger().debug('Created: %d, Hard Limit: %d' % (self.createdConnections, hardLimit)) client = self.create(*self.params[0], **self.params[1]) self.createdConnections += 1 logging.getLogger().debug('Pool: Created new connection. %s (in queue: %s, created: %s, maxSize: %s, hardMaxSize: %s)' % \ (client, self.queue.qsize(), self.createdConnections, self.maxSize, hardLimit)) finally: self.lock.release() if not client: logging.getLogger().debug('Pool: Waiting for connection. %s (in queue: %s, created: %s, maxSize: %s)' % \ (self.__class__, self.queue.qsize(), self.createdConnections, self.maxSize)) return self.get() return ConnectionWrapper(self, client) def getConnection(self): """Gets the object connection to the data source""" return self.get().getConnection() def close(self, client): """ Closes the connection to the client Notice: Implement this method in the child class """ return False def release(self, client): """ Returns connection back to the pool """ try: self.queue.put([time.time(), client], False) logging.getLogger().debug('Pool: Connection returned: %s' % client) except queue.Full: try: self.close(client) except Exception, ex: logging.getLogger().warn('Pool: Unable to close client. Got exception: %s' % ex) self.createdConnections -= 1 logging.getLogger().warn('Pool: Pool is full. 
Size: %d(Created: %d)' % (self.queue.qsize(), self.createdConnections)) def __repr__(self): return "Size: %d [%s:%s], Created: %d, Lost: %d" % (self.queue.qsize(), self.minSize, self.maxSize, self.createdConnections, (self.createdConnections - self.queue.qsize())) from threading import local class LocalConnectionPool(ConnectionPool): timeout = 120 # seconds """Connection Pool With only one connection per thread/greenlet""" def __new__(cls, minSize=5, maxSize=20, timeout=30, *args, **kwargs): """ @param size -- pool size @param *args @param **kwargs """ if not hasattr(cls,'_instance'): cls._instance = ConnectionPool.__new__(cls, minSize=5, maxSize=20, timeout=30, *args, **kwargs) cls._instance.local = local() return cls._instance def get(self): """Gets the object with the connection to the data source""" if hasattr(self.local, 'accessed') and self.local.accessed < time.time() - self.timeout: # hm - the same thread but not accessed recently try: logging.getLogger().debug('Deleting stale connection from the thread') del self.local.connection except: pass if not hasattr(self.local,'connection'): logging.getLogger().debug('Pool: New ThreadLocal Connection: %s' % self.__class__) self.local.connection = ConnectionPool.get(self) else: logging.getLogger().debug('Pool: Reusing Connection %s (%s)' % (self.local.connection, self.__class__)) self.local.accessed = time.time() return self.local.connection class NullConnectionPool(object): """ Makes a connection every time the get method is called """ timeout = 120 # seconds def __new__(cls, minSize=5, maxSize=20, timeout=30, *args, **kwargs): """ @param size -- pool size @param *args @param **kwargs """ if not hasattr(cls,'_instance'): cls._instance = object.__new__(cls) cls._instance.minSize = minSize cls._instance.maxSize = maxSize cls._instance.params = [args,kwargs] return cls._instance @classmethod def create(cls, *args, **kwargs): """ Makes the real connection to the data source and returns it. 
Notice: Implement the creation of the connection in the child class @return object """ return None def get(self): """Gets the object with the connection to the data source""" logging.getLogger().debug('Pool: Creating new client for: %s' % self.__class__) return ConnectionWrapper(self, self.create(*self.params[0],**self.params[1])) getConnection = get def release(self, client): try: self.close(client) except Exception, ex: logging.getLogger().warn('Pool: Unable to close client. Got exception: %s' % ex) def close(self, client): """ Closes the connection to the client Notice: Implement this method in the child class """ return False class LocalNullConnectionPool(NullConnectionPool): """ Makes a connection every new thread """ timeout = 120 # seconds def __new__(cls, minSize=5, maxSize=20, timeout=30, *args, **kwargs): """ @param size -- pool size @param *args @param **kwargs """ if not hasattr(cls,'_instance'): cls._instance = NullConnectionPool.__new__(cls, minSize, maxSize, timeout, *args, **kwargs) cls._instance.local = local() return cls._instance def get(self): """Gets the object with the connection to the data source""" if hasattr(self.local, 'accessed') and self.local.accessed < time.time() - self.timeout: # hm - the same thread but not accessed recently try: logging.getLogger().debug('Pool: Deleting stale connection from the thread %s' % self.__class__) del self.local.connection except: pass if not hasattr(self.local,'connection'): logging.getLogger().debug('Pool: New ThreadLocal Connection %s' % self.__class__) self.local.connection = NullConnectionPool.get(self) else: logging.getLogger().debug('Pool: Reusing Connection %s' % self.local.connection) self.local.accessed = time.time() return self.local.connection getConnection = get
{ "content_hash": "a677bc32576c0f4b77dec89d52fe2998", "timestamp": "", "source": "github", "line_count": 313, "max_line_length": 180, "avg_line_length": 37.373801916932905, "alnum_prop": 0.5759104120362455, "repo_name": "slaff/attachix", "id": "6e10f2edb396b0ceb5e46e31ac546d1b530728cf", "size": "11698", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "server/core/pool/base.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "45563" }, { "name": "JavaScript", "bytes": "6214571" }, { "name": "Python", "bytes": "299910" }, { "name": "Shell", "bytes": "3519" }, { "name": "XSLT", "bytes": "10984" } ], "symlink_target": "" }
import os
import sys

# Make the directory two levels up importable so the example project's
# settings package can be found when this script runs from its own directory.
sys.path.append("../..")

if __name__ == "__main__":
    # Point Django at the example project's settings unless the caller
    # already configured a different settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangocelery_example.settings")

    # Imported lazily so DJANGO_SETTINGS_MODULE is set before Django loads.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
{ "content_hash": "284912afb6cb2615a3917669731bb756", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 84, "avg_line_length": 24.272727272727273, "alnum_prop": 0.700374531835206, "repo_name": "fatrix/django-golive", "id": "856850c37b8e434d17a21e8d223ba3d32e9a1d8c", "size": "289", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "project_examples/djangocelery_example/manage.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CoffeeScript", "bytes": "1689" }, { "name": "Python", "bytes": "159877" }, { "name": "Shell", "bytes": "1352" } ], "symlink_target": "" }
"""Generate ``components.rst``: one documented (and usually plotted) entry per
gdsfactory component cell."""
import inspect
import pathlib

import gdsfactory as gf
from gdsfactory.serialization import clean_value_json

# Output file lives next to this script.
filepath = pathlib.Path(__file__).parent.absolute() / "components.rst"

# Cells omitted from the generated docs entirely.
# Fix: was `skip = {}` (an empty dict used as a name set); a set matches
# how it is used and is consistent with `skip_settings` below.
skip = set()

# Cells documented without an inline plot (they cannot be built with defaults).
skip_plot = [
    "component_lattice",
    "component_sequence",
    "extend_port",
    "extend_ports_list",
]

# Settings never shown in the example call signature.
skip_settings = {"vias"}


# Fix: opened "w" instead of "w+" — the file is only ever written, never read.
with open(filepath, "w") as f:
    f.write(
        """

You can customize the Gdsfactory generic PDK Pcells for your fab and use it as an inspiration to build your own.


Components
=============================
"""
    )

    for name in sorted(gf.components.cells.keys()):
        # Skip explicitly excluded cells and private helpers.
        if name in skip or name.startswith("_"):
            continue
        print(name)
        sig = inspect.signature(gf.components.cells[name])
        # Render only simple literal defaults (ints/floats/strings/tuples) so the
        # example call in the docs is copy-pasteable.
        kwargs = ", ".join(
            [
                f"{p}={repr(clean_value_json(sig.parameters[p].default))}"
                for p in sig.parameters
                if isinstance(sig.parameters[p].default, (int, float, str, tuple))
                and p not in skip_settings
            ]
        )

        if name in skip_plot:
            f.write(
                f"""

{name}
----------------------------------------------------

.. autofunction:: gdsfactory.components.{name}

"""
            )
        else:
            f.write(
                f"""

{name}
----------------------------------------------------

.. autofunction:: gdsfactory.components.{name}

.. plot::
  :include-source:

  import gdsfactory as gf

  c = gf.components.{name}({kwargs})
  c.plot()

"""
            )
{ "content_hash": "bff8b938087a64e66b2fa74f0aa4457d", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 112, "avg_line_length": 20.473684210526315, "alnum_prop": 0.5115681233933161, "repo_name": "gdsfactory/gdsfactory", "id": "5f3fbe66e93ef69c2ef2d7e8e11b5e5247b5f2d5", "size": "1556", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/write_components_doc.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "605" }, { "name": "Dockerfile", "bytes": "31" }, { "name": "Makefile", "bytes": "4572" }, { "name": "Python", "bytes": "2471982" }, { "name": "Shell", "bytes": "671" }, { "name": "XS", "bytes": "10045" } ], "symlink_target": "" }
"""Configuration access for almanach.

Values are looked up first in the process environment (``SECTION_OPTION``,
option upper-cased) and then in the INI file loaded by :func:`read`.
Python 2 module (uses ``ConfigParser``).
"""
import ConfigParser
import os
import os.path as os_path

from almanach.common.exceptions.almanach_exception import AlmanachException

# Module-level parser shared by all accessors; populated by read().
configuration = ConfigParser.RawConfigParser()


def read(filename):
    """Load *filename* into the shared configuration.

    Raises AlmanachException when the file does not exist.
    """
    if not os_path.isfile(filename):
        raise AlmanachException("Config file '{0}' not found".format(filename))

    print("Loading configuration file {0}".format(filename))
    configuration.read(filename)


def get(section, option, default=None):
    """Return a config value, env var SECTION_OPTION taking precedence.

    Falls back to *default* when the section/option is absent.
    """
    value = os.environ.get(section + "_" + option.upper())
    if value:
        return value

    try:
        return configuration.get(section, option)
    except ConfigParser.Error:
        # Fix: was a bare ``except:`` which also swallowed KeyboardInterrupt,
        # SystemExit and genuine programming errors. Only configuration
        # lookup failures should fall back to the default.
        return default


def volume_existence_threshold():
    return int(get("ALMANACH", "volume_existence_threshold"))


def auth_strategy():
    return get("ALMANACH", "auth_strategy", "private_key")


def auth_private_key():
    # NOTE(review): reads the "auth_token" option despite the function name —
    # kept as-is for compatibility; confirm against callers before renaming.
    return get("ALMANACH", "auth_token")


def keystone_username():
    return get("KEYSTONE", "username")


def keystone_password():
    return get("KEYSTONE", "password")


def keystone_url():
    return get("KEYSTONE", "auth_url")


def keystone_tenant_name():
    return get("KEYSTONE", "tenant_name")


def device_metadata_whitelist():
    # Comma-separated list in the config file.
    return get("ALMANACH", "device_metadata_whitelist").split(',')


def mongodb_url():
    return get("MONGODB", "url", default=None)


def mongodb_database():
    return get("MONGODB", "database", default="almanach")


def mongodb_indexes():
    # Comma-separated list in the config file.
    return get('MONGODB', 'indexes').split(',')


def rabbitmq_url():
    return get("RABBITMQ", "url", default=None)


def rabbitmq_queue():
    return get("RABBITMQ", "queue", default=None)


def rabbitmq_exchange():
    return get("RABBITMQ", "exchange", default=None)


def rabbitmq_routing_key():
    return get("RABBITMQ", "routing.key", default=None)


def rabbitmq_retry():
    # NOTE(review): int(None) raises TypeError when the option is missing;
    # behavior kept — the option is effectively mandatory.
    return int(get("RABBITMQ", "retry.maximum", default=None))


def rabbitmq_retry_exchange():
    return get("RABBITMQ", "retry.exchange", default=None)


def rabbitmq_retry_return_exchange():
    return get("RABBITMQ", "retry.return.exchange", default=None)


def rabbitmq_retry_queue():
    return get("RABBITMQ", "retry.queue", default=None)


def rabbitmq_dead_queue():
    return get("RABBITMQ", "dead.queue", default=None)


def rabbitmq_dead_exchange():
    return get("RABBITMQ", "dead.exchange", default=None)


def rabbitmq_time_to_live():
    # NOTE(review): same int(None) caveat as rabbitmq_retry().
    return int(get("RABBITMQ", "retry.time.to.live", default=None))


def _read_file(filename):
    """Return the entire contents of *filename*.

    Fix: uses a context manager so the handle is closed even if read() raises.
    """
    with open(filename, "r") as f:
        return f.read()
{ "content_hash": "cd33f0413a42dbfb9ab61a9d4e2a4601", "timestamp": "", "source": "github", "line_count": 122, "max_line_length": 79, "avg_line_length": 20.918032786885245, "alnum_prop": 0.679858934169279, "repo_name": "internap/almanach", "id": "97307f508324d1af35db6d45e262bd4774be1481", "size": "3126", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "almanach/config.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "298040" }, { "name": "Shell", "bytes": "277" } ], "symlink_target": "" }
import sys import os from optparse import OptionParser import ConfigParser CONFIG_FILE = '/etc/stratuslab/pdisk-host.conf' class ListRegisteredVolumes(object): """ Utility to list the persistent disk volume URIs registered with a virtual machine. """ def __init__(self, args): self._read_configuration_file() self._process_arguments(args) self.registration_file = os.path.join(self.vm_dir, self.vm_id, self.register_filename) def _read_configuration_file(self): config = ConfigParser.ConfigParser() config.read(CONFIG_FILE) self.vm_dir = config.get('main', 'vm_dir') self.register_filename = config.get('main', 'register_filename') def _process_arguments(self, args): parser = OptionParser() parser.add_option("--vm-id", dest="vm_id", help="VM ID", metavar="ID") parser.add_option("--vm-dir", dest="vm_dir", help="directory where device will be created", metavar="DIR") options, _ = parser.parse_args(args) if not options.vm_id: raise parser.error('--vm-id option is mandatory') self.vm_id = options.vm_id if options.vm_dir: self.vm_dir = options.vm_dir def run(self): if os.path.exists(self.registration_file): with open(self.registration_file, 'r') as f: uris = f.read().splitlines() else: uris = [] unique = [] for uri in uris: uri = uri.strip() if uri and (not uri in unique): unique.append(uri) print "\n".join(unique) if __name__ == "__main__": ListRegisteredVolumes(sys.argv).run()
{ "content_hash": "fbace42ec931324174fb776621ba10b0", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 94, "avg_line_length": 26.96923076923077, "alnum_prop": 0.5767256132344553, "repo_name": "StratusLab/storage", "id": "33c40d4d9e045d7dbc280a3220461f218e0645fa", "size": "2399", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pdisk-host/tar/src/main/scripts/stratus-list-registered-volumes.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "ApacheConf", "bytes": "1283" }, { "name": "CSS", "bytes": "2306" }, { "name": "HTML", "bytes": "448" }, { "name": "Java", "bytes": "130012" }, { "name": "JavaScript", "bytes": "3387" }, { "name": "Python", "bytes": "37462" }, { "name": "Shell", "bytes": "14701" } ], "symlink_target": "" }
# FlatBuffers accessors for the TFLite `ReverseSequenceOptions` table.
# NOTE(review): this file appears to be generated by the FlatBuffers compiler
# (flatc object API style); manual edits will be lost on regeneration.
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()

class ReverseSequenceOptions(object):
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsReverseSequenceOptions(cls, buf, offset):
        # Follow the root uoffset stored at `offset` to reach the table.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ReverseSequenceOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ReverseSequenceOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ReverseSequenceOptions
    def SeqDim(self):
        # vtable slot 4; returns the schema default 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # ReverseSequenceOptions
    def BatchDim(self):
        # vtable slot 6; returns the schema default 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

# Builder helpers mirroring the generated flatc procedural API.
def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
def ReverseSequenceOptionsEnd(builder): return builder.EndObject()


class ReverseSequenceOptionsT(object):
    """Mutable object-API counterpart of ReverseSequenceOptions."""

    # ReverseSequenceOptionsT
    def __init__(self):
        self.seqDim = 0  # type: int
        self.batchDim = 0  # type: int

    @classmethod
    def InitFromBuf(cls, buf, pos):
        reverseSequenceOptions = ReverseSequenceOptions()
        reverseSequenceOptions.Init(buf, pos)
        return cls.InitFromObj(reverseSequenceOptions)

    @classmethod
    def InitFromObj(cls, reverseSequenceOptions):
        x = ReverseSequenceOptionsT()
        x._UnPack(reverseSequenceOptions)
        return x

    # ReverseSequenceOptionsT
    def _UnPack(self, reverseSequenceOptions):
        # Copy scalar fields out of the flatbuffer accessor object.
        if reverseSequenceOptions is None:
            return
        self.seqDim = reverseSequenceOptions.SeqDim()
        self.batchDim = reverseSequenceOptions.BatchDim()

    # ReverseSequenceOptionsT
    def Pack(self, builder):
        # Serialize this object back into `builder`; returns the table offset.
        ReverseSequenceOptionsStart(builder)
        ReverseSequenceOptionsAddSeqDim(builder, self.seqDim)
        ReverseSequenceOptionsAddBatchDim(builder, self.batchDim)
        reverseSequenceOptions = ReverseSequenceOptionsEnd(builder)
        return reverseSequenceOptions
{ "content_hash": "17c4ff2ddccc0dff9a84d95200f3f953", "timestamp": "", "source": "github", "line_count": 75, "max_line_length": 114, "avg_line_length": 36.06666666666667, "alnum_prop": 0.7046210720887246, "repo_name": "google-research/falken", "id": "c185b09844cc7c3eec0af6c6f7196fcc5e91bb3e", "size": "3373", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "service/generated_flatbuffers/tflite/ReverseSequenceOptions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "27651" }, { "name": "C#", "bytes": "673937" }, { "name": "C++", "bytes": "1250409" }, { "name": "CMake", "bytes": "133649" }, { "name": "Java", "bytes": "6034" }, { "name": "JavaScript", "bytes": "112279" }, { "name": "Objective-C++", "bytes": "4177" }, { "name": "Python", "bytes": "1666229" }, { "name": "SWIG", "bytes": "27937" }, { "name": "ShaderLab", "bytes": "1473" }, { "name": "Shell", "bytes": "8257" } ], "symlink_target": "" }
import ast
import operator
from collections import Counter

import networkx as nx
import matplotlib
matplotlib.use('Agg')  # headless backend; must be selected before pyplot import
import matplotlib.pyplot as plt


class DrawGraph:
    """Renders graphs sampled by the MCMC driver to PNG files."""

    def __init__(self):
        pass

    def draw_this_graph(self, k, pos, ind, mylist):
        """Draw the ``ind``-th sampled graph (1-indexed) and save it to output.png.

        :param k: number of nodes
        :param pos: node positions for networkx drawing
        :param ind: 1-based index into ``mylist``
        :param mylist: list of edge-list strings, e.g. "[(0, 1), (1, 2)]"
        """
        # BUG FIX: the original created a 5x5 figure and then immediately
        # shadowed it with a second, default-sized figure, so figsize was
        # silently ignored. Create a single correctly-sized figure instead.
        fig = plt.figure(figsize=(5, 5))
        # Drawing from an empty graph with explicit pos keeps layout stable.
        g = nx.empty_graph(k)
        list_edge = ast.literal_eval(mylist[ind - 1])
        nx.draw_networkx(g, pos, edgelist=list_edge, node_size=20)
        fig.savefig('output.png')

    def draw_most_probable(self, k, pos, edge_list, summary):
        """Write the sorted edge-list histogram and draw the most probable graph.

        Discards the first fifth of ``edge_list`` as burn-in, counts how often
        each edge-list string occurs, writes the ranked histogram to
        ``sorted_histogram``, appends a summary to ``summary`` (an open file),
        and saves the most frequent graph to top.png.

        :param k: number of nodes
        :param pos: node positions for networkx drawing
        :param edge_list: list of edge-list strings sampled by the chain
        :param summary: writable file object for the textual summary
        """
        fig = plt.figure(figsize=(5, 5))  # see BUG FIX note in draw_this_graph

        # Drop the initial 1/5 of the samples as burn-in; this fraction is
        # tied to the chain length T and may need adjustment if T changes.
        edge_list = edge_list[len(edge_list) // 5:]

        # Count occurrences of each distinct edge-list string, then rank them.
        hist = Counter(edge_list)
        sorted_hist = sorted(hist.items(), key=operator.itemgetter(1), reverse=True)

        # Write the ranked histogram; `with` guarantees the file is closed
        # even if a formatting error occurs.
        with open('sorted_histogram', 'w') as histo:
            for rank, (edges_repr, count) in enumerate(sorted_hist, start=1):
                print('{:6d}{}{}{:4d}'.format(rank, ' ', edges_repr, count), file=histo)

        # Restore the most probable edge list from its string representation.
        list_edge = ast.literal_eval(sorted_hist[0][0])
        print('{}{}{}{}{}{}{}{}'.format('total number of graphs generated under assumed equilibrium ',
                                        len(sorted_hist), "\n",
                                        'most probable graph structure (edges): ', list_edge, "\n",
                                        'number of occurrence of this graph: ', sorted_hist[0][1]),
              file=summary)

        g = nx.empty_graph(k)
        nx.draw_networkx(g, pos, edgelist=list_edge, node_size=20)
        fig.savefig('top.png')
{ "content_hash": "ee19489264251ce4c1ace76c4b33780e", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 127, "avg_line_length": 41.21739130434783, "alnum_prop": 0.5938818565400844, "repo_name": "tautomer/mcmc", "id": "7a54a6c49e95467f6ed8b8b9102cc25aacc7f129", "size": "2966", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mcmc/draw.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2283" }, { "name": "Python", "bytes": "20395" } ], "symlink_target": "" }
from __future__ import absolute_import, unicode_literals

# The test application deliberately defines no URL patterns of its own.
urlpatterns = []
{ "content_hash": "d3b438f58a7b0296d8df0380140198d1", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 56, "avg_line_length": 25, "alnum_prop": 0.7466666666666667, "repo_name": "mspeedy/django-health-check", "id": "82e8f9dd5a88987ab49e60b047ac1ded88acab3a", "size": "99", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/testapp/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1292" }, { "name": "Python", "bytes": "27204" } ], "symlink_target": "" }
"""Dataset loading for the maps project.

Reads the users/reviews/restaurants JSON-lines datasets from DATA_DIRECTORY,
converts every record into the abstract data types from ``abstractions``, and
exposes the linked results as module-level constants (USERS, REVIEWS,
RESTAURANTS, CATEGORIES, USER_FILES).
"""
import collections
import glob
import os

from abstractions import *
# BUG FIX: a bare `import data.jsonl` only binds the name `data`, leaving the
# bare name `jsonl` used below undefined (unless re-exported elsewhere); bind
# the submodule explicitly under the name this module uses.
import data.jsonl as jsonl

DATA_DIRECTORY = 'data'
USER_DIRECTORY = 'users'


def load_data(user_dataset, review_dataset, restaurant_dataset):
    """Load the three datasets and link reviews to users and restaurants.

    :param user_dataset: filename of the users JSON-lines file
    :param review_dataset: filename of the reviews JSON-lines file
    :param restaurant_dataset: filename of the restaurants JSON-lines file
    :return: tuple ``(users, reviews, restaurants)`` where ``users`` is a list
        of user ADTs (each holding its reviews), ``reviews`` is a flat list of
        review ADTs, and ``restaurants`` maps restaurant name -> restaurant ADT.
    """
    with open(os.path.join(DATA_DIRECTORY, user_dataset)) as f:
        user_data = jsonl.load(f)
    with open(os.path.join(DATA_DIRECTORY, review_dataset)) as f:
        review_data = jsonl.load(f)
    with open(os.path.join(DATA_DIRECTORY, restaurant_dataset)) as f:
        restaurant_data = jsonl.load(f)

    # First pass: build users without reviews (attached later).
    userid_to_user = {}
    for user in user_data:
        name = user['name']
        _user_id = user['user_id']
        user = make_user(name, [])  # MISSING: reviews
        userid_to_user[_user_id] = user

    # First pass: build restaurants without reviews (attached later).
    busid_to_restaurant = {}
    for restaurant in restaurant_data:
        name = restaurant['name']
        location = float(restaurant['latitude']), float(restaurant['longitude'])
        categories = restaurant['categories']
        price = restaurant['price']
        if price is not None:
            price = int(price)
        _business_id = restaurant['business_id']
        restaurant = make_restaurant(name, location, categories, price, [])  # MISSING: reviews
        busid_to_restaurant[_business_id] = restaurant

    # Build reviews and group them by business and by user.
    reviews = []
    busid_to_reviews = collections.defaultdict(list)
    userid_to_reviews = collections.defaultdict(list)
    for review in review_data:
        _user_id = review['user_id']
        _business_id = review['business_id']
        restaurant = restaurant_name(busid_to_restaurant[_business_id])
        rating = float(review['stars'])
        review = make_review(restaurant, rating)
        reviews.append(review)
        busid_to_reviews[_business_id].append(review)
        userid_to_reviews[_user_id].append(review)
    # Reviews done.

    # Second pass: rebuild restaurants with their reviews attached.
    # NOTE(review): keyed by display name, so two restaurants sharing a name
    # collide and the later one wins — confirm the dataset has unique names.
    restaurants = {}
    for busid, restaurant in busid_to_restaurant.items():
        name = restaurant_name(restaurant)
        location = list(restaurant_location(restaurant))
        categories = restaurant_categories(restaurant)
        price = restaurant_price(restaurant)
        restaurant_reviews = busid_to_reviews[busid]
        restaurant = make_restaurant(name, location, categories, price, restaurant_reviews)
        restaurants[name] = restaurant
    # Restaurants done.

    # Second pass: rebuild users with their reviews attached.
    users = []
    for userid, user in userid_to_user.items():
        name = user_name(user)
        user_reviews = userid_to_reviews[userid]
        user = make_user(name, user_reviews)
        users.append(user)
    # Users done.

    return users, reviews, restaurants


USERS, REVIEWS, RESTAURANTS = load_data('users.json', 'reviews.json', 'restaurants.json')
CATEGORIES = {c for r in RESTAURANTS.values() for c in restaurant_categories(r)}


def load_user_file(user_file):
    """Evaluate a user ``.dat`` file from USER_DIRECTORY and return its value.

    SECURITY: ``eval`` executes arbitrary code from the file; only load
    trusted, locally-authored user files.
    """
    with open(os.path.join(USER_DIRECTORY, user_file)) as f:
        return eval(f.read())


USER_FILES = [f[6:-4] for f in glob.glob('users/*.dat')]
{ "content_hash": "694cd4c42bd77a74974856522413321e", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 94, "avg_line_length": 33.77777777777778, "alnum_prop": 0.6480263157894737, "repo_name": "ajponte/yelpML", "id": "3da5c9bd728b0c898b658c1d29b21823f727e081", "size": "3040", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "maps/data/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "782" }, { "name": "JavaScript", "bytes": "3864" }, { "name": "Python", "bytes": "60036" } ], "symlink_target": "" }
""" Return node This one exits functions. The only other exit is the default exit of functions with 'None' value, if no return is done. """ from .NodeBases import ExpressionMixin, NodeBase, StatementChildrenHavingBase class StatementReturn(StatementChildrenHavingBase): kind = "STATEMENT_RETURN" named_children = ("expression",) nice_children = ("return value",) def __init__(self, expression, source_ref): StatementChildrenHavingBase.__init__( self, values = { "expression" : expression }, source_ref = source_ref ) getExpression = StatementChildrenHavingBase.childGetter( "expression" ) def isStatementAborting(self): return True def mayRaiseException(self, exception_type): return self.getExpression().mayRaiseException(exception_type) def computeStatement(self, constraint_collection): constraint_collection.onExpression(self.getExpression()) expression = self.getExpression() if expression.mayRaiseException(BaseException): constraint_collection.onExceptionRaiseExit(BaseException) if expression.willRaiseException(BaseException): from .NodeMakingHelpers import makeStatementExpressionOnlyReplacementNode result = makeStatementExpressionOnlyReplacementNode( expression = expression, node = self ) return result, "new_raise", """\ Return statement raises in returned expression, removed return.""" constraint_collection.onFunctionReturn() return self, None, None class StatementGeneratorReturn(StatementReturn): kind = "STATEMENT_GENERATOR_RETURN" def __init__(self, expression, source_ref): StatementReturn.__init__( self, expression = expression, source_ref = source_ref ) class ExpressionReturnedValueRef(NodeBase, ExpressionMixin): kind = "EXPRESSION_RETURNED_VALUE_REF" def __init__(self, source_ref): NodeBase.__init__( self, source_ref = source_ref ) def computeExpression(self, constraint_collection): # TODO: Might be predictable based on the exception handler this is in. 
return self, None, None def mayHaveSideEffects(self): # Referencing the expression type has no side effect return False def mayRaiseException(self, exception_type): return False
{ "content_hash": "4b99f75c6aed1f53610c594be3c85737", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 119, "avg_line_length": 29.523255813953487, "alnum_prop": 0.6522252855454903, "repo_name": "wfxiang08/Nuitka", "id": "dcacdfe2e5096c4329034200da995f96d3a262d1", "size": "3319", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "nuitka/nodes/ReturnNodes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "5518" }, { "name": "Batchfile", "bytes": "1810" }, { "name": "C", "bytes": "36149" }, { "name": "C++", "bytes": "441058" }, { "name": "Python", "bytes": "4431574" }, { "name": "Shell", "bytes": "2059" } ], "symlink_target": "" }
"""Replay components for DQN-type agents.""" import collections import typing from typing import Any, Callable, Generic, Iterable, List, Mapping, Optional, Sequence, Text, Tuple, TypeVar import dm_env import numpy as np import snappy from tandem_dqn import parts CompressedArray = Tuple[bytes, Tuple, np.dtype] # Generic replay structure: Any flat named tuple. ReplayStructure = TypeVar('ReplayStructure', bound=Tuple[Any, ...]) class Transition(typing.NamedTuple): s_tm1: Optional[np.ndarray] a_tm1: Optional[parts.Action] r_t: Optional[float] discount_t: Optional[float] s_t: Optional[np.ndarray] a_t: Optional[parts.Action] = None mc_return_tm1: Optional[float] = None class TransitionReplay(Generic[ReplayStructure]): """Uniform replay, with circular buffer storage for flat named tuples.""" def __init__(self, capacity: int, structure: ReplayStructure, random_state: np.random.RandomState, encoder: Optional[Callable[[ReplayStructure], Any]] = None, decoder: Optional[Callable[[Any], ReplayStructure]] = None): self._capacity = capacity self._structure = structure self._random_state = random_state self._encoder = encoder or (lambda s: s) self._decoder = decoder or (lambda s: s) self._storage = [None] * capacity self._num_added = 0 def add(self, item: ReplayStructure) -> None: """Adds single item to replay.""" self._storage[self._num_added % self._capacity] = self._encoder(item) self._num_added += 1 def get(self, indices: Sequence[int]) -> List[ReplayStructure]: """Retrieves items by indices.""" return [self._decoder(self._storage[i]) for i in indices] def sample(self, size: int) -> ReplayStructure: """Samples batch of items from replay uniformly, with replacement.""" indices = self._random_state.choice(self.size, size=size, replace=True) samples = self.get(indices) transposed = zip(*samples) stacked = [np.stack(xs, axis=0) for xs in transposed] return type(self._structure)(*stacked) # pytype: disable=not-callable @property def size(self) -> int: """Number of items currently 
contained in replay.""" return min(self._num_added, self._capacity) @property def capacity(self) -> int: """Total capacity of replay (max number of items stored at any one time).""" return self._capacity def get_state(self) -> Mapping[Text, Any]: """Retrieves replay state as a dictionary (e.g. for serialization).""" return { 'storage': self._storage, 'num_added': self._num_added, } def set_state(self, state: Mapping[Text, Any]) -> None: """Sets replay state from a (potentially de-serialized) dictionary.""" self._storage = state['storage'] self._num_added = state['num_added'] class TransitionAccumulatorWithMCReturn: """Accumulates timesteps to transitions with MC returns.""" def __init__(self): self._transitions = collections.deque() self.reset() def step(self, timestep_t: dm_env.TimeStep, a_t: parts.Action) -> Iterable[Transition]: """Accumulates timestep and resulting action, maybe yields transitions.""" if timestep_t.first(): self.reset() # There are no transitions on the first timestep. if self._timestep_tm1 is None: assert self._a_tm1 is None if not timestep_t.first(): raise ValueError('Expected FIRST timestep, got %s.' % str(timestep_t)) self._timestep_tm1 = timestep_t self._a_tm1 = a_t return # Empty iterable. self._transitions.append( Transition( s_tm1=self._timestep_tm1.observation, a_tm1=self._a_tm1, r_t=timestep_t.reward, discount_t=timestep_t.discount, s_t=timestep_t.observation, a_t=a_t, mc_return_tm1=None, )) self._timestep_tm1 = timestep_t self._a_tm1 = a_t if timestep_t.last(): # Annotate all episode transitions with their MC returns. mc_return = 0 mc_transitions = [] while self._transitions: transition = self._transitions.pop() mc_return = transition.discount_t * mc_return + transition.r_t mc_transitions.append(transition._replace(mc_return_tm1=mc_return)) for transition in reversed(mc_transitions): yield transition else: # Wait for episode end before yielding anything. return def reset(self) -> None: """Resets the accumulator. 
Following timestep is expected to be FIRST.""" self._transitions.clear() self._timestep_tm1 = None self._a_tm1 = None def compress_array(array: np.ndarray) -> CompressedArray: """Compresses a numpy array with snappy.""" return snappy.compress(array), array.shape, array.dtype def uncompress_array(compressed: CompressedArray) -> np.ndarray: """Uncompresses a numpy array with snappy given its shape and dtype.""" compressed_array, shape, dtype = compressed byte_string = snappy.uncompress(compressed_array) return np.frombuffer(byte_string, dtype=dtype).reshape(shape)
{ "content_hash": "a07a5e98c65c4099cdd85eaf5821a113", "timestamp": "", "source": "github", "line_count": 154, "max_line_length": 108, "avg_line_length": 33.29220779220779, "alnum_prop": 0.6621806124439243, "repo_name": "deepmind/deepmind-research", "id": "e9196901331790226f94aa885f2972a400991d79", "size": "5721", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tandem_dqn/replay.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1002" }, { "name": "C++", "bytes": "5765" }, { "name": "Jupyter Notebook", "bytes": "12330730" }, { "name": "Lua", "bytes": "76186" }, { "name": "OpenEdge ABL", "bytes": "15630" }, { "name": "PureBasic", "bytes": "8" }, { "name": "Python", "bytes": "3419119" }, { "name": "Racket", "bytes": "226692" }, { "name": "Shell", "bytes": "84450" }, { "name": "Starlark", "bytes": "3463" } ], "symlink_target": "" }
# Controllers tying DatabaseTableModel instances to table views.
# NOTE: Python 2 module (`print` statements, `raw_input`).
from collections import namedtuple

import dill as pickle  # dill supports 'pickling nested classes'

from database import DatabaseTableModel
from tableview import *


class TableController(object):
    """Mediates between one DatabaseTableModel and a TableView class."""

    # class defaults
    __default_join_type = 'inner'
    __default_direction = 'full'

    def __init__(self, table_model, view_class=SimpleTableView):
        assert issubclass(table_model.__class__, DatabaseTableModel)
        assert issubclass(view_class, TableView)
        self.__table = table_model
        # NOTE(review): stores the view *class*, not an instance; the view
        # methods below are invoked on the class itself — confirm that
        # TableView's API is class/static methods.
        self.__view = view_class

    def __getitem__(self, item):
        # Delegate row access to the underlying model.
        return self.__table.get_row(item)

    def get_item(self, index, name):
        """Return the cell at row ``index``, column ``name``."""
        return self.__table.get_item(index, name)

    def set_item(self, index, name, value):
        """Set the cell at row ``index``, column ``name`` to ``value``."""
        self.__table.set_item(index, name, value)

    def add_row(self, row):
        self.__table += row

    def del_row(self, index):
        self.__table.del_row(index)

    def fields(self):
        """Return the model's column names."""
        return self.__table.fields()

    def find(self, values, columns):
        """Finds first matching row by :columns: and :values:"""
        # Returns None implicitly when nothing matches.
        for column in columns:
            for row in self.__table:
                if row[column] in values:
                    return row

    def update_view(self):
        self.__view.update(self)

    def add_row_visual(self):
        self.__view.add_to_table(self.__table)

    def edit_row_visual(self):
        self.__view.edit_table(self.__table)

    def delete_row_visual(self):
        return self.__view.delete_row(self.__table)

    def search(self):
        self.__view.search(self)

    def dump(self):
        """Pickle the model to '<table name>.b' using dill."""
        with open(self.__table.name + ".b", "wb") as f:
            pickle.dump(self.__table, f)

    def load(self):
        """Replace the model with the pickled '<table name>.b' contents."""
        # NOTE(review): opened in text mode while dump() uses binary mode.
        with open(self.__table.name + ".b") as f:
            self.__table = pickle.load(f)

    def subtable(self, *args, **kwargs):
        """Creates a subtable controller object from controlled table.
        :args: tuple with names of columns
        :kwargs: rows: tuple with numbers of rows
        filter: filter predicate
        filter_columns: columns to affect filter"""
        # NOTE(review): *args is always a tuple, never None, so this fallback
        # is unreachable; calling with no columns yields an empty column set.
        if args is None:
            _columns = self.__table.fields()
        else:
            _columns = args
        if 'rows' in kwargs:
            _rows = kwargs['rows']
        else:
            _rows = range(len(self.__table))
        if 'filter' in kwargs:
            _filter = kwargs['filter']
        else:
            # Default: accept every value.
            def _filter(x): return True
        if 'filter_columns' in kwargs:
            if isinstance(kwargs['filter_columns'], list) \
                    or isinstance(kwargs['filter_columns'], tuple):
                _filter_columns = kwargs['filter_columns']
            else:
                _filter_columns = [kwargs['filter_columns']]
        else:
            _filter_columns = self.__table.fields()
        # collecting values
        gathered_table = DatabaseTableModel(_columns)
        for index in _rows:
            gathered_row = map(lambda x: self.__table[index][x], _columns)
            # apply filter
            filter_good = True
            for filter_field in _filter_columns:
                if not _filter(self.__table[index][filter_field]):
                    filter_good = False
                    break
            if filter_good:
                gathered_table += gathered_row
        return TableController(gathered_table, self.__view)

    def join(self, other, *args, **kwargs):
        """Join the controlled table with ``other``; see __generic_join."""
        return self.__generic_join(self.__table, other, *args, **kwargs)

    @staticmethod
    def __generic_join(table1, table2, *args, **kwargs):
        """JOIN method that accepts keywords.
        other - other table class
        args = a tuple of tuples:
        column_1, column_2 - columns to join from self and other
        relation-predicate - boolean function that accepts at least 2 values
        kwargs:
        type = type of join - 'inner' 'outer' 'cross'
        direction = direction of join 'left' 'right' 'full'. Useless with inner join.
        fields_1, fields_2 = names of fields to concat into result table
        """
        def get_fields(table, fields, index):
            return map(lambda field: table[index][field], fields)

        # get parameters
        if 'type' in kwargs:
            _type = kwargs['type']
        else:
            # NOTE(review): table1 is a DatabaseTableModel, and the default
            # lives on TableController under the name-mangled attribute
            # _TableController__default_join_type — this branch likely raises
            # AttributeError; confirm before relying on the default.
            _type = table1._default_join_type
        # set
        if _type == 'inner':
            _direction = 'full'
        elif _type != 'inner':
            # NOTE(review): this condition makes the final else unreachable.
            _direction = kwargs['direction']
        else:
            _direction = table1._default_direction
        if 'fields_1' in kwargs:
            if isinstance(kwargs['fields_1'], list) \
                    or isinstance(kwargs['fields_1'], tuple):
                _fields_1 = kwargs['fields_1']
            else:
                _fields_1 = tuple(kwargs['fields_1'])
        else:
            _fields_1 = table1.fields()
        if 'fields_2' in kwargs:
            # NOTE(review): apparent copy-paste bug — both branches assign
            # _fields_1 from kwargs['fields_2']; _fields_2 is only ever bound
            # in the else branch below.
            if isinstance(kwargs['fields_2'], list) \
                    or isinstance(kwargs['fields_2'], tuple):
                _fields_1 = kwargs['fields_2']
            else:
                _fields_1 = tuple(kwargs['fields_2'])
        else:
            _fields_2 = table2.fields()

        # make temporary index
        temporary_index_left_center = dict()
        temporary_index_right_center = dict()
        temporary_index_left = list()
        temporary_index_right = list()
        temporary_index_copy = None
        for relation in args:
            # find matching records
            # NOTE(review): issubclass() on instances raises TypeError;
            # isinstance() was probably intended here.
            assert issubclass(table1, DatabaseTableModel) and issubclass(table2, DatabaseTableModel)
            for index_row_1, row_1 in enumerate(table1):
                for index_row_2, row_2 in enumerate(table2):
                    if relation[2](row_1[relation[0]], row_2[relation[1]]):
                        # NOTE(review): membership is tested with the row
                        # object, but the dict keys are row *indices* — the
                        # lookup should probably use index_row_1/index_row_2.
                        if row_1 in temporary_index_left_center:
                            temporary_index_left_center[index_row_1].append(index_row_2)
                        else:
                            temporary_index_left_center[index_row_1] = [index_row_2]
                        if row_2 in temporary_index_right_center:
                            temporary_index_right_center[index_row_2].append(index_row_1)
                        else:
                            temporary_index_right_center[index_row_2] = [index_row_1]
            # ANDing relations (intersection)
            if temporary_index_copy:
                temporary_index_left_center = {x: temporary_index_copy[x]
                                               for x in temporary_index_left_center
                                               if x in temporary_index_copy}
            # copy
            temporary_index_copy = temporary_index_left_center.copy()
        # outer join index
        if _type == 'outer':
            # NOTE(review): range() of a table object raises TypeError in all
            # of the loops below; range(len(table)) was probably intended.
            if _direction == 'left':
                for row_1 in range(table1):
                    if row_1 not in temporary_index_left_center:
                        temporary_index_left.append(row_1)
            elif _direction == 'right':
                for row_2 in range(table2):
                    if row_2 not in temporary_index_right_center:
                        temporary_index_right.append(row_2)
            elif _direction == 'full':
                for row_1 in range(table1):
                    if row_1 not in temporary_index_left_center:
                        temporary_index_left.append(row_1)
                for row_2 in range(table2):
                    if row_2 not in temporary_index_right_center:
                        temporary_index_right.append(row_2)
        # deleting unused index
        del temporary_index_right_center

        # populating new table
        # fields maintain the given order because of namedtuple()
        gathered_table = DatabaseTableModel(_fields_1 + _fields_2)
        # add center index
        for index_row_1, indexes_rows_2 in temporary_index_left_center.items():
            row_1_data = get_fields(table1, _fields_1, index_row_1)
            row_2_data = map(lambda index_row_2: get_fields(table2, _fields_2, index_row_2), indexes_rows_2)
            # add the fields in-order
            gathered_table += row_1_data + row_2_data
        # add blank left index
        for index_row_1 in temporary_index_left:
            row_1_data = get_fields(table1, _fields_1, index_row_1)
            # fill with None
            row_2_data = [None] * len(_fields_2)
            # gather fields
            gathered_table += row_1_data + row_2_data
        # add blank right index
        for index_row_2 in temporary_index_right:
            row_2_data = get_fields(table2, _fields_2, index_row_2)
            # fill with None
            row_1_data = [None] * len(_fields_1)
            # gather fields
            gathered_table += row_1_data + row_2_data
        # return completed table
        return gathered_table


class VisualRelationalTableController(object):
    """Couples a master and a slave TableController via one foreign key."""

    # (master_column, slave_column) pair naming the foreign-key relation.
    __relation_class = namedtuple("TableRelation", "master slave")

    def __init__(self, master_controller, slave_controller, foreign_key=()):
        """Init function.
        :type foreign_key: Tuple of 2 values: master_column, slave_column
        - values in slave_column bound to values in master_column
        :type master_controller: TableController
        :type slave_controller: TableController
        """
        # check values
        assert issubclass(slave_controller.__class__, TableController)
        self.__controller_slave = slave_controller
        assert issubclass(master_controller.__class__, TableController)
        self.__controller_master = master_controller
        self.__relation = self.__relation_class._make(foreign_key)

    # setters
    def add_row_master(self):
        self.__controller_master.add_row_visual()

    def add_row_slave(self):
        # Foreign-key validation against the master table is left unfinished
        # (see the commented-out sketch below).
        # set_row = \
        self.__controller_slave.add_row_visual()
        # found_in_master = self.__controller_master.find(set_row[self.__relation["slave"]],
        #                                                 self.__relation["master"])
        # if found_in_master:
        # add to table master

    # deletion (visual)
    def delete_slave(self):
        self.__controller_slave.delete_row_visual()

    def delete_master(self):
        """Delete a master row and drop slave rows referencing its key."""
        deleted_line = self.__controller_master.delete_row_visual()
        # delete matching foreign keys if the row is deleted
        if deleted_line:
            key_to_delete = deleted_line[self.__relation[0]]
            self.__controller_slave = \
                self.__controller_slave.subtable(filter=(lambda x: False if x == key_to_delete else True),
                                                 filter_columns=self.__relation[1])

    def search_master(self):
        return self.__controller_master.search()

    def search_slave(self):
        return self.__controller_slave.search()

    def edit_master(self):
        return self.__controller_master.edit_row_visual()

    def edit_slave(self):
        return self.__controller_slave.edit_row_visual()

    def update_view_master(self):
        return self.__controller_master.update_view()

    def update_view_slave(self):
        return self.__controller_slave.update_view()

    def dump(self):
        """Pickle both tables to disk."""
        self.__controller_master.dump()
        self.__controller_slave.dump()
        print "Tables dumped!\n"

    def load(self):
        """Load both tables from disk."""
        self.__controller_master.load()
        self.__controller_slave.load()
        print "Tables loaded!\n"

    def print_task_result(self):
        ''' task: authors with more than 100 pages in a book
        authors must be master, books - slave
        ( this is ugly, sorry :( )'''

        def filter_hundred(x):
            # True only for values parseable as int and greater than 100.
            try:
                int_x = int(x)
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # `except (TypeError, ValueError)` would be safer.
            except:
                return False
            if int_x > 100:
                return True
            return False

        # NOTE(review): ("Author") is just the string "Author", not a 1-tuple;
        # ("Author",) was probably intended.
        found_authors = self.__controller_slave.subtable(("Author"),
                                                         filter=filter_hundred,
                                                         filter_columns="NumOfPages")
        print tabulate(found_authors, headers='keys', tablefmt="grid")
        raw_input("Press Enter to continue...")
{ "content_hash": "d572e796e996f278e4c486ba4fc790b7", "timestamp": "", "source": "github", "line_count": 327, "max_line_length": 106, "avg_line_length": 37.75229357798165, "alnum_prop": 0.5491292021061158, "repo_name": "sanchaez/python_labs", "id": "146db54aed4ec2bfd7115c8f2160862731654953", "size": "12345", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Lab1/tablecontroller.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "134" }, { "name": "HTML", "bytes": "24445" }, { "name": "JavaScript", "bytes": "18616" }, { "name": "Python", "bytes": "53410" } ], "symlink_target": "" }
""" Flip API Flip # noqa: E501 The version of the OpenAPI document: 3.1 Contact: cloudsupport@telestream.net Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from telestream_cloud_flip.configuration import Configuration class DeletedResponse(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'deleted': 'bool' } attribute_map = { 'deleted': 'deleted' } def __init__(self, deleted=None, local_vars_configuration=None): # noqa: E501 """DeletedResponse - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._deleted = None self.discriminator = None if deleted is not None: self.deleted = deleted @property def deleted(self): """Gets the deleted of this DeletedResponse. # noqa: E501 Informs whether an object has been deleted successfully. # noqa: E501 :return: The deleted of this DeletedResponse. # noqa: E501 :rtype: bool """ return self._deleted @deleted.setter def deleted(self, deleted): """Sets the deleted of this DeletedResponse. Informs whether an object has been deleted successfully. # noqa: E501 :param deleted: The deleted of this DeletedResponse. 
# noqa: E501 :type: bool """ self._deleted = deleted def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DeletedResponse): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, DeletedResponse): return True return self.to_dict() != other.to_dict()
{ "content_hash": "a8b410b139ca0eb798293112543f79a9", "timestamp": "", "source": "github", "line_count": 121, "max_line_length": 82, "avg_line_length": 28.330578512396695, "alnum_prop": 0.5644690781796966, "repo_name": "Telestream/telestream-cloud-python-sdk", "id": "ea128793c11d87903e8cb11ccde872e26ce05752", "size": "3445", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "telestream_cloud_flip_sdk/telestream_cloud_flip/models/deleted_response.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1339719" }, { "name": "Shell", "bytes": "6712" } ], "symlink_target": "" }
# JSON API views for videos: each view serializes model data prepared by the
# corresponding mixin into the response dict consumed by the JSON base views.
from django.core.urlresolvers import reverse

from core.mixins import JSONView, JSONListView
from .mixins import VideoListMixin, VideoDetailMixin, VideoUserListMixin
from .mixins import VideoByTagListMixin, TagListMixin


def _get_item(db_video, request):
    """Serialize one Video model instance into its JSON item dict.

    The dict layout is part of the public API response format — do not
    rename keys without versioning the API.
    """
    href_relative_uri = reverse('api:videos:video',
                                kwargs={'pk': db_video.id, 'format': '.json'})
    return {
        'type': 'video',
        'id': {
            'id': db_video.id,
            'id_source': db_video.id_source,
        },
        'href': request.build_absolute_uri(href_relative_uri),
        'source': db_video.source,
        'user': db_video.user.username,
        'title': db_video.title,
        'description': db_video.description,
        'duration': db_video.duration,
        'created_at': db_video.created,
        'modified_at': db_video.modified,
        'filename': db_video.filename,
        # Fall back to zero-size/empty-url when no thumbnail is attached.
        'thumbnail': {
            'height': db_video.thumbnail.height if db_video.thumbnail else 0,
            'width': db_video.thumbnail.width if db_video.thumbnail else 0,
            'url': db_video.thumbnail.url if db_video.thumbnail else ''
        },
        'tags': [tag.name for tag in db_video.tags.all()]
    }


class VideoListJSON(JSONListView, VideoListMixin):
    """
    List of Videos
    """
    def __init__(self):
        # `type` and `items` are presumably read by JSONListView when
        # rendering the response — confirm in core.mixins.
        self.type = 'video_list'
        self.items = []

    def craft_response(self, context, **response_kwargs):
        """Fill self.items from the queryset, then defer to the base view."""
        self.items = [_get_item(db_video, self.request)
                      for db_video in context['video_list']]
        return super(VideoListJSON, self)\
            .craft_response(context, **response_kwargs)


class VideoDetailJSON(JSONView, VideoDetailMixin):
    """
    Video details
    """
    def craft_response(self, context, **response_kwargs):
        """Serialize the single video provided by VideoDetailMixin."""
        db_video = context['object']
        return _get_item(db_video, self.request)


class VideoUserListJSON(JSONListView, VideoUserListMixin):
    """
    Video user list
    """
    def __init__(self):
        self.type = 'video_list'
        self.items = []

    def craft_response(self, context, **response_kwargs):
        """Fill self.items with the user's videos, then defer to the base view."""
        self.items = [_get_item(db_video, self.request)
                      for db_video in context['video_list']]
        return super(VideoUserListJSON, self)\
            .craft_response(context, **response_kwargs)


class VideoAnalyticJSON(JSONView, VideoDetailMixin):
    """
    Video analytic
    """
    def craft_response(self, context, **response_kwargs):
        """Build the analytics (views/shares) payload for one video."""
        db_video = context['object']
        href_relative_uri = reverse('api:videos:video_analytic',
                                    kwargs={'pk': db_video.id,
                                            'format': '.json'})
        response = {
            'type': 'video_analytic',
            'href': self.request.build_absolute_uri(href_relative_uri),
            'video_id': db_video.id,
            'views': {
                'total_views': db_video.analytic.views,
                'unique_views': db_video.analytic.unique_views,
            },
            'shares': db_video.analytic.shares,
        }
        return response


class VideoRatingJSON(JSONView, VideoDetailMixin):
    """
    Video rating
    """
    def craft_response(self, context, **response_kwargs):
        """Build the up/down-vote payload for one video."""
        db_video = context['object']
        href_relative_uri = reverse('api:videos:video_rating',
                                    kwargs={'pk': db_video.id,
                                            'format': '.json'})
        response = {
            'type': 'video_rating',
            'href': self.request.build_absolute_uri(href_relative_uri),
            'video_id': db_video.id,
            'upvotes': db_video.rating.upvotes,
            'downvotes': db_video.rating.downvotes,
        }
        return response


class VideoByTagListJSON(JSONListView, VideoByTagListMixin):
    """
    List of videos by Tags
    """
    def __init__(self):
        self.type = 'video_list'
        self.items = []

    def craft_response(self, context, **response_kwargs):
        """Fill self.items with the tag-filtered videos, then defer up."""
        self.items = [_get_item(db_video, self.request)
                      for db_video in context['video_list']]
        return super(VideoByTagListJSON, self)\
            .craft_response(context, **response_kwargs)


class TagListJSON(JSONListView, TagListMixin):
    """
    List of all tags
    """
    def __init__(self):
        self.type = 'tag_list'
        self.items = []

    def craft_response(self, context, **response_kwargs):
        """Fill self.items with plain tag names, then defer to the base view."""
        self.items = [tag.name for tag in context['tag_list']]
        return super(TagListJSON, self)\
            .craft_response(context, **response_kwargs)
{ "content_hash": "5378b63662cced4a0fe554ecf609b152", "timestamp": "", "source": "github", "line_count": 149, "max_line_length": 77, "avg_line_length": 31.63758389261745, "alnum_prop": 0.5630038184132372, "repo_name": "lotube/lotube", "id": "def975cdb969e43d003064c7d3329d2076f3b9b9", "size": "4714", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lotube/videos/views_api_json.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "8456" }, { "name": "Python", "bytes": "65219" } ], "symlink_target": "" }
# Re-export the public audiogrep API at the package level so callers can
# simply do `import audiogrep` instead of `from audiogrep import audiogrep`.
from .audiogrep import *
{ "content_hash": "da08305e50e89e79d46f42b7b9b2809b", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 24, "avg_line_length": 25, "alnum_prop": 0.76, "repo_name": "antiboredom/audiogrep", "id": "b996c17656ec9b46727cab504f3b11d955d29691", "size": "25", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "audiogrep/__init__.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "15976" } ], "symlink_target": "" }
def remove_trailing_slashes(url: str) -> str:
    """Return *url* with every trailing ``/`` removed.

    The rest of the URL is untouched; a string consisting only of slashes
    (or the empty string) collapses to ``""``.
    """
    # str.rstrip strips all trailing occurrences in a single C-level pass,
    # replacing the hand-rolled while loop with the idiomatic stdlib call.
    return url.rstrip("/")
{ "content_hash": "f8d7267faecab9017a00866b34e68d7f", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 45, "avg_line_length": 28.25, "alnum_prop": 0.5752212389380531, "repo_name": "linkedin/WhereHows", "id": "384ed34e4708ee61eb9b947babe9002a5b81a5b6", "size": "113", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metadata-ingestion/src/datahub/utilities/config_clean.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "110129" }, { "name": "Dockerfile", "bytes": "2521" }, { "name": "HTML", "bytes": "131513" }, { "name": "Java", "bytes": "1307442" }, { "name": "JavaScript", "bytes": "148450" }, { "name": "Nearley", "bytes": "2837" }, { "name": "Python", "bytes": "1419332" }, { "name": "Shell", "bytes": "2564" }, { "name": "TSQL", "bytes": "42644" }, { "name": "TypeScript", "bytes": "641014" } ], "symlink_target": "" }
"""Tests for ``do_rest_call``: success, retry, failure, timeout and
generic request-exception paths of the outgoing-webhook machinery."""
from __future__ import absolute_import
from __future__ import print_function

import logging
import mock
import requests
from typing import Any, Dict, Tuple, Text, Optional

from requests import Response
from zerver.lib.outgoing_webhook import do_rest_call, OutgoingWebhookServiceInterface
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_realm, get_user
from builtins import object


class ResponseMock(object):
    """Minimal stand-in for ``requests.Response`` carrying only the
    attributes the code under test inspects."""

    def __init__(self, status_code, data, content):
        # type: (int, Any, str) -> None
        self.status_code = status_code
        self.data = data
        self.content = content


def request_exception_error(http_method, final_url, data, **request_kwargs):
    # type: (Any, Any, Any, **Any) -> Any
    # Used as a side_effect to simulate a generic network failure.
    raise requests.exceptions.RequestException("I'm a generic exception :(")


def timeout_error(http_method, final_url, data, **request_kwargs):
    # type: (Any, Any, Any, **Any) -> Any
    # Used as a side_effect to simulate a request timeout.
    raise requests.exceptions.Timeout()


class MockServiceHandler(OutgoingWebhookServiceInterface):
    # Service handler whose success path always yields a fixed reply.
    def process_success(self, response, event):
        # type: (Response, Dict[Text, Any]) -> Optional[str]
        return "Success!"

service_handler = MockServiceHandler(None, None, None, None)


class DoRestCallTests(ZulipTestCase):

    def setUp(self):
        # type: () -> None
        realm = get_realm("zulip")
        user_profile = get_user("outgoing-webhook@zulip.com", realm)
        self.mock_event = {
            # In the tests there is no active queue processor, so retries don't get processed.
            # Therefore, we need to emulate `retry_event` in the last stage when the maximum
            # retries have been exceeded.
            'failed_tries': 3,
            'message': {'display_recipient': 'Verona',
                        'subject': 'Foo',
                        'id': '',
                        'type': 'stream'},
            'user_profile_id': user_profile.id,
            'command': '',
            'service_name': ''}

        self.rest_operation = {'method': "POST",
                               'relative_url_path': "",
                               'request_kwargs': {},
                               'base_url': ""}
        self.bot_user = self.example_user('outgoing_webhook_bot')
        # Silence expected warning-level log output from the failure paths.
        logging.disable(logging.WARNING)

    @mock.patch('zerver.lib.outgoing_webhook.succeed_with_message')
    def test_successful_request(self, mock_succeed_with_message):
        # type: (mock.Mock) -> None
        # A 200 response should route through the success handler.
        response = ResponseMock(200, {"message": "testing"}, '')
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event,
                         service_handler, None)
            self.assertTrue(mock_succeed_with_message.called)

    def test_retry_request(self):
        # type: (mock.Mock) -> None
        # A 500 response with retries exhausted should notify the bot owner.
        response = ResponseMock(500, {"message": "testing"}, '')
        self.mock_event['failed_tries'] = 3
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event,
                         service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertEqual(bot_owner_notification.content,
                         "[A message](http://testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.")
        self.assertEqual(bot_owner_notification.recipient_id,
                         self.bot_user.bot_owner.id)
        self.mock_event['failed_tries'] = 0

    @mock.patch('zerver.lib.outgoing_webhook.fail_with_message')
    def test_fail_request(self, mock_fail_with_message):
        # type: (mock.Mock) -> None
        # A 400 response should fail the message and notify the bot owner
        # with the status code included.
        response = ResponseMock(400, {"message": "testing"}, '')
        with mock.patch('requests.request', return_value=response):
            do_rest_call(self.rest_operation, None, self.mock_event,
                         service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertTrue(mock_fail_with_message.called)
        self.assertEqual(bot_owner_notification.content,
                         '''[A message](http://testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
The webhook got a response with status code *400*.''')
        self.assertEqual(bot_owner_notification.recipient_id,
                         self.bot_user.bot_owner.id)

    # NOTE(review): only two patches are applied here, so only two mocks are
    # injected (requests.request first — decorators apply bottom-up — then
    # logging.info); the type comment previously listed three.
    @mock.patch('logging.info')
    @mock.patch('requests.request', side_effect=timeout_error)
    def test_timeout_request(self, mock_requests_request, mock_logger):
        # type: (mock.Mock, mock.Mock) -> None
        do_rest_call(self.rest_operation, None, self.mock_event,
                     service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertEqual(bot_owner_notification.content,
                         '''[A message](http://testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.''')
        self.assertEqual(bot_owner_notification.recipient_id,
                         self.bot_user.bot_owner.id)

    @mock.patch('logging.exception')
    @mock.patch('requests.request', side_effect=request_exception_error)
    @mock.patch('zerver.lib.outgoing_webhook.fail_with_message')
    def test_request_exception(self, mock_fail_with_message,
                               mock_requests_request, mock_logger):
        # type: (mock.Mock, mock.Mock, mock.Mock) -> None
        # A generic RequestException should both fail the message and embed
        # the exception type and text in the owner notification.
        do_rest_call(self.rest_operation, None, self.mock_event,
                     service_handler, None)
        bot_owner_notification = self.get_last_message()
        self.assertTrue(mock_fail_with_message.called)
        self.assertEqual(bot_owner_notification.content,
                         '''[A message](http://testserver/#narrow/stream/Verona/subject/Foo/near/) triggered an outgoing webhook.
When trying to send a request to the webhook service, an exception of type RequestException occured:
```
I'm a generic exception :(
```''')
        self.assertEqual(bot_owner_notification.recipient_id,
                         self.bot_user.bot_owner.id)
{ "content_hash": "a6e7147c8b45c51aeefc26e41cb737f0", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 133, "avg_line_length": 49.583333333333336, "alnum_prop": 0.6443697478991597, "repo_name": "verma-varsha/zulip", "id": "97f9fa43fbdda3c72cf45a89ee8011291b13ab85", "size": "5974", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "zerver/tests/test_outgoing_webhook_system.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "426706" }, { "name": "Emacs Lisp", "bytes": "158" }, { "name": "HTML", "bytes": "489996" }, { "name": "JavaScript", "bytes": "2151770" }, { "name": "Nginx", "bytes": "1280" }, { "name": "Pascal", "bytes": "1113" }, { "name": "Perl", "bytes": "401825" }, { "name": "Puppet", "bytes": "85239" }, { "name": "Python", "bytes": "3780334" }, { "name": "Ruby", "bytes": "249744" }, { "name": "Shell", "bytes": "45134" } ], "symlink_target": "" }
""" Tests for the API /ports/ methods. """ import datetime import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils import six from six.moves import http_client from six.moves.urllib import parse as urlparse from testtools import matchers from wsme import types as wtypes from ironic.api.controllers import base as api_base from ironic.api.controllers import v1 as api_v1 from ironic.api.controllers.v1 import notification_utils from ironic.api.controllers.v1 import port as api_port from ironic.api.controllers.v1 import utils as api_utils from ironic.api.controllers.v1 import versions from ironic.common import exception from ironic.common import utils as common_utils from ironic.conductor import rpcapi from ironic import objects from ironic.objects import fields as obj_fields from ironic.tests import base from ironic.tests.unit.api import base as test_api_base from ironic.tests.unit.api import utils as apiutils from ironic.tests.unit.db import utils as dbutils from ironic.tests.unit.objects import utils as obj_utils # NOTE(lucasagomes): When creating a port via API (POST) # we have to use node_uuid and portgroup_uuid def post_get_test_port(**kw): port = apiutils.port_post_data(**kw) node = dbutils.get_test_node() portgroup = dbutils.get_test_portgroup() port['node_uuid'] = kw.get('node_uuid', node['uuid']) port['portgroup_uuid'] = kw.get('portgroup_uuid', portgroup['uuid']) return port class TestPortObject(base.TestCase): @mock.patch("pecan.request") def test_port_init(self, mock_pecan_req): mock_pecan_req.version.minor = 1 port_dict = apiutils.port_post_data(node_id=None, portgroup_uuid=None) del port_dict['extra'] port = api_port.Port(**port_dict) self.assertEqual(wtypes.Unset, port.extra) class TestListPorts(test_api_base.BaseApiTest): def setUp(self): super(TestListPorts, self).setUp() self.node = obj_utils.create_test_node(self.context) def test_empty(self): data = self.get_json('/ports') self.assertEqual([], 
data['ports']) def test_one(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json('/ports') self.assertEqual(port.uuid, data['ports'][0]["uuid"]) self.assertNotIn('extra', data['ports'][0]) self.assertNotIn('node_uuid', data['ports'][0]) # never expose the node_id self.assertNotIn('node_id', data['ports'][0]) def test_get_one(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json('/ports/%s' % port.uuid) self.assertEqual(port.uuid, data['uuid']) self.assertIn('extra', data) self.assertIn('node_uuid', data) # never expose the node_id, port_id, portgroup_id self.assertNotIn('node_id', data) self.assertNotIn('port_id', data) self.assertNotIn('portgroup_id', data) self.assertNotIn('portgroup_uuid', data) def test_get_one_portgroup_is_none(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json('/ports/%s' % port.uuid, headers={api_base.Version.string: '1.24'}) self.assertEqual(port.uuid, data['uuid']) self.assertIn('extra', data) self.assertIn('node_uuid', data) # never expose the node_id, port_id, portgroup_id self.assertNotIn('node_id', data) self.assertNotIn('port_id', data) self.assertNotIn('portgroup_id', data) self.assertIn('portgroup_uuid', data) def test_get_one_custom_fields(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) fields = 'address,extra' data = self.get_json( '/ports/%s?fields=%s' % (port.uuid, fields), headers={api_base.Version.string: str(api_v1.MAX_VER)}) # We always append "links" self.assertItemsEqual(['address', 'extra', 'links'], data) def test_hide_fields_in_newer_versions_internal_info(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id, internal_info={"foo": "bar"}) data = self.get_json( '/ports/%s' % port.uuid, headers={api_base.Version.string: str(api_v1.MIN_VER)}) self.assertNotIn('internal_info', data) data = self.get_json('/ports/%s' % port.uuid, 
headers={api_base.Version.string: "1.18"}) self.assertEqual({"foo": "bar"}, data['internal_info']) def test_get_collection_custom_fields(self): fields = 'uuid,extra' for i in range(3): obj_utils.create_test_port(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % i) data = self.get_json( '/ports?fields=%s' % fields, headers={api_base.Version.string: str(api_v1.MAX_VER)}) self.assertEqual(3, len(data['ports'])) for port in data['ports']: # We always append "links" self.assertItemsEqual(['uuid', 'extra', 'links'], port) def test_get_custom_fields_invalid_fields(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) fields = 'uuid,spongebob' response = self.get_json( '/ports/%s?fields=%s' % (port.uuid, fields), headers={api_base.Version.string: str(api_v1.MAX_VER)}, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn('spongebob', response.json['error_message']) def test_get_custom_fields_invalid_api_version(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) fields = 'uuid,extra' response = self.get_json( '/ports/%s?fields=%s' % (port.uuid, fields), headers={api_base.Version.string: str(api_v1.MIN_VER)}, expect_errors=True) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_detail(self): llc = {'switch_info': 'switch', 'switch_id': 'aa:bb:cc:dd:ee:ff', 'port_id': 'Gig0/1'} portgroup = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) port = obj_utils.create_test_port(self.context, node_id=self.node.id, portgroup_id=portgroup.id, pxe_enabled=False, local_link_connection=llc) data = self.get_json( '/ports/detail', headers={api_base.Version.string: str(api_v1.MAX_VER)} ) self.assertEqual(port.uuid, data['ports'][0]["uuid"]) self.assertIn('extra', data['ports'][0]) self.assertIn('internal_info', data['ports'][0]) 
self.assertIn('node_uuid', data['ports'][0]) self.assertIn('pxe_enabled', data['ports'][0]) self.assertIn('local_link_connection', data['ports'][0]) self.assertIn('portgroup_uuid', data['ports'][0]) # never expose the node_id and portgroup_id self.assertNotIn('node_id', data['ports'][0]) self.assertNotIn('portgroup_id', data['ports'][0]) def test_detail_against_single(self): port = obj_utils.create_test_port(self.context, node_id=self.node.id) response = self.get_json('/ports/%s/detail' % port.uuid, expect_errors=True) self.assertEqual(http_client.NOT_FOUND, response.status_int) def test_many(self): ports = [] for id_ in range(5): port = obj_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % id_) ports.append(port.uuid) data = self.get_json('/ports') self.assertEqual(len(ports), len(data['ports'])) uuids = [n['uuid'] for n in data['ports']] six.assertCountEqual(self, ports, uuids) def _test_links(self, public_url=None): cfg.CONF.set_override('public_endpoint', public_url, 'api') uuid = uuidutils.generate_uuid() obj_utils.create_test_port(self.context, uuid=uuid, node_id=self.node.id) data = self.get_json('/ports/%s' % uuid) self.assertIn('links', data.keys()) self.assertEqual(2, len(data['links'])) self.assertIn(uuid, data['links'][0]['href']) for l in data['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) if public_url is not None: expected = [{'href': '%s/v1/ports/%s' % (public_url, uuid), 'rel': 'self'}, {'href': '%s/ports/%s' % (public_url, uuid), 'rel': 'bookmark'}] for i in expected: self.assertIn(i, data['links']) def test_links(self): self._test_links() def test_links_public_url(self): self._test_links(public_url='http://foo') def test_collection_links(self): ports = [] for id_ in range(5): port = obj_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % id_) 
ports.append(port.uuid) data = self.get_json('/ports/?limit=3') self.assertEqual(3, len(data['ports'])) next_marker = data['ports'][-1]['uuid'] self.assertIn(next_marker, data['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') ports = [] for id_ in range(5): port = obj_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % id_) ports.append(port.uuid) data = self.get_json('/ports') self.assertEqual(3, len(data['ports'])) next_marker = data['ports'][-1]['uuid'] self.assertIn(next_marker, data['next']) def test_port_by_address(self): address_template = "aa:bb:cc:dd:ee:f%d" for id_ in range(3): obj_utils.create_test_port(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address=address_template % id_) target_address = address_template % 1 data = self.get_json('/ports?address=%s' % target_address) self.assertThat(data['ports'], matchers.HasLength(1)) self.assertEqual(target_address, data['ports'][0]['address']) def test_port_by_address_non_existent_address(self): # non-existent address data = self.get_json('/ports?address=%s' % 'aa:bb:cc:dd:ee:ff') self.assertThat(data['ports'], matchers.HasLength(0)) def test_port_by_address_invalid_address_format(self): obj_utils.create_test_port(self.context, node_id=self.node.id) invalid_address = 'invalid-mac-format' response = self.get_json('/ports?address=%s' % invalid_address, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn(invalid_address, response.json['error_message']) def test_sort_key(self): ports = [] for id_ in range(3): port = obj_utils.create_test_port( self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % id_) ports.append(port.uuid) data = self.get_json('/ports?sort_key=uuid') uuids = [n['uuid'] for n in data['ports']] 
self.assertEqual(sorted(ports), uuids) def test_sort_key_invalid(self): invalid_keys_list = ['foo', 'extra', 'internal_info', 'local_link_connection'] for invalid_key in invalid_keys_list: response = self.get_json( '/ports?sort_key=%s' % invalid_key, expect_errors=True, headers={api_base.Version.string: str(api_v1.MAX_VER)} ) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn(invalid_key, response.json['error_message']) @mock.patch.object(api_utils, 'get_rpc_node') def test_get_all_by_node_name_ok(self, mock_get_rpc_node): # GET /v1/ports specifying node_name - success mock_get_rpc_node.return_value = self.node for i in range(5): if i < 3: node_id = self.node.id else: node_id = 100000 + i obj_utils.create_test_port(self.context, node_id=node_id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % i) data = self.get_json("/ports?node=%s" % 'test-node', headers={api_base.Version.string: '1.5'}) self.assertEqual(3, len(data['ports'])) @mock.patch.object(api_utils, 'get_rpc_node') def test_get_all_by_node_uuid_and_name(self, mock_get_rpc_node): # GET /v1/ports specifying node and uuid - should only use node_uuid mock_get_rpc_node.return_value = self.node obj_utils.create_test_port(self.context, node_id=self.node.id) self.get_json('/ports/detail?node_uuid=%s&node=%s' % (self.node.uuid, 'node-name')) mock_get_rpc_node.assert_called_once_with(self.node.uuid) @mock.patch.object(api_utils, 'get_rpc_node') def test_get_all_by_node_name_not_supported(self, mock_get_rpc_node): # GET /v1/ports specifying node_name - name not supported mock_get_rpc_node.side_effect = ( exception.InvalidUuidOrName(name=self.node.uuid)) for i in range(3): obj_utils.create_test_port(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='52:54:00:cf:2d:3%s' % i) data = self.get_json("/ports?node=%s" % 'test-node', expect_errors=True) self.assertEqual(0, mock_get_rpc_node.call_count) 
self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_int) @mock.patch.object(api_utils, 'get_rpc_node') def test_detail_by_node_name_ok(self, mock_get_rpc_node): # GET /v1/ports/detail specifying node_name - success mock_get_rpc_node.return_value = self.node port = obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json('/ports/detail?node=%s' % 'test-node', headers={api_base.Version.string: '1.5'}) self.assertEqual(port.uuid, data['ports'][0]['uuid']) self.assertEqual(self.node.uuid, data['ports'][0]['node_uuid']) @mock.patch.object(api_utils, 'get_rpc_node') def test_detail_by_node_name_not_supported(self, mock_get_rpc_node): # GET /v1/ports/detail specifying node_name - name not supported mock_get_rpc_node.side_effect = ( exception.InvalidUuidOrName(name=self.node.uuid)) obj_utils.create_test_port(self.context, node_id=self.node.id) data = self.get_json('/ports/detail?node=%s' % 'test-node', expect_errors=True) self.assertEqual(0, mock_get_rpc_node.call_count) self.assertEqual(http_client.NOT_ACCEPTABLE, data.status_int) def test_get_all_by_portgroup_uuid(self): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) port = obj_utils.create_test_port(self.context, node_id=self.node.id, portgroup_id=pg.id) data = self.get_json('/ports/detail?portgroup=%s' % pg.uuid, headers={api_base.Version.string: '1.24'}) self.assertEqual(port.uuid, data['ports'][0]['uuid']) self.assertEqual(pg.uuid, data['ports'][0]['portgroup_uuid']) def test_get_all_by_portgroup_uuid_older_api_version(self): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) response = self.get_json( '/ports/detail?portgroup=%s' % pg.uuid, headers={api_base.Version.string: '1.14'}, expect_errors=True ) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_get_all_by_portgroup_name(self): pg = obj_utils.create_test_portgroup(self.context, 
node_id=self.node.id) port = obj_utils.create_test_port(self.context, node_id=self.node.id, portgroup_id=pg.id) data = self.get_json('/ports/detail?portgroup=%s' % pg.name, headers={api_base.Version.string: '1.24'}) self.assertEqual(port.uuid, data['ports'][0]['uuid']) self.assertEqual(pg.uuid, data['ports'][0]['portgroup_uuid']) self.assertEqual(1, len(data['ports'])) def test_get_all_by_portgroup_uuid_and_node_uuid(self): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) response = self.get_json( '/ports/detail?portgroup=%s&node=%s' % (pg.uuid, self.node.uuid), headers={api_base.Version.string: '1.24'}, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.FORBIDDEN, response.status_int) @mock.patch.object(api_port.PortsController, '_get_ports_collection') def test_detail_with_incorrect_api_usage(self, mock_gpc): # GET /v1/ports/detail specifying node and node_uuid. In this case # we expect the node_uuid interface to be used. 
self.get_json('/ports/detail?node=%s&node_uuid=%s' % ('test-node', self.node.uuid)) mock_gpc.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY) def test_portgroups_subresource_node_not_found(self): non_existent_uuid = 'eeeeeeee-cccc-aaaa-bbbb-cccccccccccc' response = self.get_json('/portgroups/%s/ports' % non_existent_uuid, expect_errors=True) self.assertEqual(http_client.NOT_FOUND, response.status_int) def test_portgroups_subresource_invalid_ident(self): invalid_ident = '123 123' response = self.get_json('/portgroups/%s/ports' % invalid_ident, headers={api_base.Version.string: '1.24'}, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertIn('Expected a logical name or UUID', response.json['error_message']) @mock.patch.object(rpcapi.ConductorAPI, 'update_port') class TestPatch(test_api_base.BaseApiTest): def setUp(self): super(TestPatch, self).setUp() self.node = obj_utils.create_test_node(self.context) self.port = obj_utils.create_test_port(self.context, node_id=self.node.id) p = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for') self.mock_gtf = p.start() self.mock_gtf.return_value = 'test-topic' self.addCleanup(p.stop) @mock.patch.object(notification_utils, '_emit_api_notification') def test_update_byid(self, mock_notify, mock_upd): extra = {'foo': 'bar'} mock_upd.return_value = self.port mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/extra/foo', 'value': 'bar', 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(extra, response.json['extra']) kargs = mock_upd.call_args[0][1] self.assertEqual(extra, kargs.extra) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 
'update', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.END, node_uuid=self.node.uuid)]) def test_update_byaddress_not_allowed(self, mock_upd): extra = {'foo': 'bar'} mock_upd.return_value = self.port mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.address, [{'path': '/extra/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertIn(self.port.address, response.json['error_message']) self.assertFalse(mock_upd.called) def test_update_not_found(self, mock_upd): uuid = uuidutils.generate_uuid() response = self.patch_json('/ports/%s' % uuid, [{'path': '/extra/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_FOUND, response.status_int) self.assertTrue(response.json['error_message']) self.assertFalse(mock_upd.called) def test_replace_singular(self, mock_upd): address = 'aa:bb:cc:dd:ee:ff' mock_upd.return_value = self.port mock_upd.return_value.address = address response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'value': address, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(address, response.json['address']) self.assertTrue(mock_upd.called) kargs = mock_upd.call_args[0][1] self.assertEqual(address, kargs.address) @mock.patch.object(notification_utils, '_emit_api_notification') def test_replace_address_already_exist(self, mock_notify, mock_upd): address = 'aa:aa:aa:aa:aa:aa' mock_upd.side_effect = exception.MACAlreadyExists(mac=address) response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'value': address, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) 
self.assertEqual(http_client.CONFLICT, response.status_code) self.assertTrue(response.json['error_message']) self.assertTrue(mock_upd.called) kargs = mock_upd.call_args[0][1] self.assertEqual(address, kargs.address) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 'update', obj_fields.NotificationLevel.ERROR, obj_fields.NotificationStatus.ERROR, node_uuid=self.node.uuid)]) def test_replace_node_uuid(self, mock_upd): mock_upd.return_value = self.port response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_uuid', 'value': self.node.uuid, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) def test_replace_local_link_connection(self, mock_upd): switch_id = 'aa:bb:cc:dd:ee:ff' mock_upd.return_value = self.port mock_upd.return_value.local_link_connection['switch_id'] = switch_id response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/local_link_connection/switch_id', 'value': switch_id, 'op': 'replace'}], headers={api_base.Version.string: '1.19'}) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(switch_id, response.json['local_link_connection']['switch_id']) self.assertTrue(mock_upd.called) kargs = mock_upd.call_args[0][1] self.assertEqual(switch_id, kargs.local_link_connection['switch_id']) def test_remove_local_link_connection_old_api(self, mock_upd): response = self.patch_json( '/ports/%s' % self.port.uuid, [{'path': '/local_link_connection/switch_id', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) def test_set_pxe_enabled_false_old_api(self, mock_upd): response = 
self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/pxe_enabled', 'value': False, 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) def test_add_portgroup_uuid(self, mock_upd): mock_upd.return_value = self.port pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:bb', name='bar') headers = {api_base.Version.string: '1.24'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/portgroup_uuid', 'value': pg.uuid, 'op': 'add'}], headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) def test_replace_portgroup_uuid(self, mock_upd): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:bb', name='bar') mock_upd.return_value = self.port headers = {api_base.Version.string: '1.24'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/portgroup_uuid', 'value': pg.uuid, 'op': 'replace'}], headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) def test_replace_portgroup_uuid_remove(self, mock_upd): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:bb', name='bar') mock_upd.return_value = self.port headers = {api_base.Version.string: '1.24'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/portgroup_uuid', 'value': pg.uuid, 'op': 'remove'}], headers=headers) self.assertEqual('application/json', response.content_type) self.assertIsNone(mock_upd.call_args[0][1].portgroup_id) def test_replace_portgroup_uuid_remove_add(self, mock_upd): pg = obj_utils.create_test_portgroup(self.context, 
node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:bb', name='bar') pg1 = obj_utils.create_test_portgroup(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:b1', name='bbb') mock_upd.return_value = self.port headers = {api_base.Version.string: '1.24'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/portgroup_uuid', 'value': pg.uuid, 'op': 'remove'}, {'path': '/portgroup_uuid', 'value': pg1.uuid, 'op': 'add'}], headers=headers) self.assertEqual('application/json', response.content_type) self.assertTrue(pg1.id, mock_upd.call_args[0][1].portgroup_id) def test_replace_portgroup_uuid_old_api(self, mock_upd): pg = obj_utils.create_test_portgroup(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), address='bb:bb:bb:bb:bb:bb', name='bar') mock_upd.return_value = self.port headers = {api_base.Version.string: '1.15'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/portgroup_uuid', 'value': pg.uuid, 'op': 'replace'}], headers=headers, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code) def test_add_node_uuid(self, mock_upd): mock_upd.return_value = self.port response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_uuid', 'value': self.node.uuid, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) def test_add_node_id(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_id', 'value': '1', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertFalse(mock_upd.called) def test_replace_node_id(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_id', 'value': '1', 'op': 'replace'}], 
expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertFalse(mock_upd.called) def test_remove_node_id(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_id', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertFalse(mock_upd.called) def test_replace_non_existent_node_uuid(self, mock_upd): node_uuid = '12506333-a81c-4d59-9987-889ed5f8687b' response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/node_uuid', 'value': node_uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertIn(node_uuid, response.json['error_message']) self.assertFalse(mock_upd.called) def test_replace_multi(self, mock_upd): extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"} self.port.extra = extra self.port.save() # mutate extra so we replace all of them extra = dict((k, extra[k] + 'x') for k in extra.keys()) patch = [] for k in extra.keys(): patch.append({'path': '/extra/%s' % k, 'value': extra[k], 'op': 'replace'}) mock_upd.return_value = self.port mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.uuid, patch) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(extra, response.json['extra']) kargs = mock_upd.call_args[0][1] self.assertEqual(extra, kargs.extra) def test_remove_multi(self, mock_upd): extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"} self.port.extra = extra self.port.save() # Removing one item from the collection extra.pop('foo1') mock_upd.return_value = self.port mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.uuid, 
[{'path': '/extra/foo1', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(extra, response.json['extra']) kargs = mock_upd.call_args[0][1] self.assertEqual(extra, kargs.extra) # Removing the collection extra = {} mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/extra', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual({}, response.json['extra']) kargs = mock_upd.call_args[0][1] self.assertEqual(extra, kargs.extra) # Assert nothing else was changed self.assertEqual(self.port.uuid, response.json['uuid']) self.assertEqual(self.port.address, response.json['address']) def test_remove_non_existent_property_fail(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/extra/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertTrue(response.json['error_message']) self.assertFalse(mock_upd.called) def test_remove_mandatory_field(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_code) self.assertTrue(response.json['error_message']) self.assertIn('mandatory attribute', response.json['error_message']) self.assertFalse(mock_upd.called) def test_add_root(self, mock_upd): address = 'aa:bb:cc:dd:ee:ff' mock_upd.return_value = self.port mock_upd.return_value.address = address response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'value': address, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) 
self.assertEqual(http_client.OK, response.status_code) self.assertEqual(address, response.json['address']) self.assertTrue(mock_upd.called) kargs = mock_upd.call_args[0][1] self.assertEqual(address, kargs.address) def test_add_root_non_existent(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) self.assertFalse(mock_upd.called) def test_add_multi(self, mock_upd): extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"} patch = [] for k in extra.keys(): patch.append({'path': '/extra/%s' % k, 'value': extra[k], 'op': 'add'}) mock_upd.return_value = self.port mock_upd.return_value.extra = extra response = self.patch_json('/ports/%s' % self.port.uuid, patch) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(extra, response.json['extra']) kargs = mock_upd.call_args[0][1] self.assertEqual(extra, kargs.extra) def test_remove_uuid(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertFalse(mock_upd.called) def test_update_address_invalid_format(self, mock_upd): response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'value': 'invalid-format', 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) self.assertFalse(mock_upd.called) def test_update_port_address_normalized(self, mock_upd): address = 
'AA:BB:CC:DD:EE:FF' mock_upd.return_value = self.port mock_upd.return_value.address = address.lower() response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/address', 'value': address, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(address.lower(), response.json['address']) kargs = mock_upd.call_args[0][1] self.assertEqual(address.lower(), kargs.address) def test_update_pxe_enabled_allowed(self, mock_upd): pxe_enabled = True mock_upd.return_value = self.port mock_upd.return_value.pxe_enabled = pxe_enabled response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/pxe_enabled', 'value': pxe_enabled, 'op': 'replace'}], headers={api_base.Version.string: '1.19'}) self.assertEqual(http_client.OK, response.status_code) self.assertEqual(pxe_enabled, response.json['pxe_enabled']) def test_update_pxe_enabled_old_api_version(self, mock_upd): pxe_enabled = True mock_upd.return_value = self.port headers = {api_base.Version.string: '1.14'} response = self.patch_json('/ports/%s' % self.port.uuid, [{'path': '/pxe_enabled', 'value': pxe_enabled, 'op': 'replace'}], expect_errors=True, headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) self.assertFalse(mock_upd.called) def test_portgroups_subresource_patch(self, mock_upd): portgroup = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) port = obj_utils.create_test_port(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), portgroup_id=portgroup.id, address='52:55:00:cf:2d:31') headers = {api_base.Version.string: '1.24'} response = self.patch_json( '/portgroups/%(portgroup)s/ports/%(port)s' % {'portgroup': portgroup.uuid, 'port': port.uuid}, [{'path': '/address', 'value': '00:00:00:00:00:00', 'op': 'replace'}], headers=headers, expect_errors=True) self.assertEqual(http_client.FORBIDDEN, 
response.status_int) self.assertEqual('application/json', response.content_type) class TestPost(test_api_base.BaseApiTest): def setUp(self): super(TestPost, self).setUp() self.node = obj_utils.create_test_node(self.context) self.portgroup = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) self.headers = {api_base.Version.string: str( versions.MAX_VERSION_STRING)} @mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id', autospec=True) @mock.patch.object(notification_utils, '_emit_api_notification') @mock.patch.object(timeutils, 'utcnow') def test_create_port(self, mock_utcnow, mock_notify, mock_warn): pdict = post_get_test_port() test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/ports', pdict, headers=self.headers) self.assertEqual(http_client.CREATED, response.status_int) result = self.get_json('/ports/%s' % pdict['uuid'], headers=self.headers) self.assertEqual(pdict['uuid'], result['uuid']) self.assertFalse(result['updated_at']) return_created_at = timeutils.parse_isotime( result['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) # Check location header self.assertIsNotNone(response.location) expected_location = '/v1/ports/%s' % pdict['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.END, node_uuid=self.node.uuid)]) self.assertEqual(0, mock_warn.call_count) def test_create_port_min_api_version(self): pdict = post_get_test_port( node_uuid=self.node.uuid) pdict.pop('local_link_connection') pdict.pop('pxe_enabled') pdict.pop('extra') headers = {api_base.Version.string: str(api_v1.MIN_VER)} response = self.post_json('/ports', pdict, 
headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) self.assertEqual(self.node.uuid, response.json['node_uuid']) def test_create_port_doesnt_contain_id(self): with mock.patch.object(self.dbapi, 'create_port', wraps=self.dbapi.create_port) as cp_mock: pdict = post_get_test_port(extra={'foo': 123}) self.post_json('/ports', pdict, headers=self.headers) result = self.get_json('/ports/%s' % pdict['uuid'], headers=self.headers) self.assertEqual(pdict['extra'], result['extra']) cp_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cp_mock.call_args[0][0]) @mock.patch.object(notification_utils.LOG, 'exception', autospec=True) @mock.patch.object(notification_utils.LOG, 'warning', autospec=True) def test_create_port_generate_uuid(self, mock_warning, mock_exception): pdict = post_get_test_port() del pdict['uuid'] response = self.post_json('/ports', pdict, headers=self.headers) result = self.get_json('/ports/%s' % response.json['uuid'], headers=self.headers) self.assertEqual(pdict['address'], result['address']) self.assertTrue(uuidutils.is_uuid_like(result['uuid'])) self.assertFalse(mock_warning.called) self.assertFalse(mock_exception.called) @mock.patch.object(notification_utils, '_emit_api_notification') @mock.patch.object(objects.Port, 'create') def test_create_port_error(self, mock_create, mock_notify): mock_create.side_effect = Exception() pdict = post_get_test_port() self.post_json('/ports', pdict, headers=self.headers, expect_errors=True) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 'create', obj_fields.NotificationLevel.ERROR, obj_fields.NotificationStatus.ERROR, node_uuid=self.node.uuid)]) def test_create_port_valid_extra(self): pdict = 
post_get_test_port(extra={'str': 'foo', 'int': 123, 'float': 0.1, 'bool': True, 'list': [1, 2], 'none': None, 'dict': {'cat': 'meow'}}) self.post_json('/ports', pdict, headers=self.headers) result = self.get_json('/ports/%s' % pdict['uuid'], headers=self.headers) self.assertEqual(pdict['extra'], result['extra']) def test_create_port_no_mandatory_field_address(self): pdict = post_get_test_port() del pdict['address'] response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_create_port_no_mandatory_field_node_uuid(self): pdict = post_get_test_port() del pdict['node_uuid'] response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_create_port_invalid_addr_format(self): pdict = post_get_test_port(address='invalid-format') response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_create_port_address_normalized(self): address = 'AA:BB:CC:DD:EE:FF' pdict = post_get_test_port(address=address) self.post_json('/ports', pdict, headers=self.headers) result = self.get_json('/ports/%s' % pdict['uuid'], headers=self.headers) self.assertEqual(address.lower(), result['address']) def test_create_port_with_hyphens_delimiter(self): pdict = post_get_test_port() colonsMAC = pdict['address'] hyphensMAC = colonsMAC.replace(':', '-') pdict['address'] = hyphensMAC response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', 
response.content_type) self.assertTrue(response.json['error_message']) def test_create_port_invalid_node_uuid_format(self): pdict = post_get_test_port(node_uuid='invalid-format') response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_node_uuid_to_node_id_mapping(self): pdict = post_get_test_port(node_uuid=self.node['uuid']) self.post_json('/ports', pdict, headers=self.headers) # GET doesn't return the node_id it's an internal value port = self.dbapi.get_port_by_uuid(pdict['uuid']) self.assertEqual(self.node['id'], port.node_id) def test_create_port_node_uuid_not_found(self): pdict = post_get_test_port( node_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e') response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_portgroup_uuid_not_found(self): pdict = post_get_test_port( portgroup_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e') response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_portgroup_uuid_not_found_old_api_version(self): pdict = post_get_test_port( portgroup_uuid='1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e') response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_portgroup(self): pdict = post_get_test_port( portgroup_uuid=self.portgroup.uuid, node_uuid=self.node.uuid) response 
= self.post_json('/ports', pdict, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) def test_create_port_portgroup_different_nodes(self): pdict = post_get_test_port( portgroup_uuid=self.portgroup.uuid, node_uuid=uuidutils.generate_uuid()) response = self.post_json('/ports', pdict, headers=self.headers, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) def test_create_port_portgroup_old_api_version(self): pdict = post_get_test_port( portgroup_uuid=self.portgroup.uuid, node_uuid=self.node.uuid ) headers = {api_base.Version.string: '1.15'} response = self.post_json('/ports', pdict, expect_errors=True, headers=headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_create_port_address_already_exist(self): address = 'AA:AA:AA:11:22:33' pdict = post_get_test_port(address=address) self.post_json('/ports', pdict, headers=self.headers) pdict['uuid'] = uuidutils.generate_uuid() response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual(http_client.CONFLICT, response.status_int) self.assertEqual('application/json', response.content_type) error_msg = response.json['error_message'] self.assertTrue(error_msg) self.assertIn(address, error_msg.upper()) def test_create_port_with_internal_field(self): pdict = post_get_test_port() pdict['internal_info'] = {'a': 'b'} response = self.post_json('/ports', pdict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_some_invalid_local_link_connection_key(self): pdict = post_get_test_port( local_link_connection={'switch_id': 'value1', 'port_id': 'Ethernet1/15', 'switch_foo': 
'value3'}) response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_local_link_connection_keys(self): pdict = post_get_test_port( local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', 'port_id': 'Ethernet1/15', 'switch_info': 'value3'}) response = self.post_json('/ports', pdict, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) def test_create_port_local_link_connection_switch_id_bad_mac(self): pdict = post_get_test_port( local_link_connection={'switch_id': 'zz:zz:zz:zz:zz:zz', 'port_id': 'Ethernet1/15', 'switch_info': 'value3'}) response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertTrue(response.json['error_message']) def test_create_port_local_link_connection_missing_mandatory(self): pdict = post_get_test_port( local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', 'switch_info': 'fooswitch'}) response = self.post_json('/ports', pdict, expect_errors=True, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.BAD_REQUEST, response.status_int) def test_create_port_local_link_connection_missing_optional(self): pdict = post_get_test_port( local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', 'port_id': 'Ethernet1/15'}) response = self.post_json('/ports', pdict, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) def test_create_port_with_llc_old_api_version(self): headers = {api_base.Version.string: '1.14'} pdict = post_get_test_port( 
local_link_connection={'switch_id': '0a:1b:2c:3d:4e:5f', 'port_id': 'Ethernet1/15'}) response = self.post_json('/ports', pdict, headers=headers, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_create_port_with_pxe_enabled_old_api_version(self): headers = {api_base.Version.string: '1.14'} pdict = post_get_test_port(pxe_enabled=False) del pdict['local_link_connection'] del pdict['portgroup_uuid'] response = self.post_json('/ports', pdict, headers=headers, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_int) def test_portgroups_subresource_post(self): headers = {api_base.Version.string: '1.24'} pdict = post_get_test_port() response = self.post_json('/portgroups/%s/ports' % self.portgroup.uuid, pdict, headers=headers, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.FORBIDDEN, response.status_int) @mock.patch.object(common_utils, 'warn_about_deprecated_extra_vif_port_id', autospec=True) def test_create_port_with_extra_vif_port_id_deprecated(self, mock_warn): pdict = post_get_test_port(pxe_enabled=False, extra={'vif_port_id': 'foo'}) response = self.post_json('/ports', pdict, headers=self.headers) self.assertEqual('application/json', response.content_type) self.assertEqual(http_client.CREATED, response.status_int) self.assertEqual(1, mock_warn.call_count) def _test_create_port(self, has_vif=False, in_portgroup=False, pxe_enabled=True, standalone_ports=True, http_status=http_client.CREATED): extra = {} if has_vif: extra = {'vif_port_id': uuidutils.generate_uuid()} pdict = post_get_test_port( node_uuid=self.node.uuid, pxe_enabled=pxe_enabled, extra=extra) if not in_portgroup: pdict.pop('portgroup_uuid') else: self.portgroup.standalone_ports_supported = standalone_ports self.portgroup.save() expect_errors = 
http_status != http_client.CREATED response = self.post_json('/ports', pdict, headers=self.headers, expect_errors=expect_errors) self.assertEqual('application/json', response.content_type) self.assertEqual(http_status, response.status_int) if not expect_errors: expected_portgroup_uuid = pdict.get('portgroup_uuid', None) self.assertEqual(expected_portgroup_uuid, response.json['portgroup_uuid']) self.assertEqual(extra, response.json['extra']) def test_create_port_novif_pxe_noportgroup(self): self._test_create_port(has_vif=False, in_portgroup=False, pxe_enabled=True, http_status=http_client.CREATED) def test_create_port_novif_nopxe_noportgroup(self): self._test_create_port(has_vif=False, in_portgroup=False, pxe_enabled=False, http_status=http_client.CREATED) def test_create_port_vif_pxe_noportgroup(self): self._test_create_port(has_vif=True, in_portgroup=False, pxe_enabled=True, http_status=http_client.CREATED) def test_create_port_vif_nopxe_noportgroup(self): self._test_create_port(has_vif=True, in_portgroup=False, pxe_enabled=False, http_status=http_client.CREATED) def test_create_port_novif_pxe_portgroup_standalone_ports(self): self._test_create_port(has_vif=False, in_portgroup=True, pxe_enabled=True, standalone_ports=True, http_status=http_client.CREATED) def test_create_port_novif_pxe_portgroup_nostandalone_ports(self): self._test_create_port(has_vif=False, in_portgroup=True, pxe_enabled=True, standalone_ports=False, http_status=http_client.CONFLICT) def test_create_port_novif_nopxe_portgroup_standalone_ports(self): self._test_create_port(has_vif=False, in_portgroup=True, pxe_enabled=False, standalone_ports=True, http_status=http_client.CREATED) def test_create_port_novif_nopxe_portgroup_nostandalone_ports(self): self._test_create_port(has_vif=False, in_portgroup=True, pxe_enabled=False, standalone_ports=False, http_status=http_client.CREATED) def test_create_port_vif_pxe_portgroup_standalone_ports(self): self._test_create_port(has_vif=True, in_portgroup=True, 
pxe_enabled=True, standalone_ports=True, http_status=http_client.CREATED) def test_create_port_vif_pxe_portgroup_nostandalone_ports(self): self._test_create_port(has_vif=True, in_portgroup=True, pxe_enabled=True, standalone_ports=False, http_status=http_client.CONFLICT) def test_create_port_vif_nopxe_portgroup_standalone_ports(self): self._test_create_port(has_vif=True, in_portgroup=True, pxe_enabled=False, standalone_ports=True, http_status=http_client.CREATED) def test_create_port_vif_nopxe_portgroup_nostandalone_ports(self): self._test_create_port(has_vif=True, in_portgroup=True, pxe_enabled=False, standalone_ports=False, http_status=http_client.CONFLICT) @mock.patch.object(rpcapi.ConductorAPI, 'destroy_port') class TestDelete(test_api_base.BaseApiTest): def setUp(self): super(TestDelete, self).setUp() self.node = obj_utils.create_test_node(self.context) self.port = obj_utils.create_test_port(self.context, node_id=self.node.id) gtf = mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for') self.mock_gtf = gtf.start() self.mock_gtf.return_value = 'test-topic' self.addCleanup(gtf.stop) def test_delete_port_byaddress(self, mock_dpt): response = self.delete('/ports/%s' % self.port.address, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn(self.port.address, response.json['error_message']) @mock.patch.object(notification_utils, '_emit_api_notification') def test_delete_port_byid(self, mock_notify, mock_dpt): self.delete('/ports/%s' % self.port.uuid, expect_errors=True) self.assertTrue(mock_dpt.called) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.END, node_uuid=self.node.uuid)]) @mock.patch.object(notification_utils, 
'_emit_api_notification') def test_delete_port_node_locked(self, mock_notify, mock_dpt): self.node.reserve(self.context, 'fake', self.node.uuid) mock_dpt.side_effect = exception.NodeLocked(node='fake-node', host='fake-host') ret = self.delete('/ports/%s' % self.port.uuid, expect_errors=True) self.assertEqual(http_client.CONFLICT, ret.status_code) self.assertTrue(ret.json['error_message']) self.assertTrue(mock_dpt.called) mock_notify.assert_has_calls([mock.call(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.INFO, obj_fields.NotificationStatus.START, node_uuid=self.node.uuid), mock.call(mock.ANY, mock.ANY, 'delete', obj_fields.NotificationLevel.ERROR, obj_fields.NotificationStatus.ERROR, node_uuid=self.node.uuid)]) def test_portgroups_subresource_delete(self, mock_dpt): portgroup = obj_utils.create_test_portgroup(self.context, node_id=self.node.id) port = obj_utils.create_test_port(self.context, node_id=self.node.id, uuid=uuidutils.generate_uuid(), portgroup_id=portgroup.id, address='52:55:00:cf:2d:31') headers = {api_base.Version.string: '1.24'} response = self.delete( '/portgroups/%(portgroup)s/ports/%(port)s' % {'portgroup': portgroup.uuid, 'port': port.uuid}, headers=headers, expect_errors=True) self.assertEqual(http_client.FORBIDDEN, response.status_int) self.assertEqual('application/json', response.content_type)
{ "content_hash": "218d8df77a3baf87598da7cfd76db56d", "timestamp": "", "source": "github", "line_count": 1423, "max_line_length": 79, "avg_line_length": 49.84961349262122, "alnum_prop": 0.549481222510432, "repo_name": "ruyang/ironic", "id": "b6f1bc36e0fde31650aa11ddb6207bdba95d3e0c", "size": "71536", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ironic/tests/unit/api/v1/test_ports.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "349" }, { "name": "Python", "bytes": "5133461" }, { "name": "Shell", "bytes": "107097" } ], "symlink_target": "" }
import logging
from ncclient import manager
import sys

# Connection details for the target Nexus device.  These defaults match a
# Nexus device running in VIRL in the DevNet Always On Sandbox -- adjust
# them for your own environment.
HOST = '172.16.1.82'    # IP address or hostname of the Nexus device
PORT = 22               # port the device's NETCONF agent listens on
USER = 'cisco'          # login username
PASS = 'cisco'          # login password


def main():
    """Connect over NETCONF and print every capability the device advertises."""
    connect_args = {
        'host': HOST,
        'port': PORT,
        'username': USER,
        'password': PASS,
        'hostkey_verify': False,
        'device_params': {'name': 'nexus'},
        'look_for_keys': False,
        'allow_agent': False,
    }
    # The context manager closes the NETCONF session when the block exits.
    with manager.connect(**connect_args) as session:
        print('***Here are the Remote Devices Capabilities***')
        for cap in session.server_capabilities:
            print(cap)


if __name__ == '__main__':
    sys.exit(main())
{ "content_hash": "09547b6d72869b8d3cc27e94d20d29f3", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 79, "avg_line_length": 32.266666666666666, "alnum_prop": 0.6663223140495868, "repo_name": "CiscoDevNet/netconf-examples", "id": "64753bf77efbcb780b5bc9dec26149f4aab77554", "size": "991", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "netconf-101/get_capabilities.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "17337" } ], "symlink_target": "" }
import site import getopt, string, sys from PIL import Image def usage(): print "PIL Convert 0.5/1998-12-30 -- convert image files" print "Usage: pilconvert [option] infile outfile" print print "Options:" print print " -c <format> convert to format (default is given by extension)" print print " -g convert to greyscale" print " -p convert to palette image (using standard palette)" print " -r convert to rgb" print print " -o optimize output (trade speed for size)" print " -q <value> set compression quality (0-100, JPEG only)" print print " -f list supported file formats" sys.exit(1) if len(sys.argv) == 1: usage() try: opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r") except getopt.error, v: print v sys.exit(1) format = None convert = None options = { } for o, a in opt: if o == "-f": Image.init() id = Image.ID[:] id.sort() print "Supported formats (* indicates output format):" for i in id: if Image.SAVE.has_key(i): print i+"*", else: print i, sys.exit(1) elif o == "-c": format = a if o == "-g": convert = "L" elif o == "-p": convert = "P" elif o == "-r": convert = "RGB" elif o == "-o": options["optimize"] = 1 elif o == "-q": options["quality"] = string.atoi(a) if len(argv) != 2: usage() try: im = Image.open(argv[0]) if convert and im.mode != convert: im.draft(convert, im.size) im = im.convert(convert) if format: apply(im.save, (argv[1], format), options) else: apply(im.save, (argv[1],), options) except: print "cannot convert image", print "(%s:%s)" % (sys.exc_type, sys.exc_value)
{ "content_hash": "6e248aaff179c35268a72fe9eeb57b2a", "timestamp": "", "source": "github", "line_count": 81, "max_line_length": 76, "avg_line_length": 23.432098765432098, "alnum_prop": 0.5305584826132771, "repo_name": "DMLoy/ECommerceBasic", "id": "a8b3ab0ecd8137443dcd79d97c6e7b66c34592f6", "size": "2291", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bin/pilconvert.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "235478" }, { "name": "JavaScript", "bytes": "246388" }, { "name": "Python", "bytes": "7601902" }, { "name": "Shell", "bytes": "7388" } ], "symlink_target": "" }
''' utils.py Functions that don't fit anywhere else. ''' import random import pickle import math from io import BytesIO import os import glob import socket import math import zipfile import itertools from PIL import Image import numpy as np ''' IMAGES ''' def scale(im, size=128): ''' accepts: PIL image, size of square sides returns: PIL image scaled so sides lenght = size ''' size = (size,size) im.thumbnail(size, Image.ANTIALIAS) return im def img_to_binary(img): ''' accepts: PIL image returns: binary stream (used to save to database) ''' f = BytesIO() img.save(f, format='jpeg') return f.getvalue() def arr_to_binary(arr): ''' accepts: numpy array with shape (Hight, Width, Channels) returns: binary stream (used to save to database) ''' img = arr_to_img(arr) return img_to_binary(img) def arr_to_img(arr): ''' accepts: numpy array with shape (Hight, Width, Channels) returns: binary stream (used to save to database) ''' arr = np.uint8(arr) img = Image.fromarray(arr) return img def img_to_arr(img): ''' accepts: numpy array with shape (Hight, Width, Channels) returns: binary stream (used to save to database) ''' return np.array(img) def binary_to_img(binary): ''' accepts: binary file object from BytesIO returns: PIL image ''' img = BytesIO(binary) return Image.open(img) def norm_img(img): return (img - img.mean() / np.std(img))/255.0 def create_video(img_dir_path, output_video_path): import envoy # Setup path to the images with telemetry. full_path = os.path.join(img_dir_path, 'frame_*.png') # Run ffmpeg. 
command = ("""ffmpeg -framerate 30/1 -pattern_type glob -i '%s' -c:v libx264 -r 15 -pix_fmt yuv420p -y %s""" % (full_path, output_video_path)) response = envoy.run(command) ''' FILES ''' def most_recent_file(dir_path, ext=''): ''' return the most recent file given a directory path and extension ''' query = dir_path + '/*' + ext newest = min(glob.iglob(query), key=os.path.getctime) return newest def make_dir(path): real_path = os.path.expanduser(path) if not os.path.exists(real_path): os.makedirs(real_path) return real_path def zip_dir(dir_path, zip_path): """ Create and save a zipfile of a one level directory """ file_paths = glob.glob(dir_path + "/*") #create path to search for files. zf = zipfile.ZipFile(zip_path, 'w') dir_name = os.path.basename(dir_path) for p in file_paths: file_name = os.path.basename(p) zf.write(p, arcname=os.path.join(dir_name, file_name)) zf.close() return zip_path ''' BINNING functions to help converte between floating point numbers and categories. ''' def linear_bin(a): a = a + 1 b = round(a / (2/14)) return int(b) def linear_unbin(b): a = b *(2/14) - 1 return a def bin_Y(Y): d = [] for y in Y: arr = np.zeros(15) arr[linear_bin(y)] = 1 d.append(arr) return np.array(d) def unbin_Y(Y): d=[] for y in Y: v = np.argmax(y) v = linear_unbin(v) d.append(v) return np.array(d) ''' NETWORKING ''' def my_ip(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('192.0.0.8', 1027)) return s.getsockname()[0] ''' OTHER ''' def merge_two_dicts(x, y): """Given two dicts, merge them into a new dict as a shallow copy.""" z = x.copy() z.update(y) return z def param_gen(params): ''' Accepts a dictionary of parameter options and returns a list of dictionary with the permutations of the parameters. ''' for p in itertools.product(*params.values()): yield dict(zip(params.keys(), p ))
{ "content_hash": "78c96d8a37c334ec8a58a8333d5a785e", "timestamp": "", "source": "github", "line_count": 213, "max_line_length": 77, "avg_line_length": 18.586854460093896, "alnum_prop": 0.5950997726698661, "repo_name": "SarthakJShetty/MyDonkey", "id": "e4455a4a32c72c83dfca7f076aa93e15fc1d376f", "size": "3959", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "donkey/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1658" }, { "name": "HTML", "bytes": "15988" }, { "name": "JavaScript", "bytes": "57043" }, { "name": "Python", "bytes": "81983" }, { "name": "Shell", "bytes": "1544" } ], "symlink_target": "" }
"""Given experimental parameters, runs the required experiments and obtains the data. To be executed on the Hadoop main node. """ import argparse import ConfigParser import datetime import re from benchmark_utils import * from paths import * VALID_PARAMS = {"lambda", "minpart", "samplefrac", "oraclesize", "num-executors"} VAL_PARAMS = {"lambda", "minpart", "samplefrac", "oraclesize", "stopcrit", "roundlimit", "gaplimit", "gapcheck", "gapthresh", "timelimit", "debugmult"} BOOL_PARAMS = {"sparse", "debug", "linesearch"} HOME_DIR = os.getenv("HOME") PROJ_DIR = os.path.join(HOME_DIR, "dissolve-struct") DEFAULT_CORES = 4 def execute(command, cwd=PROJ_DIR): subprocess.check_call(command, cwd=cwd, shell=True) def str_to_bool(s): if s in ['True', 'true']: return True elif s in ['False', 'false']: return False else: raise ValueError("Boolean value in config '%s' unrecognized") def main(): parser = argparse.ArgumentParser(description='Run benchmark') parser.add_argument("expt_config", help="Experimental config file") parser.add_argument("--ds", help="Run with debugging separately. Forces execution of two spark jobs", action='store_true') args = parser.parse_args() # Check if setup has been executed touchfile_path = os.path.join(HOME_DIR, 'onesmallstep') execute("if [ ! 
-f %s ]; then echo \"Run benchmark_setup and try again\"; exit 1; fi" % touchfile_path, cwd=HOME_DIR) dtf = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S") appname_format = "{dtf}-{expt_name}-{param}-{paramval}" spark_submit_cmd_format = ("{spark_submit} " "--jars {lib_jar_path} " "--class \"{class_name}\" " "{spark_args} " "{examples_jar_path} " "{solver_options_args} " "--kwargs k1=v1,{app_args}") config = ConfigParser.ConfigParser() config.read(args.expt_config) expt_name = config.get("general", "experiment_name") class_name = config.get("general", "class_name") if args.ds: assert ('debug' in config.options('dissolve_args')) # Want debug set to true, in case of double-debugging assert (str_to_bool(config.get('dissolve_args', 'debug'))) # Pivot values pivot_param = config.get("pivot", "param") assert (pivot_param in VALID_PARAMS) pivot_values_raw = config.get("pivot", "values") pivot_values = map(lambda x: x.strip(), pivot_values_raw.split(",")) # Paths examples_jar_path = EXAMPLES_JAR_PATH spark_submit_path = "spark-submit" input_path = config.get("paths", "input_path") output_dir = EXPT_OUTPUT_DIR local_output_expt_dir = os.path.join(output_dir, "%s_%s" % (expt_name, dtf)) if not os.path.exists(local_output_expt_dir): os.makedirs(local_output_expt_dir) dissolve_lib_jar_path = LIB_JAR_PATH scopt_jar_path = SCOPT_JAR_PATH lib_jar_path = ','.join([dissolve_lib_jar_path, scopt_jar_path]) ''' Execute experiment ''' for pivot_val in pivot_values: print "=== %s = %s ===" % (pivot_param, pivot_val) ''' Construct command to execute on spark cluster ''' appname = appname_format.format(dtf=dtf, expt_name=expt_name, param=pivot_param, paramval=pivot_val) # === Construct Solver Options arguments === valued_parameter_args = ' '.join( ["--%s %s" % (k, v) for k, v in config.items("parameters") if k in VAL_PARAMS and k not in ['minpart']]) # Treat 'minpart' as a special case. 
If minpart = 'auto', set minpart = num_cores * num_executors if 'minpart' in config.options('parameters'): if config.get('parameters', 'minpart') == 'auto': if pivot_param == 'num-executors': num_executors = int(pivot_val) else: num_executors = config.getint('spark_args', 'num-executors') minpart = DEFAULT_CORES * num_executors else: minpart = config.getint('parameters', 'minpart') minpart_arg = '--minpart %d' % minpart valued_parameter_args = ' '.join([valued_parameter_args, minpart_arg]) boolean_parameter_args = ' '.join( ["--%s" % k for k, v in config.items("parameters") if k in BOOL_PARAMS and str_to_bool(v)]) valued_dissolve_args = ' '.join( ["--%s %s" % (k, v) for k, v in config.items("dissolve_args") if k in VAL_PARAMS]) boolean_dissolve_args = ' '.join( ["--%s" % k for k, v in config.items("dissolve_args") if k in BOOL_PARAMS and str_to_bool(v)]) solver_options_args = ' '.join( [valued_parameter_args, boolean_parameter_args, valued_dissolve_args, boolean_dissolve_args]) # === Construct Spark arguments === spark_args = ' '.join(["--%s %s" % (k, v) for k, v in config.items("spark_args")]) # === Add the pivotal parameter === assert (pivot_param not in config.options("parameters")) assert (pivot_param not in config.options("spark_args")) pivot_param_arg = "--%s %s" % (pivot_param, pivot_val) # Is this pivotal parameters a spark argument or a dissolve argument? 
if pivot_param in ['num-executors', ]: spark_args = ' '.join([spark_args, pivot_param_arg]) else: solver_options_args = ' '.join([solver_options_args, pivot_param_arg]) # == Construct App-specific arguments === debug_filename = "%s.csv" % appname debug_file_path = os.path.join('', debug_filename) default_app_args = ("appname={appname}," "input_path={input_path}," "debug_file={debug_file_path}").format(appname=appname, input_path=input_path, debug_file_path=debug_file_path) extra_app_args = ','.join(["%s=%s" % (k, v) for k, v in config.items("app_args")]) app_args = ','.join([default_app_args, extra_app_args]) spark_submit_cmd = spark_submit_cmd_format.format(spark_submit=spark_submit_path, lib_jar_path=lib_jar_path, class_name=class_name, examples_jar_path=examples_jar_path, spark_args=spark_args, solver_options_args=solver_options_args, app_args=app_args) ''' Execute Command ''' print "Executing:\n%s" % spark_submit_cmd execute(spark_submit_cmd) ''' If enabled, execute command again, but without the debug flag ''' if args.ds: no_debug_appname = appname + '.no_debug' debug_filename = "%s.csv" % no_debug_appname debug_file_path = os.path.join('', debug_filename) default_app_args = ("appname={appname}," "input_path={input_path}," "debug_file={debug_file_path}").format(appname=no_debug_appname, input_path=input_path, debug_file_path=debug_file_path) extra_app_args = ','.join(["%s=%s" % (k, v) for k, v in config.items("app_args")]) app_args = ','.join([default_app_args, extra_app_args]) # Get rid of the debugging flag solver_options_args = re.sub(' --debug$', ' ', solver_options_args) solver_options_args = re.sub(' --debug ', ' ', solver_options_args) no_debug_spark_submit_cmd = spark_submit_cmd_format.format(spark_submit=spark_submit_path, lib_jar_path=lib_jar_path, class_name=class_name, examples_jar_path=examples_jar_path, spark_args=spark_args, solver_options_args=solver_options_args, app_args=app_args) print "Executing WITHOUT debugging:\n%s" % 
no_debug_spark_submit_cmd execute(no_debug_spark_submit_cmd) if __name__ == '__main__': main()
{ "content_hash": "ebf5a792686e444479e95ac5adc7ef67", "timestamp": "", "source": "github", "line_count": 200, "max_line_length": 125, "avg_line_length": 45.475, "alnum_prop": 0.5136888400219901, "repo_name": "dalab/dissolve-struct", "id": "27224ca08ee9b63c4b5f0d3cd73cb28f7776aeba", "size": "9095", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "helpers/brutus_runner.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "29381" }, { "name": "Scala", "bytes": "214652" }, { "name": "Shell", "bytes": "770" }, { "name": "TeX", "bytes": "31442" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('books', '0002_auto_20170804_0821'), ] operations = [ migrations.RenameField( model_name='book', old_name='author', new_name='authors', ), ]
{ "content_hash": "44a8d08070c3fc33e4bd60fbd9dcece0", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 45, "avg_line_length": 20, "alnum_prop": 0.5722222222222222, "repo_name": "nirajkvinit/python3-study", "id": "1fa9967a16f247b3ab6e971f5f51171131f09e9e", "size": "384", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "intro-django/readit/books/migrations/0003_auto_20170804_0841.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6304" }, { "name": "HTML", "bytes": "33043" }, { "name": "JavaScript", "bytes": "437" }, { "name": "Python", "bytes": "1093883" }, { "name": "Shell", "bytes": "3686" } ], "symlink_target": "" }
import _plotly_utils.basevalidators class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="colorbar", parent_name="splom.marker", **kwargs): super(ColorbarValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "ColorBar"), data_docs=kwargs.pop( "data_docs", """ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). 
If "SI", 1G. If "B", 1B. len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. 
If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format And for dates see: https://github.com/d3/d3-3.x-api- reference/blob/master/Time-Formatting.md#format We add one item to d3's date formatter: "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.splom.m arker.colorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.dat a.splom.marker.colorbar.tickformatstopdefaults) , sets the default property values to use for elements of splom.marker.colorbar.tickformatstops ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. 
ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for ticktext . tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for tickvals . tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.splom.marker.color bar.Title` instance or dict with compatible properties titlefont Deprecated: Please use splom.marker.colorbar.title.font instead. Sets this color bar's title font. Note that the title's font used to be set by the now deprecated `titlefont` attribute. titleside Deprecated: Please use splom.marker.colorbar.title.side instead. Determines the location of color bar's title with respect to the color bar. Note that the title's location used to be set by the now deprecated `titleside` attribute. x Sets the x position of the color bar (in plot fraction). xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. xpad Sets the amount of padding (in px) along the x direction. y Sets the y position of the color bar (in plot fraction). yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. ypad Sets the amount of padding (in px) along the y direction. """, ), **kwargs )
{ "content_hash": "cb9e59eedbc11a48960b9f3c40e17dba", "timestamp": "", "source": "github", "line_count": 228, "max_line_length": 85, "avg_line_length": 47.49561403508772, "alnum_prop": 0.5228552959645396, "repo_name": "plotly/python-api", "id": "1b45201545daed20eb5a6873d6bd4067bd0c4294", "size": "10829", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "packages/python/plotly/plotly/validators/splom/marker/_colorbar.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "6870" }, { "name": "Makefile", "bytes": "1708" }, { "name": "Python", "bytes": "823245" }, { "name": "Shell", "bytes": "3238" } ], "symlink_target": "" }
import os,os.path,sys,datetime,subprocess,string,urllib,zipfile from time import strftime if sys.version_info[0] >= 3: import urllib.request as ur #------------------------ # 2015-02-05 D. O'Hara - requires wget # 2015-02-06 RSD - requires Python 3.2+, takes command line arguments # 2015-02-18 RSD - Fix combining CSVs, don't re-download existing files # 2015-12-10 Christophe Lambert -- converted script to python 2.7, and cleaned up command line arguments. # 2018-05-16 Mustafa Ascha - Added python3 compatibility for urllib import, and an 'all' option to download all files #------------------------ # This script will download and unzip SynPUF files from CMS. # # To run this script, you must have Python 2.7 installed on your system # From the command line, type: # python ppath/to/output # # This will download SynPUF files and extract them into path/to/output # # The SynPUF files are split into 20 sets of files. # # For more information about SynPUF see: # https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/DE_Syn_PUF.html # Read output directory from the command line if len(sys.argv) < 3 and not 'all' in sys.argv: print("usage: get_synpuf_files.py path/to/output/directory <SAMPLE> ... [SAMPLE]") print("where each SAMPLE is a number from 1 to 20, representing the 20 parts of the CMS data") quit(); SAMPLE_RANGE = [] for i in range(2,len(sys.argv)): try: x = int(sys.argv[i]) if(x <1 or x > 20): raise ValueError('Invalid sample number') SAMPLE_RANGE.append(x) except ValueError: if 'all' in sys.argv[i]: for number in [x + 1 for x in range(20)]: SAMPLE_RANGE.append(number) break print("Invalid sample number: " + sys.argv[i] + ". 
Must be 'all' or in range 1..20") quit() OUTPUT_DIRECTORY = sys.argv[1] if not os.path.exists(OUTPUT_DIRECTORY): os.makedirs(OUTPUT_DIRECTORY) #----------------------------------- #----------------------------------- def get_timestamp(): return strftime("%Y-%m-%d %H:%M:%S") #----------------------------------- #- download and unzip all the files for a sample # combine the 3 beneficiary files into 1 file #----------------------------------- def download_synpuf_files(sample_directory, sample_number): print('-'*80) print(get_timestamp(),' download_synpuf_files starting: sample_number=',sample_number) # as of 2015-02-06, files come from different places url_www_cms_gov = 'www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/Downloads' url_downloads_cms_gov = 'downloads.cms.gov/files' synpuf_files = [ [ url_www_cms_gov, 'DE1_0_2008_Beneficiary_Summary_File_Sample_~~.zip' ], [ url_downloads_cms_gov, 'DE1_0_2008_to_2010_Carrier_Claims_Sample_~~A.zip' ], [ url_downloads_cms_gov, 'DE1_0_2008_to_2010_Carrier_Claims_Sample_~~B.zip' ], [ url_www_cms_gov, 'DE1_0_2008_to_2010_Inpatient_Claims_Sample_~~.zip' ], [ url_www_cms_gov, 'DE1_0_2008_to_2010_Outpatient_Claims_Sample_~~.zip' ], [ url_downloads_cms_gov, 'DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_~~.zip' ], [ url_www_cms_gov, 'DE1_0_2009_Beneficiary_Summary_File_Sample_~~.zip' ], [ url_www_cms_gov, 'DE1_0_2010_Beneficiary_Summary_File_Sample_~~.zip' ] ] download_directory = os.path.join(sample_directory,"DE_{0}".format(sample_number)) if not os.path.exists(download_directory): os.makedirs(download_directory) for base_url,sp_file in synpuf_files: sp_file = sp_file.replace('~~',str(sample_number)) # The link on cms.gov website for the following file has .csv.zip in it, so change the variable sp_file. # Also, the link for cms.gov has 'https' whereas the link for 'downloads.cms.gov' has 'http', so the # file_url has been modified based on the base_url. 
if sp_file == 'DE1_0_2008_to_2010_Carrier_Claims_Sample_11A.zip': # actual filename on CMS website has csv in it. sp_file = 'DE1_0_2008_to_2010_Carrier_Claims_Sample_11A.csv.zip' if base_url == url_downloads_cms_gov: #base urls have different protocols. one has http while other has https. file_url = 'http://{0}/{1}'.format(base_url, sp_file) elif base_url == url_www_cms_gov: file_url = 'https://{0}/{1}'.format(base_url, sp_file) if sp_file == 'DE1_0_2010_Beneficiary_Summary_File_Sample_1.zip': # website has a typo. this is a workaround file_url = 'https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/SynPUFs/Downloads/DE1_0_2010_Beneficiary_Summary_File_Sample_20.zip' if '.csv.zip' in sp_file: #downloaded file name shouldn't have .csv.zip. sp_file = sp_file.replace('.csv.zip', '.zip') file_local = os.path.join(download_directory,sp_file) # If the file already exists, let's not download it again # If a file is only partially downloaded, it will need to be deleted # before running this script again. if os.path.exists(file_local): print('..already exists: skipping', file_local) continue else: print('..downloading -> ', file_url) if sys.version_info[0] >= 3: ur.urlretrieve(file_url, filename=file_local) else: urllib.urlretrieve(file_url, filename=file_local) zipfile.ZipFile(file_local).extractall(download_directory) #--------------------------------------------------------------------------------------- # some files in the zipped folder have Copy.csv in their names. The following code will # read all the files in the download folder and remove Copy from file name. 
#--------------------------------------------------------------------------------------- for filename in os.listdir(download_directory): if ' - Copy.csv' in filename: filename1 = filename.replace(' - Copy.csv', '.csv') print ('..Renaming file ->', filename) o_filepath = os.path.join(download_directory, filename) # old file path n_filepath = os.path.join(download_directory, filename1) # new file path os.rename(o_filepath, n_filepath) # rename the old file #-- combine the beneficiary files combine_beneficiary_files(download_directory, sample_number) print(get_timestamp(),' Done') #----------------------------------- #- combine 3 beneficiary files into 1, with the year prefixed #----------------------------------- def combine_beneficiary_files(output_directory, sample_number): print('-'*80) print(get_timestamp(),' combine_beneficiary_files starting: sample_number=',sample_number) output_bene_filename = os.path.join(output_directory , 'DE1_0_comb_Beneficiary_Summary_File_Sample_{0}.csv'.format(sample_number)) print('Writing to ->',output_bene_filename) total_recs_in=0 total_recs_out=0 with open(output_bene_filename, 'w') as f_out: for year in ['2008','2009','2010']: input_bene_filename = os.path.join(output_directory, 'DE1_0_{0}_Beneficiary_Summary_File_Sample_{1}.csv'.format(year,sample_number)) print('Reading ->',input_bene_filename) recs_in=0 with open(input_bene_filename, 'r') as f_in: for line in f_in: tyear = year recs_in+=1 # We need to use the header line from the first # file we encounter to serve as the header line for the # combined file, but skip all other header lines in the # remaining files if recs_in == 1: if total_recs_out == 0: tyear = '"YEAR"' else: continue if recs_in % 25000 == 0: print('Year-{0}: records read ={1}, total written={2}'.format(year,recs_in, total_recs_out)) f_out.write(tyear + ',' + line) total_recs_out+=1 print('Year-{0}: total records read ={1}'.format(year,recs_in)) total_recs_in+=recs_in print(get_timestamp(),' Done: total 
records read ={0}, total records written={1}'.format(total_recs_in, total_recs_out)) #----------------------------------- #----------------------------------- if __name__ == '__main__': print(get_timestamp(),' Combine Beneficiary Year files...starting') print('OUTPUT_DIRECTORY =', OUTPUT_DIRECTORY) print('SAMPLE_RANGE =', SAMPLE_RANGE) #------ # download from CMS #------ for sample_number in SAMPLE_RANGE: download_synpuf_files(OUTPUT_DIRECTORY, sample_number) print(get_timestamp(),' Done')
{ "content_hash": "eadea5c5e8c371efa50230de265f3e5e", "timestamp": "", "source": "github", "line_count": 194, "max_line_length": 179, "avg_line_length": 47.24226804123711, "alnum_prop": 0.5788325150027278, "repo_name": "OHDSI/ETL-CMS", "id": "559236e9bad4b1ae5757e22bb2826d8a9598b4b2", "size": "9165", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/get_synpuf_files.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "193309" }, { "name": "Ruby", "bytes": "775" } ], "symlink_target": "" }
import os
import pytest
import shutil
import uuid

from django.conf import settings

from blog.models import Post
from core.services import (
    convert_to_mp3_preview,
    convert_to_ogg_preview,
    convert_to_ogg_release,
)


class FFMpegTest:
    """Integration tests for the ffmpeg-based audio conversion services.

    ``setup_class`` copies a fixture mp3 into ``MEDIA_ROOT/release`` so the
    conversion services have a real source file to work on; ``teardown_class``
    removes both working directories again.
    """

    # Both the release/ source dir and the preview/ output dir are created
    # before the tests and removed afterwards.
    _SUBDIRS = ("release", "preview")

    @classmethod
    # NOTE(review): pytest ignores marks on xunit-style setup methods, so this
    # django_db mark has no effect here -- confirm whether it is intentional.
    @pytest.mark.django_db
    def setup_class(cls):
        # Minimal kwargs for Post.objects.create(); "release" points at the
        # fixture file copied below.
        cls.data = {
            "name": str(uuid.uuid4()),
            "is_music": False,
            "release": "release/test.mp3",
        }

        for subdir in cls._SUBDIRS:
            path = os.path.join(settings.MEDIA_ROOT, subdir)
            if not os.path.exists(path):
                os.makedirs(path)

        # Place the fixture mp3 where the conversion services expect to
        # find release files.
        shutil.copy(
            os.path.join(settings.PROJECT_DIR, "static", "test", "test.mp3"),
            os.path.join(settings.MEDIA_ROOT, "release", "test.mp3"),
        )

    @classmethod
    def teardown_class(cls):
        # Drop both working directories created in setup_class.
        for subdir in cls._SUBDIRS:
            path = os.path.join(settings.MEDIA_ROOT, subdir)
            if os.path.exists(path):
                shutil.rmtree(path)

    @pytest.mark.django_db
    def test_convert_to_mp3_preview(self):
        post = Post.objects.create(**self.data)

        code = convert_to_mp3_preview(post.id)

        assert code == 0
        # Fixed: assert the field this service actually produces (the original
        # checked release_mp3_file here, a copy-paste slip -- the size check on
        # the next line already targets preview_mp3_file).
        assert post.preview_mp3_file is not None
        assert os.path.getsize(post.preview_mp3_file) > 0

    @pytest.mark.django_db
    def test_convert_to_ogg_release(self):
        post = Post.objects.create(**self.data)

        code = convert_to_ogg_release(post.id)

        assert code == 0
        # Fixed: assert release_ogg_file (the original checked
        # release_mp3_file, mirroring the copy-paste slip above).
        assert post.release_ogg_file is not None
        assert os.path.getsize(post.release_ogg_file) > 0

    @pytest.mark.django_db
    def test_convert_to_ogg_preview(self):
        post = Post.objects.create(**self.data)

        code = convert_to_ogg_preview(post.id)

        assert code == 0
        assert post.preview_ogg_file is not None
        assert os.path.getsize(post.preview_ogg_file) > 0
{ "content_hash": "24fff712dcc6602cf4b562c2ad36140d", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 77, "avg_line_length": 29.58108108108108, "alnum_prop": 0.6180904522613065, "repo_name": "manti-by/M2MICRO", "id": "04c6379b9237bd07ad9761c1ee2e3eb0e58c19cd", "size": "2189", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "app/blog/tests/test_ffmpeg.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "13675" }, { "name": "Batchfile", "bytes": "518" }, { "name": "CSS", "bytes": "32089" }, { "name": "HTML", "bytes": "53" }, { "name": "JavaScript", "bytes": "30285" }, { "name": "PHP", "bytes": "573567" }, { "name": "PLSQL", "bytes": "910" }, { "name": "SQLPL", "bytes": "17657" }, { "name": "Shell", "bytes": "13408" } ], "symlink_target": "" }
from midonetclient import resource_base
from midonetclient import vendor_media_type


class L2Service(resource_base.ResourceBase):
    """Client-side wrapper for a MidoNet L2Service REST resource.

    The resource state lives in ``self.dto`` (a dict); getters read keys
    from it directly and the fluent setters write a key and return ``self``
    so calls can be chained.
    """

    media_type = vendor_media_type.APPLICATION_L2SERVICE_JSON

    def __init__(self, uri, dto, auth):
        super(L2Service, self).__init__(uri, dto, auth)

    def _get(self, key):
        # Direct subscripting: raises KeyError when the field is absent,
        # exactly like reading self.dto[key] inline.
        return self.dto[key]

    def _set(self, key, value):
        # Write one DTO field and return self for fluent chaining.
        self.dto[key] = value
        return self

    def get_id(self):
        """Return the resource's 'id' field from the DTO."""
        return self._get('id')

    def get_port(self):
        """Return the resource's 'port' field from the DTO."""
        return self._get('port')

    def id(self, id):
        """Set the 'id' field; returns self for chaining."""
        return self._set('id', id)

    def port(self, port):
        """Set the 'port' field; returns self for chaining."""
        return self._set('port', port)
{ "content_hash": "95421afc398ee1feeb25367b62432617", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 61, "avg_line_length": 22.958333333333332, "alnum_prop": 0.6206896551724138, "repo_name": "celebdor/python-midonetclient", "id": "8e74dab7ce7ac3f3eb13347e3aabd24ffb0d663f", "size": "1197", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "src/midonetclient/l2service.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Protocol Buffer", "bytes": "896" }, { "name": "Python", "bytes": "439803" }, { "name": "Shell", "bytes": "3982" } ], "symlink_target": "" }
"""Exception types for the bitket ticketing API.

Classes deriving from DRF's APIException carry an HTTP status code and a
translated default message; the remaining classes are plain Python / Django
exceptions used for internal validation and session handling.
"""
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _

from rest_framework import status
from rest_framework.exceptions import APIException


class DefaultException(APIException):
    """Common base class for the API-facing exceptions in this module."""
    pass


class PaymentFailed(DefaultException):
    """The payment attempt failed (HTTP 402 Payment Required)."""
    status_code = status.HTTP_402_PAYMENT_REQUIRED
    default_detail = _('The payment failed.')


class EventProductLimitExceeded(DefaultException):
    """An order exceeds the per-event quantity limit (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Exceeding event quantity limit.')


class PersonalProductLimitExceeded(DefaultException):
    """An order exceeds the per-person quantity limit (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Exceeding personal quantity limit.')


class TotalProductLimitExceeded(DefaultException):
    """An order exceeds the total quantity limit (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Exceeding total quantity limit.')


class ModifiesHistory(DefaultException):
    """The requested change would modify historical data (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Modifies history.')


class MultipleOrganizations(DefaultException):
    """Tickets from more than one organization in a single order (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Tickets from multiple organizations ordered.')


class ConflictingTicketTypes(DefaultException):
    """Mutually exclusive ticket types selected together (HTTP 400)."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Conflicting ticket types selected.')


class InvalidSession(Exception):
    """Internal error: the session state is invalid (not an API response)."""
    pass


class InvalidVariationChoices(ValidationError):
    """Django validation error for invalid product variation choices."""
    pass


class ExceedsLimit(ValidationError):
    """Django validation error for a quantity exceeding an allowed limit."""
    pass
{ "content_hash": "e72dc0381c553fc8011b65ace3734e49", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 70, "avg_line_length": 26.142857142857142, "alnum_prop": 0.76775956284153, "repo_name": "ovidner/bitket", "id": "0235d99babd9925c008e8d19009a4e5e9ea0f6c7", "size": "1464", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "bitket/exceptions.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2021" }, { "name": "HTML", "bytes": "4404" }, { "name": "JavaScript", "bytes": "99011" }, { "name": "Python", "bytes": "138914" }, { "name": "Shell", "bytes": "68" } ], "symlink_target": "" }
class Image(object): """ """ def __init__(self, param=None, hdr=None, orientation=None, absolutepath="", verbose=1): from numpy import zeros, ndarray, generic from sct_utils import extract_fname from nibabel import AnalyzeHeader # initialization of all parameters self.verbose = verbose self.data = None self.absolutepath = "" self.path = "" self.file_name = "" self.ext = "" self.orientation = None if hdr == None: hdr = AnalyzeHeader() self.hdr = AnalyzeHeader() #an empty header else: self.hdr = hdr self.dim = None self.verbose = verbose # load an image from file if type(param) is str: self.loadFromPath(param, verbose) # copy constructor elif isinstance(param, type(self)): self.copy(param) # create an empty image (full of zero) of dimension [dim]. dim must be [x,y,z] or (x,y,z). No header. elif type(param) is list: self.data = zeros(param) self.dim = param self.hdr = hdr self.orientation = orientation self.absolutepath = absolutepath self.path, self.file_name, self.ext = extract_fname(absolutepath) # create a copy of im_ref elif isinstance(param, (ndarray, generic)): self.data = param self.dim = self.data.shape self.hdr = hdr self.orientation = orientation self.absolutepath = absolutepath self.path, self.file_name, self.ext = extract_fname(absolutepath) else: raise TypeError(' Image constructor takes at least one argument.') def __deepcopy__(self, memo): from copy import deepcopy return type(self)(deepcopy(self.data,memo),deepcopy(self.hdr,memo),deepcopy(self.orientation,memo),deepcopy(self.absolutepath,memo)) def copy(self, image=None): from copy import deepcopy from sct_utils import extract_fname if image is not None: self.data = deepcopy(image.data) self.dim = deepcopy(image.dim) self.hdr = deepcopy(image.hdr) self.orientation = deepcopy(image.orientation) self.absolutepath = deepcopy(image.absolutepath) self.path, self.file_name, self.ext = extract_fname(self.absolutepath) else: return deepcopy(self) def loadFromPath(self, path, verbose): """ This function load 
an image from an absolute path using nibabel library :param path: path of the file from which the image will be loaded :return: """ from nibabel import load, spatialimages from sct_utils import check_file_exist, printv, extract_fname, get_dimension from sct_orientation import get_orientation check_file_exist(path, verbose=verbose) try: im_file = load(path) except spatialimages.ImageFileError: printv('Error: make sure ' + path + ' is an image.', 1, 'error') self.orientation = get_orientation(path) self.data = im_file.get_data() self.hdr = im_file.get_header() self.absolutepath = path self.path, self.file_name, self.ext = extract_fname(path) nx, ny, nz, nt, px, py, pz, pt = get_dimension(path) self.dim = [nx, ny, nz] def setFileName(self, filename): from sct_utils import extract_fname self.absolutepath = filename self.path, self.file_name, self.ext = extract_fname(filename) def changeType(self, type=''): from numpy import uint8, uint16, uint32, uint64, int8, int16, int32, int64, float32, float64 """ Change the voxel type of the image :param type: if not set, the image is saved in standard type if 'minimize', image space is minimize if 'minimize_int', image space is minimize and values are approximated to integers (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"), (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), (1024,'int64', np.int64, "NIFTI_TYPE_INT64"), (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"), (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"), (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"), :return: """ if type == '': type = self.hdr.get_data_dtype() 
if type == 'minimize' or type == 'minimize_int': from numpy import nanmax, nanmin # compute max value in the image and choose the best pixel type to represent all the pixels within smallest memory space # warning: does not take intensity resolution into account, neither complex voxels max_vox = nanmax(self.data) min_vox = nanmin(self.data) # check if voxel values are real or integer isInteger = True if type == 'minimize': for vox in self.data.flatten(): if int(vox) != vox: isInteger = False break if isInteger: from numpy import iinfo, uint8, uint16, uint32, uint64 if min_vox >= 0: # unsigned if max_vox <= iinfo(uint8).max: type = 'uint8' elif max_vox <= iinfo(uint16): type = 'uint16' elif max_vox <= iinfo(uint32).max: type = 'uint32' elif max_vox <= iinfo(uint64).max: type = 'uint64' else: raise ValueError("Maximum value of the image is to big to be represented.") else: if max_vox <= iinfo(int8).max and min_vox >= iinfo(int8).min: type = 'int8' elif max_vox <= iinfo(int16).max and min_vox >= iinfo(int16).min: type = 'int16' elif max_vox <= iinfo(int32).max and min_vox >= iinfo(int32).min: type = 'int32' elif max_vox <= iinfo(int64).max and min_vox >= iinfo(int64).min: type = 'int64' else: raise ValueError("Maximum value of the image is to big to be represented.") else: from numpy import finfo, float32, float64 # if max_vox <= np.finfo(np.float16).max and min_vox >= np.finfo(np.float16).min: # type = 'np.float16' # not supported by nibabel if max_vox <= finfo(float32).max and min_vox >= finfo(float32).min: type = 'float32' elif max_vox <= finfo(float64).max and min_vox >= finfo(float64).min: type = 'float64' # print "The image has been set to "+type+" (previously "+str(self.hdr.get_data_dtype())+")" # change type of data in both numpy array and nifti header type_build = eval(type) self.data = type_build(self.data) self.hdr.set_data_dtype(type) def save(self, type=''): """ Write an image in a nifti file :param type: if not set, the image is saved in standard type if 
'minimize', image space is minimize (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"), (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), (1024,'int64', np.int64, "NIFTI_TYPE_INT64"), (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"), (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"), (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"), """ from nibabel import Nifti1Image, save from sct_utils import printv if type != '': self.changeType(type) self.hdr.set_data_shape(self.data.shape) img = Nifti1Image(self.data, None, self.hdr) printv('saving ' + self.path + self.file_name + self.ext + '\n', verbose=self.verbose, type='normal') save(img, self.path + self.file_name + self.ext) # flatten the array in a single dimension vector, its shape will be (d, 1) compared to the flatten built in method # which would have returned (d,) def flatten(self): # return self.data.flatten().reshape(self.data.flatten().shape[0], 1) return self.data.flatten() # return a list of the image slices flattened def slices(self): slices = [] for slc in self.data: slices.append(slc.flatten()) return slices def getNonZeroCoordinates(self, sorting=None, reverse_coord=False, coordValue=False): """ This function return all the non-zero coordinates that the image contains. Coordinate list can also be sorted by x, y, z, or the value with the parameter sorting='x', sorting='y', sorting='z' or sorting='value' If reverse_coord is True, coordinate are sorted from larger to smaller. 
""" from msct_types import Coordinate from sct_utils import printv try: if len(self.dim) == 3: X, Y, Z = (self.data > 0).nonzero() list_coordinates = [Coordinate([X[i], Y[i], Z[i], self.data[X[i], Y[i], Z[i]]]) for i in range(0, len(X))] elif len(self.dim) == 2: X, Y = (self.data > 0).nonzero() list_coordinates = [Coordinate([X[i], Y[i], self.data[X[i], Y[i]]]) for i in range(0, len(X))] except Exception, e: printv('ERROR: Exception ' + str(e) + ' caught while geting non Zeros coordinates', 1, 'error') if coordValue: from msct_types import CoordinateValue if len(self.dim) == 3: list_coordinates = [CoordinateValue([X[i], Y[i], Z[i], self.data[X[i], Y[i], Z[i]]]) for i in range(0, len(X))] else: list_coordinates = [CoordinateValue([X[i], Y[i], self.data[X[i], Y[i]]]) for i in range(0, len(X))] else: from msct_types import Coordinate if len(self.dim) == 3: list_coordinates = [Coordinate([X[i], Y[i], Z[i], self.data[X[i], Y[i], Z[i]]]) for i in range(0, len(X))] else: list_coordinates = [Coordinate([X[i], Y[i], self.data[X[i], Y[i]]]) for i in range(0, len(X))] if sorting is not None: if reverse_coord not in [True, False]: raise ValueError('reverse_coord parameter must be a boolean') if sorting == 'x': list_coordinates = sorted(list_coordinates, key=lambda obj: obj.x, reverse=reverse_coord) elif sorting == 'y': list_coordinates = sorted(list_coordinates, key=lambda obj: obj.y, reverse=reverse_coord) elif sorting == 'z': list_coordinates = sorted(list_coordinates, key=lambda obj: obj.z, reverse=reverse_coord) elif sorting == 'value': list_coordinates = sorted(list_coordinates, key=lambda obj: obj.value, reverse=reverse_coord) else: raise ValueError("sorting parameter must be either 'x', 'y', 'z' or 'value'") return list_coordinates # crop the image in order to keep only voxels in the mask, therefore the mask's slices must be squares or # rectangles of the same size #orientation must be IRP to be able to go trough slices as first dimension # This method is called in 
sct_crop_over_mask script def crop_from_square_mask(self, mask, save=True): from numpy import asarray, zeros data_array = self.data data_mask = mask.data assert self.orientation == 'IRP' assert mask.orientation == 'IRP' print 'ORIGINAL SHAPE: ', data_array.shape, ' == ', data_mask.shape #if the image to crop is smaller than the mask in total, we assume the image was centered and add a padding to fit the mask's shape if data_array.shape != data_mask.shape: old_data_array = data_array pad_1 = int((data_mask.shape[1] - old_data_array.shape[1])/2 + 1) pad_2 = int((data_mask.shape[2] - old_data_array.shape[2])/2 + 1) data_array = zeros(data_mask.shape) for n_slice, data_slice in enumerate(data_array): data_slice[pad_1:pad_1+old_data_array.shape[1], pad_2:pad_2+old_data_array.shape[2]] = old_data_array[n_slice] ''' for n_slice, data_slice in enumerate(data_array): n_row_old_data_array = 0 for row in data_slice[pad_2:-pad_2-1]: row[pad_1:pad_1 + old_data_array.shape[1]] = old_data_array[n_slice, n_row_old_data_array] n_row_old_data_array += 1 ''' self.data = data_array if save: self.file_name += '_resized' self.save() data_array = asarray(data_array) data_mask = asarray(data_mask) new_data = [] buffer = [] buffer_mask = [] if len(data_array.shape) == 3: for n_slice, mask_slice in enumerate(data_mask): for n_row, row in enumerate(mask_slice): if sum(row) > 0: # and n_row<=data_array.shape[1] and n_slice<=data_array.shape[0]: buffer_mask.append(row) buffer.append(data_array[n_slice][n_row]) new_slice_mask = asarray(buffer_mask).T new_slice = asarray(buffer).T buffer = [] for n_row, row in enumerate(new_slice_mask): if sum(row) != 0: buffer.append(new_slice[n_row]) new_slice = asarray(buffer).T buffer_mask = [] buffer = [] new_data.append(new_slice) elif len(data_array.shape) == 2: for n_row, row in enumerate(data_mask): if sum(row) > 0: # and n_row<=data_array.shape[1] and n_slice<=data_array.shape[0]: buffer_mask.append(row) buffer.append(data_array[n_row]) new_slice_mask 
= asarray(buffer_mask).T new_slice = asarray(buffer).T buffer = [] for n_row, row in enumerate(new_slice_mask): if sum(row) != 0: buffer.append(new_slice[n_row]) new_data = asarray(buffer).T buffer_mask = [] buffer = [] new_data = asarray(new_data) # print data_mask self.data = new_data self.dim = self.data.shape # crop the image in order to keep only voxels in the mask # doesn't change the image dimension # This method is called in sct_crop_over_mask script def crop_from_mask(self, mask): from numpy import asarray, einsum data_array = self.data data_mask = mask.data assert data_array.shape == data_mask.shape array = asarray(data_array) data_mask = asarray(data_mask) #Element-wise matrix multiplication: new_data = None if len(data_array.shape) == 3: new_data = einsum('ijk,ijk->ijk', data_mask, array) elif len(data_array.shape) == 2: new_data = einsum('ij,ij->ij', data_mask, array) print 'SHAPE ', new_data.shape self.data = new_data def denoise_ornlm(self): from ornlm import ornlm import numpy as np dat = self.data.astype(np.float64) denoised = np.array(ornlm.ornlm(dat, 3, 1, np.max(dat)*0.01)) self.file_name += '_denoised' self.data = denoised def invert(self): self.data = self.data.max() - self.data return self def change_orientation(self, orientation='RPI', inversion_orient=False): """ This function changes the orientation of the data by swapping the image axis. Warning: the nifti image header is not changed!!! :param orientation: string of three character representing the new orientation (ex: AIL, default: RPI) inversion_orient: boolean. If True, the data change to match the orientation in the header, based on the orientation provided as the argument orientation. 
:return: """ opposite_character = {'L': 'R', 'R': 'L', 'A': 'P', 'P': 'A', 'I': 'S', 'S': 'I'} if self.orientation is None: from sct_orientation import get_orientation self.orientation = get_orientation(self.file_name) if inversion_orient: temp_orientation = self.orientation self.orientation = orientation orientation = temp_orientation # change the orientation of the image perm = [0, 1, 2] inversion = [1, 1, 1] for i, character in enumerate(self.orientation): try: perm[i] = orientation.index(character) except ValueError: perm[i] = orientation.index(opposite_character[character]) inversion[i] = -1 # axes inversion self.data = self.data[::inversion[0], ::inversion[1], ::inversion[2]] # axes manipulations from numpy import swapaxes if perm == [1, 0, 2]: self.data = swapaxes(self.data, 0, 1) elif perm == [2, 1, 0]: self.data = swapaxes(self.data, 0, 2) elif perm == [0, 2, 1]: self.data = swapaxes(self.data, 1, 2) elif perm == [2, 1, 0]: self.data = swapaxes(self.data, 0, 2) elif perm == [2, 0, 1]: self.data = swapaxes(self.data, 0, 2) # transform [2, 0, 1] to [1, 0, 2] self.data = swapaxes(self.data, 0, 1) # transform [1, 0, 2] to [0, 1, 2] elif perm == [1, 2, 0]: self.data = swapaxes(self.data, 0, 2) # transform [1, 2, 0] to [0, 2, 1] self.data = swapaxes(self.data, 1, 2) # transform [0, 2, 1] to [0, 1, 2] elif perm == [0, 1, 2]: # do nothing pass else: print 'Error: wrong orientation' self.orientation = orientation def show(self): from matplotlib.pyplot import imshow, show imgplot = imshow(self.data) imgplot.set_cmap('gray') imgplot.set_interpolation('nearest') show() def transfo_pix2phys(self, coordi=None): """ This function returns the physical coordinates of all points of 'coordi'. 'coordi' is a list of list of size (nb_points * 3) containing the pixel coordinate of points. The function will return a list with the physical coordinates of the points in the space of the image. 
Example: img = Image('file.nii.gz') coordi_pix = [[1,1,1],[2,2,2],[4,4,4]] # for points: (1,1,1), (2,2,2) and (4,4,4) coordi_phys = img.transfo_pix2phys(coordi=coordi_pix) :return: """ from numpy import zeros, array, transpose, dot, asarray m_p2f = self.hdr.get_sform() m_p2f_transfo = m_p2f[0:3,0:3] coord_origin = array([[m_p2f[0, 3]],[m_p2f[1, 3]], [m_p2f[2, 3]]]) if coordi != None: coordi_pix = transpose(asarray(coordi)) coordi_phys = transpose(coord_origin + dot(m_p2f_transfo, coordi_pix)) coordi_phys_list = coordi_phys.tolist() return coordi_phys_list def transfo_phys2pix(self, coordi=None): """ This function returns the pixels coordinates of all points of 'coordi' 'coordi' is a list of list of size (nb_points * 3) containing the pixel coordinate of points. The function will return a list with the physical coordinates of the points in the space of the image. :return: """ from numpy import array, transpose, dot, asarray from numpy.linalg import inv m_p2f = self.hdr.get_sform() m_p2f_transfo = m_p2f[0:3,0:3] m_f2p_transfo = inv(m_p2f_transfo) coord_origin = array([[m_p2f[0, 3]],[m_p2f[1, 3]], [m_p2f[2, 3]]]) if coordi != None: coordi_phys = transpose(asarray(coordi)) coordi_pix = transpose(dot(m_f2p_transfo, (coordi_phys-coord_origin))) coordi_pix_tmp = coordi_pix.tolist() coordi_pix_list = [[int(round(coordi_pix_tmp[j][i])) for i in range(len(coordi_pix_tmp[j]))] for j in range(len(coordi_pix_tmp))] return coordi_pix_list def transfo_phys2continuouspix(self, coordi=None, data_phys=None): """ This function returns the pixels coordinates of all points of data_pix in the space of the image. The output is a matrix of size: size(data_phys) but containing a 3D vector. This vector is the pixel position of the point in the space of the image. data_phys must be an array of 3 dimensions for which each point contains a vector (physical position of the point). 
If coordi is different from none: coordi is a list of list of size (nb_points * 3) containing the pixel coordinate of points. The function will return a list with the physical coordinates of the points in the space of the image. :return: """ from numpy import array, transpose, dot, asarray from numpy.linalg import inv from copy import copy m_p2f = self.hdr.get_sform() m_p2f_transfo = m_p2f[0:3, 0:3] m_f2p_transfo = inv(m_p2f_transfo) # e = dot(m_p2f_transfo, m_f2p_transfo) coord_origin = array([[m_p2f[0, 3]], [m_p2f[1, 3]], [m_p2f[2, 3]]]) if coordi != None: coordi_phys = transpose(asarray(coordi)) coordi_pix = transpose(dot(m_f2p_transfo, (coordi_phys - coord_origin))) coordi_pix_tmp = coordi_pix.tolist() coordi_pix_list = [[coordi_pix_tmp[j][i] for i in range(len(coordi_pix_tmp[j]))] for j in range(len(coordi_pix_tmp))] return coordi_pix_list def pad_image(fname_in, file_out, padding): import sct_utils as sct sct.run('isct_c3d '+fname_in+' -pad 0x0x'+str(padding)+'vox 0x0x'+str(padding)+'vox 0 -o '+file_out, 1) return def find_zmin_zmax(fname): import sct_utils as sct # crop image status, output = sct.run('sct_crop_image -i '+fname+' -dim 2 -bmax -o tmp.nii') # parse output zmin, zmax = output[output.find('Dimension 2: ')+13:].split('\n')[0].split(' ') return int(zmin), int(zmax) # ======================================================================================================================= # Start program #======================================================================================================================= if __name__ == "__main__": from msct_parser import Parser import sys parser = Parser(__file__) parser.usage.set_description('Image') parser.add_option("-i", "file", "file", True) arguments = parser.parse(sys.argv[1:]) image = Image(arguments["-i"]) image.changeType('minimize')
{ "content_hash": "9b8dd53f7fdcf906a22663a11d1f4d31", "timestamp": "", "source": "github", "line_count": 562, "max_line_length": 204, "avg_line_length": 43.86476868327402, "alnum_prop": 0.5487587214019146, "repo_name": "3324fr/spinalcordtoolbox", "id": "a3471605f7cea992a8a0b4e68d189794c6013d26", "size": "25164", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dev/sct_segment_gray_matter_asman/2015-08-17-full_scripts_before_cleaning/msct_image.py", "mode": "33261", "license": "mit", "language": [ { "name": "C", "bytes": "5961" }, { "name": "C++", "bytes": "1025992" }, { "name": "CMake", "bytes": "18919" }, { "name": "CSS", "bytes": "1384" }, { "name": "Groff", "bytes": "3141" }, { "name": "HTML", "bytes": "5315" }, { "name": "JavaScript", "bytes": "2505" }, { "name": "KiCad", "bytes": "5522" }, { "name": "Matlab", "bytes": "275100" }, { "name": "Python", "bytes": "4808677" }, { "name": "Shell", "bytes": "193192" } ], "symlink_target": "" }
""" Given a collection of intervals, merge all overlapping intervals. For example, Given [1,3],[2,6],[8,10],[15,18], return [1,6],[8,10],[15,18]. """ __author__ = 'Danyang' # Definition for an interval. class Interval(object): def __init__(self, s=0, e=0): self.start = s self.end = e class Solution(object): def merge(self, itvls): """ scanning. No algorithm math :param itvls: a list of Interval :return: a list of Interval """ if not itvls: return [] itvls.sort(key=lambda x: x.start) # sort first, since time complexity less than brute force ret = [itvls[0]] for cur in itvls[1:]: pre = ret[-1] if cur.start <= pre.end: # overlap pre.end = max(pre.end, cur.end) else: ret.append(cur) return ret def merge_error(self, itvls): """ scanning. No algorithm math :param itvls: a list of Interval :return: a list of Interval """ if not itvls: return [] ret = [itvls[0]] for interval in itvls[1:]: if ret[-1].end < interval.start: ret.append(interval) continue if ret[-1].start <= interval.start <= ret[-1].end <= interval.end: ret[-1].end = interval.end continue if interval.start <= ret[-1].start and ret[-1].end <= interval.end: ret[-1] = interval continue if ret[-1].start <= interval.start < ret[-1].end and ret[-1].start <= interval.end < ret[-1].end: ret.append(interval) continue if interval.start < ret[-1].start <= interval.end < ret[-1].end: ret[-1].start = interval.start continue if interval.end < ret[-1].start: ret.append(ret) continue return ret
{ "content_hash": "1dcff6fb2365de9036053076b5a7b9bb", "timestamp": "", "source": "github", "line_count": 71, "max_line_length": 109, "avg_line_length": 29.47887323943662, "alnum_prop": 0.4773053033922599, "repo_name": "algorhythms/LeetCode", "id": "554aefbaceec5d3d1a8211945a01d8010f29ea30", "size": "2093", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "055 Merge Intervals.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1444167" } ], "symlink_target": "" }
""" Implementation of a TableEditor demo plugin for Traits UI demo program This demo shows the full behavior of a straightforward TableEditor. Only one style of TableEditor is implemented, so that is the one shown. """ # Import statements: from traits.api \ import HasTraits, HasStrictTraits, Str, Int, Regex, List from traitsui.api \ import View, Group, Item, TableEditor from traitsui.table_column \ import ObjectColumn, ExpressionColumn from traitsui.table_filter \ import EvalFilterTemplate, MenuFilterTemplate, RuleFilterTemplate, \ EvalTableFilter # A helper class for the 'Department' class below: class Employee(HasTraits): first_name = Str last_name = Str age = Int phone = Regex(value='000-0000', regex='\d\d\d[-]\d\d\d\d') traits_view = View( 'first_name', 'last_name', 'age', 'phone', title='Create new employee', width=0.18, buttons=['OK', 'Cancel'] ) # The definition of the demo TableEditor: table_editor = TableEditor( columns=[ObjectColumn(name='first_name', width=0.20), ObjectColumn(name='last_name', width=0.20), ExpressionColumn( label='Full Name', width=0.30, expression="'%s %s' % (object.first_name, " "object.last_name )"), ObjectColumn(name='age', width=0.10, horizontal_alignment='center'), ObjectColumn(name='phone', width=0.20)], deletable=True, sort_model=True, auto_size=False, orientation='vertical', edit_view=View( Group('first_name', 'last_name', 'age', 'phone', show_border=True ), resizable=True ), filters=[EvalFilterTemplate, MenuFilterTemplate, RuleFilterTemplate], search=EvalTableFilter(), show_toolbar=True, row_factory=Employee) # The class to be edited with the TableEditor: class Department(HasStrictTraits): employees = List(Employee) traits_view = View( Group( Item('employees', show_label=False, editor=table_editor ), show_border=True, ), title='TableEditor', width=.4, height=.4, resizable=True, buttons=['OK'], kind='live' ) # Create some employees: employees = [ Employee(first_name='Jason', last_name='Smith', age=32, phone='555-1111'), 
Employee(first_name='Mike', last_name='Tollan', age=34, phone='555-2222'), Employee(first_name='Dave', last_name='Richards', age=42, phone='555-3333'), Employee(first_name='Lyn', last_name='Spitz', age=40, phone='555-4444'), Employee(first_name='Greg', last_name='Andrews', age=45, phone='555-5555') ] # Create the demo: demo = Department(employees=employees) # Run the demo (if invoked from the command line): if __name__ == '__main__': demo.configure_traits()
{ "content_hash": "f16fd246d6b412baecb7de5526d516c0", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 73, "avg_line_length": 27.897196261682243, "alnum_prop": 0.6050251256281407, "repo_name": "marshallmcdonnell/interactive_plotting", "id": "85f53499ef76377c0e6c3ee665b9a9cfff24f31f", "size": "3071", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TraitsUI/examples/TableEditor_demo.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "210205" } ], "symlink_target": "" }
import sys def main(filepath): with open(filepath, 'r') as f: for line in f.readlines(): if line: line = line.strip() print number_in_words(int(line)) + 'Dollars' def number_in_words(number): # set the word lists numbers = ['Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen',] tens_words = ['Ten', 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety',] power_words = ['Hundred', 'Thousand', 'Million',] # initialise empty result result = '' # how many millions? millions = number // 10**6 number -= millions * 10**6 if millions > 0: result += number_in_words(millions) + power_words[2] # how many thousands? thousands = number // 10**3 number -= thousands * 10**3 if thousands > 0: result += number_in_words(thousands) + power_words[1] # how many hundreds? hundreds = number // 10**2 number -= hundreds * 10**2 if hundreds > 0: result += numbers[hundreds] + power_words[0] # how many tens? tens = number // 10 if tens > 1: number -= tens * 10 result += tens_words[tens-1] # how many singles? singles = number if singles > 0: result += numbers[singles] elif result == '' and singles == 0: result = numbers[singles] return result if __name__ == '__main__': main(sys.argv[1])
{ "content_hash": "9588940a971ad6d3c58bea8af98c6c7b", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 64, "avg_line_length": 26.4375, "alnum_prop": 0.5153664302600472, "repo_name": "tdsymonds/codeeval", "id": "f767656d99cf6f6944719407443635ce02aa9a99", "size": "1692", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/hard/(52) text-dollar.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "574" }, { "name": "Java", "bytes": "5353" }, { "name": "JavaScript", "bytes": "316" }, { "name": "Python", "bytes": "73316" } ], "symlink_target": "" }
from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup # fetch values from package.xml setup_args = generate_distutils_setup( packages=['exotica_examples_py'], package_dir={'': 'src'}) setup(**setup_args)
{ "content_hash": "b74ed29d68f408ac8b739df0e655af59", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 60, "avg_line_length": 28.22222222222222, "alnum_prop": 0.7401574803149606, "repo_name": "openhumanoids/exotica", "id": "713b24095f409f567798672f03459b318e3c1236", "size": "317", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "exotica_examples/setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "790420" }, { "name": "C++", "bytes": "660812" }, { "name": "CMake", "bytes": "24726" }, { "name": "Python", "bytes": "21562" }, { "name": "Shell", "bytes": "1270" }, { "name": "TeX", "bytes": "44880" } ], "symlink_target": "" }
from __future__ import absolute_import from ._example import file_example __all__ = ["file_example"]
{ "content_hash": "775cc54c50b789018e7ec5a88fb8abe1", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 38, "avg_line_length": 20.6, "alnum_prop": 0.6990291262135923, "repo_name": "Horta/limix", "id": "654925a0afabfb8839d89fabe829372cef29d0ea", "size": "103", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "limix/example/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1842" }, { "name": "C", "bytes": "1550482" }, { "name": "C++", "bytes": "8073525" }, { "name": "CMake", "bytes": "21097" }, { "name": "Fortran", "bytes": "363470" }, { "name": "M4", "bytes": "16520" }, { "name": "Makefile", "bytes": "11605" }, { "name": "Matlab", "bytes": "25435" }, { "name": "PowerShell", "bytes": "3104" }, { "name": "Python", "bytes": "1735015" }, { "name": "Roff", "bytes": "66747" }, { "name": "Shell", "bytes": "18797" }, { "name": "TeX", "bytes": "26251" } ], "symlink_target": "" }
''' sota.parser ''' from .parser import SotaParser
{ "content_hash": "be416e83b754318d0d7e4c8dffb209b0", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 30, "avg_line_length": 10.4, "alnum_prop": 0.6923076923076923, "repo_name": "sota/lang", "id": "aefdd6daa205cb6495ef9621ce2f80779ff3390a", "size": "101", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sota/parser/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "3192" }, { "name": "C++", "bytes": "11032" }, { "name": "Makefile", "bytes": "1761" }, { "name": "Python", "bytes": "22933" }, { "name": "Ragel", "bytes": "5337" }, { "name": "Shell", "bytes": "902" } ], "symlink_target": "" }
inversion_inicial = float(raw_input("Inversion inicial: ")) tasa = int(raw_input("% tasa de descuento: ")) #definimos algunas variables van = -inversion_inicial #La inversion inicial es negativa mes = 1 #Preguntamos por cada mes, hasta que VAN quede positivo. while (van < 0): flujo_mes = int(raw_input("Flujo mes "+str(mes)+": ")) van += flujo_mes/((1+(tasa/100.0))**mes) print "VAN: "+str(int(van)) #Mostramos la parte entera del float van mes+=1;
{ "content_hash": "6fd3fc05aff33b4c6fef4881773dd95b", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 69, "avg_line_length": 37.833333333333336, "alnum_prop": 0.6938325991189427, "repo_name": "csaldias/python-usm", "id": "c5177b9f46852af4a90a7c5e732ce5ef1c7f22e3", "size": "511", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Certámenes resueltos/Certamen 1 2011-1/pregunta-4.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "139112" } ], "symlink_target": "" }
from __future__ import print_function import sys from term2048.game import Game # set this to true when unit testing debug = False __has_argparse = True try: import argparse except ImportError: __has_argparse = False def __print_argparse_warning(): """print a warning for Python 2.6 users who don't have argparse""" print("""WARNING: You seems to be running Python 2.6 without 'argparse'. Please install the module so I can handle your options: [sudo] pip install argparse I'll continue without processing any option.""") def print_version_and_exit(): from term2048 import __version__ print("term2048 v%s" % __version__) sys.exit(0) def print_rules_and_exit(): print("""Use your arrow keys to move the tiles. When two tiles with the same value touch they merge into one with the sum of their value! Try to reach 2048 to win.""") sys.exit(0) def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--version', action='store_true') parser.add_argument('--rules', action='store_true') return vars(parser.parse_args()) def start_game(): """start a new game""" if not __has_argparse: __print_argparse_warning() args = {} else: args = parse_cli_args() if args['version']: print_version_and_exit() if args['rules']: print_rules_and_exit() if not debug: for _ in range(1): Game(**args).loop()
{ "content_hash": "620cedd44f4931af2a5d9fb43d35353c", "timestamp": "", "source": "github", "line_count": 66, "max_line_length": 77, "avg_line_length": 28.303030303030305, "alnum_prop": 0.6215203426124197, "repo_name": "davidnk/term2048solver", "id": "22f074c974d91088ae1ccbd2f87e51a74143007f", "size": "1892", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "term2048/ui.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42997" } ], "symlink_target": "" }
import socket from django.core.management import call_command from django.template.loader import render_to_string from dimagi.utils import gitinfo from django.core.management.base import BaseCommand from corehq.apps.hqadmin.models import HqDeploy from datetime import datetime from optparse import make_option from django.conf import settings from pillow_retry.models import PillowError class Command(BaseCommand): help = "Creates an HqDeploy document to record a successful deployment." args = "[user]" option_list = BaseCommand.option_list + ( make_option('--user', help='User', default=False), make_option('--environment', help='Environment {production|staging etc...}', default=settings.SERVER_ENVIRONMENT), make_option('--mail_admins', help='Mail Admins', default=False, action='store_true'), make_option('--url', help='A link to a URL for the deploy', default=False), ) def handle(self, *args, **options): root_dir = settings.FILEPATH git_snapshot = gitinfo.get_project_snapshot(root_dir, submodules=True) git_snapshot['diff_url'] = options.get('url', None) deploy = HqDeploy( date=datetime.utcnow(), user=options['user'], environment=options['environment'], code_snapshot=git_snapshot, ) deploy.save() # reset PillowTop errors in the hope that a fix has been deployed rows_updated = PillowError.bulk_reset_attempts(datetime.utcnow()) if rows_updated: print "\n---------------- Pillow Errors Reset ----------------\n" \ "{} pillow errors queued for retry\n".format(rows_updated) if options['mail_admins']: snapshot_table = render_to_string('hqadmin/partials/project_snapshot.html', dictionary={'snapshot': git_snapshot}) message = "Deployed by %s, cheers!" % options['user'] snapshot_body = "<html><head><title>Deploy Snapshot</title></head><body><h2>%s</h2>%s</body></html>" % (message, snapshot_table) call_command('mail_admins', snapshot_body, **{'subject': 'Deploy successful', 'html': True})
{ "content_hash": "cbb98557633066c50703d24677897120", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 140, "avg_line_length": 44.38775510204081, "alnum_prop": 0.6501149425287356, "repo_name": "puttarajubr/commcare-hq", "id": "9f055758f7bfa332ab59447d21ad8366abf04f0b", "size": "2175", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "corehq/apps/hqadmin/management/commands/record_deploy_success.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ActionScript", "bytes": "15950" }, { "name": "CSS", "bytes": "581878" }, { "name": "HTML", "bytes": "2790361" }, { "name": "JavaScript", "bytes": "2572023" }, { "name": "Makefile", "bytes": "3999" }, { "name": "Python", "bytes": "11275678" }, { "name": "Shell", "bytes": "23890" } ], "symlink_target": "" }
"""Domain objects relating to stories.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import copy import json import re from constants import constants from core.domain import android_validation_constants from core.domain import change_domain from core.domain import html_cleaner from core.domain import html_validation_service import feconf import python_utils import utils # Do not modify the values of these constants. This is to preserve backwards # compatibility with previous change dicts. STORY_PROPERTY_TITLE = 'title' STORY_PROPERTY_THUMBNAIL_BG_COLOR = 'thumbnail_bg_color' STORY_PROPERTY_THUMBNAIL_FILENAME = 'thumbnail_filename' STORY_PROPERTY_DESCRIPTION = 'description' STORY_PROPERTY_NOTES = 'notes' STORY_PROPERTY_LANGUAGE_CODE = 'language_code' STORY_PROPERTY_URL_FRAGMENT = 'url_fragment' STORY_PROPERTY_META_TAG_CONTENT = 'meta_tag_content' STORY_NODE_PROPERTY_DESTINATION_NODE_IDS = 'destination_node_ids' STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS = 'acquired_skill_ids' STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS = 'prerequisite_skill_ids' STORY_NODE_PROPERTY_OUTLINE = 'outline' STORY_NODE_PROPERTY_TITLE = 'title' STORY_NODE_PROPERTY_DESCRIPTION = 'description' STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR = 'thumbnail_bg_color' STORY_NODE_PROPERTY_THUMBNAIL_FILENAME = 'thumbnail_filename' STORY_NODE_PROPERTY_EXPLORATION_ID = 'exploration_id' INITIAL_NODE_ID = 'initial_node_id' NODE = 'node' CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version' # These take additional 'property_name' and 'new_value' parameters and, # optionally, 'old_value'. CMD_UPDATE_STORY_PROPERTY = 'update_story_property' CMD_UPDATE_STORY_NODE_PROPERTY = 'update_story_node_property' CMD_UPDATE_STORY_CONTENTS_PROPERTY = 'update_story_contents_property' # These take node_id as parameter. 
CMD_ADD_STORY_NODE = 'add_story_node' CMD_DELETE_STORY_NODE = 'delete_story_node' CMD_UPDATE_STORY_NODE_OUTLINE_STATUS = 'update_story_node_outline_status' # This takes additional 'title' parameters. CMD_CREATE_NEW = 'create_new' CMD_CHANGE_ROLE = 'change_role' ROLE_MANAGER = 'manager' ROLE_NONE = 'none' # The prefix for all node ids of a story. NODE_ID_PREFIX = 'node_' class StoryChange(change_domain.BaseChange): """Domain object for changes made to story object. The allowed commands, together with the attributes: - 'add_story_node' (with node_id, title) - 'delete_story_node' (with node_id) - 'update_story_node_outline_status' (with node_id, old_value and new_value) - 'update_story_property' (with property_name, new_value and old_value) - 'update_story_node_property' (with property_name, new_value and old_value) - 'update_story_contents_property' (with property_name, new_value and old_value) - 'migrate_schema_to_latest_version' (with from_version and to_version) - 'create_new' (with title) """ # The allowed list of story properties which can be used in # update_story_property command. STORY_PROPERTIES = ( STORY_PROPERTY_TITLE, STORY_PROPERTY_THUMBNAIL_BG_COLOR, STORY_PROPERTY_THUMBNAIL_FILENAME, STORY_PROPERTY_DESCRIPTION, STORY_PROPERTY_NOTES, STORY_PROPERTY_LANGUAGE_CODE, STORY_PROPERTY_URL_FRAGMENT, STORY_PROPERTY_META_TAG_CONTENT) # The allowed list of story node properties which can be used in # update_story_node_property command. STORY_NODE_PROPERTIES = ( STORY_NODE_PROPERTY_DESTINATION_NODE_IDS, STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS, STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS, STORY_NODE_PROPERTY_OUTLINE, STORY_NODE_PROPERTY_EXPLORATION_ID, STORY_NODE_PROPERTY_TITLE, STORY_NODE_PROPERTY_DESCRIPTION, STORY_NODE_PROPERTY_THUMBNAIL_BG_COLOR, STORY_NODE_PROPERTY_THUMBNAIL_FILENAME) # The allowed list of story content properties which can be used in # update_story_contents_property command. 
STORY_CONTENTS_PROPERTIES = (INITIAL_NODE_ID, NODE, ) ALLOWED_COMMANDS = [{ 'name': CMD_UPDATE_STORY_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': STORY_PROPERTIES} }, { 'name': CMD_UPDATE_STORY_NODE_PROPERTY, 'required_attribute_names': [ 'node_id', 'property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': STORY_NODE_PROPERTIES} }, { 'name': CMD_UPDATE_STORY_CONTENTS_PROPERTY, 'required_attribute_names': ['property_name', 'new_value', 'old_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [], 'allowed_values': {'property_name': STORY_CONTENTS_PROPERTIES} }, { 'name': CMD_ADD_STORY_NODE, 'required_attribute_names': ['node_id', 'title'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_DELETE_STORY_NODE, 'required_attribute_names': ['node_id'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_UPDATE_STORY_NODE_OUTLINE_STATUS, 'required_attribute_names': ['node_id', 'old_value', 'new_value'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_CREATE_NEW, 'required_attribute_names': ['title'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }, { 'name': CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION, 'required_attribute_names': ['from_version', 'to_version'], 'optional_attribute_names': [], 'user_id_attribute_names': [] }] class StoryNode(python_utils.OBJECT): """Domain object describing a node in the exploration graph of a story. """ def __init__( self, node_id, title, description, thumbnail_filename, thumbnail_bg_color, destination_node_ids, acquired_skill_ids, prerequisite_skill_ids, outline, outline_is_finalized, exploration_id): """Initializes a StoryNode domain object. Args: node_id: str. The unique id for each node. title: str. 
The title of the story node. description: str. The description for the story node. thumbnail_filename: str|None. The thumbnail filename of the story node. thumbnail_bg_color: str|None. The thumbnail background color of the story node. destination_node_ids: list(str). The list of destination node ids that this node points to in the story graph. acquired_skill_ids: list(str). The list of skill ids acquired by the user on completion of the node. prerequisite_skill_ids: list(str). The list of skill ids required before starting a node. outline: str. Free-form annotations that a lesson implementer can use to construct the exploration. It describes the basic theme or template of the story and is to be provided in html form. outline_is_finalized: bool. Whether the outline for the story node is finalized or not. exploration_id: str or None. The valid exploration id that fits the story node. It can be None initially, when the story creator has just created a story with the basic storyline (by providing outlines) without linking an exploration to any node. """ self.id = node_id self.title = title self.description = description self.thumbnail_filename = thumbnail_filename self.thumbnail_bg_color = thumbnail_bg_color self.destination_node_ids = destination_node_ids self.acquired_skill_ids = acquired_skill_ids self.prerequisite_skill_ids = prerequisite_skill_ids self.outline = html_cleaner.clean(outline) self.outline_is_finalized = outline_is_finalized self.exploration_id = exploration_id @classmethod def get_number_from_node_id(cls, node_id): """Decodes the node_id to get the number at the end of the id. Args: node_id: str. The id of the node. Returns: int. The number at the end of the id. """ return int(node_id.replace(NODE_ID_PREFIX, '')) @classmethod def get_incremented_node_id(cls, node_id): """Increments the next node id of the story. Args: node_id: str. The node id to be incremented. Returns: str. The new next node id. 
""" current_number = StoryNode.get_number_from_node_id(node_id) incremented_node_id = NODE_ID_PREFIX + python_utils.UNICODE( current_number + 1) return incremented_node_id @classmethod def require_valid_node_id(cls, node_id): """Validates the node id for a StoryNode object. Args: node_id: str. The node id to be validated. """ if not isinstance(node_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected node ID to be a string, received %s' % node_id) pattern = re.compile('%s[0-9]+' % NODE_ID_PREFIX) if not pattern.match(node_id): raise utils.ValidationError( 'Invalid node_id: %s' % node_id) @classmethod def require_valid_thumbnail_filename(cls, thumbnail_filename): """Checks whether the thumbnail filename of the node is a valid one. Args: thumbnail_filename: str. The thumbnail filename to validate. """ utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): """Checks whether the thumbnail background color of the story node is a valid one. Args: thumbnail_bg_color: str. The thumbnail background color to validate. Returns: bool. Whether the thumbnail background color is valid or not. """ return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'chapter'] def to_dict(self): """Returns a dict representing this StoryNode domain object. Returns: dict. A dict, mapping all fields of StoryNode instance. """ return { 'id': self.id, 'title': self.title, 'description': self.description, 'thumbnail_filename': self.thumbnail_filename, 'thumbnail_bg_color': self.thumbnail_bg_color, 'destination_node_ids': self.destination_node_ids, 'acquired_skill_ids': self.acquired_skill_ids, 'prerequisite_skill_ids': self.prerequisite_skill_ids, 'outline': self.outline, 'outline_is_finalized': self.outline_is_finalized, 'exploration_id': self.exploration_id } @classmethod def from_dict(cls, node_dict): """Return a StoryNode domain object from a dict. Args: node_dict: dict. 
The dict representation of StoryNode object. Returns: StoryNode. The corresponding StoryNode domain object. """ node = cls( node_dict['id'], node_dict['title'], node_dict['description'], node_dict['thumbnail_filename'], node_dict['thumbnail_bg_color'], node_dict['destination_node_ids'], node_dict['acquired_skill_ids'], node_dict['prerequisite_skill_ids'], node_dict['outline'], node_dict['outline_is_finalized'], node_dict['exploration_id']) return node @classmethod def create_default_story_node(cls, node_id, title): """Returns a StoryNode domain object with default values. Args: node_id: str. The id of the node. title: str. The title of the node. Returns: StoryNode. The StoryNode domain object with default value. """ return cls( node_id, title, '', None, None, [], [], [], '', False, None) def validate(self): """Validates various properties of the story node. Raises: ValidationError. One or more attributes of the story node are invalid. """ if self.exploration_id: if not isinstance(self.exploration_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected exploration ID to be a string, received %s' % self.exploration_id) self.require_valid_thumbnail_filename(self.thumbnail_filename) if self.thumbnail_bg_color is not None and not ( self.require_valid_thumbnail_bg_color(self.thumbnail_bg_color)): raise utils.ValidationError( 'Chapter thumbnail background color %s is not supported.' 
% ( self.thumbnail_bg_color)) if self.thumbnail_bg_color and self.thumbnail_filename is None: raise utils.ValidationError( 'Chapter thumbnail image is not provided.') if self.thumbnail_filename and self.thumbnail_bg_color is None: raise utils.ValidationError( 'Chapter thumbnail background color is not specified.') if self.exploration_id == '': raise utils.ValidationError( 'Expected exploration ID to not be an empty string, ' 'received %s' % self.exploration_id) if not isinstance(self.outline, python_utils.BASESTRING): raise utils.ValidationError( 'Expected outline to be a string, received %s' % self.outline) if not isinstance(self.title, python_utils.BASESTRING): raise utils.ValidationError( 'Expected title to be a string, received %s' % self.title) if not isinstance(self.description, python_utils.BASESTRING): raise utils.ValidationError( 'Expected description to be a string, received %s' % self.description) description_length_limit = ( android_validation_constants.MAX_CHARS_IN_CHAPTER_DESCRIPTION) if len(self.description) > description_length_limit: raise utils.ValidationError( 'Chapter description should be less than %d chars, received %s' % (description_length_limit, self.description)) title_limit = android_validation_constants.MAX_CHARS_IN_CHAPTER_TITLE if len(self.title) > title_limit: raise utils.ValidationError( 'Chapter title should be less than %d chars, received %s' % (title_limit, self.title)) if not isinstance(self.outline_is_finalized, bool): raise utils.ValidationError( 'Expected outline_is_finalized to be a boolean, received %s' % self.outline_is_finalized) self.require_valid_node_id(self.id) if not isinstance(self.prerequisite_skill_ids, list): raise utils.ValidationError( 'Expected prerequisite skill ids to be a list, received %s' % self.prerequisite_skill_ids) for skill_id in self.prerequisite_skill_ids: if not isinstance(skill_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected each prerequisite skill id to be a string, ' 
'received %s' % skill_id) if ( len(self.prerequisite_skill_ids) > len(set(self.prerequisite_skill_ids))): raise utils.ValidationError( 'Expected all prerequisite skills to be distinct.') if not isinstance(self.acquired_skill_ids, list): raise utils.ValidationError( 'Expected acquired skill ids to be a list, received %s' % self.acquired_skill_ids) for skill_id in self.acquired_skill_ids: if not isinstance(skill_id, python_utils.BASESTRING): raise utils.ValidationError( 'Expected each acquired skill id to be a string, ' 'received %s' % skill_id) if ( len(self.acquired_skill_ids) > len(set(self.acquired_skill_ids))): raise utils.ValidationError( 'Expected all acquired skills to be distinct.') for skill_id in self.prerequisite_skill_ids: if skill_id in self.acquired_skill_ids: raise utils.ValidationError( 'Expected prerequisite skill ids and acquired skill ids ' 'to be mutually exclusive. The skill_id %s intersects ' % skill_id) if not isinstance(self.destination_node_ids, list): raise utils.ValidationError( 'Expected destination node ids to be a list, received %s' % self.destination_node_ids) for node_id in self.destination_node_ids: self.require_valid_node_id(node_id) if node_id == self.id: raise utils.ValidationError( 'The story node with ID %s points to itself.' % node_id) class StoryContents(python_utils.OBJECT): """Domain object representing the story_contents dict.""" def __init__(self, story_nodes, initial_node_id, next_node_id): """Constructs a StoryContents domain object. Args: story_nodes: list(StoryNode). The list of story nodes that are part of this story. initial_node_id: str. The id of the starting node of the story. next_node_id: str. The id for the next node to be added to the story. """ self.initial_node_id = initial_node_id self.nodes = story_nodes self.next_node_id = next_node_id def validate(self): """Validates various properties of the story contents object. Raises: ValidationError. One or more attributes of the story contents are invalid. 
""" if not isinstance(self.nodes, list): raise utils.ValidationError( 'Expected nodes field to be a list, received %s' % self.nodes) if len(self.nodes) > 0: StoryNode.require_valid_node_id(self.initial_node_id) StoryNode.require_valid_node_id(self.next_node_id) initial_node_is_present = False node_id_list = [] node_title_list = [] for node in self.nodes: if not isinstance(node, StoryNode): raise utils.ValidationError( 'Expected each node to be a StoryNode object, received %s' % node) node.validate() for destination_node_id in node.destination_node_ids: if python_utils.NEXT(( node for node in self.nodes if node.id == destination_node_id), None) is None: raise utils.ValidationError( 'Expected all destination nodes to exist') if node.id == self.initial_node_id: initial_node_is_present = True # Checks whether the number in the id of any node is greater than # the value of next_node_id. if (StoryNode.get_number_from_node_id(node.id) >= StoryNode.get_number_from_node_id(self.next_node_id)): raise utils.ValidationError( 'The node with id %s is out of bounds.' % node.id) node_id_list.append(node.id) node_title_list.append(node.title) if len(self.nodes) > 0: if not initial_node_is_present: raise utils.ValidationError('Expected starting node to exist.') if len(node_id_list) > len(set(node_id_list)): raise utils.ValidationError( 'Expected all node ids to be distinct.') if len(node_title_list) > len(set(node_title_list)): raise utils.ValidationError( 'Expected all chapter titles to be distinct.') # nodes_queue stores the pending nodes to visit in the story that # are unlocked, in a 'queue' form with a First In First Out # structure. nodes_queue = [] is_node_visited = [False] * len(self.nodes) starting_node_index = self.get_node_index(self.initial_node_id) nodes_queue.append(self.nodes[starting_node_index].id) # The user is assumed to have all the prerequisite skills of the # starting node before starting the story. 
Also, this list models # the skill IDs acquired by a learner as they progress through the # story. simulated_skill_ids = copy.deepcopy( self.nodes[starting_node_index].prerequisite_skill_ids) # The following loop employs a Breadth First Search from the given # starting node and makes sure that the user has acquired all the # prerequisite skills required by the destination nodes 'unlocked' # by visiting a particular node by the time that node is finished. while len(nodes_queue) > 0: current_node_id = nodes_queue.pop() current_node_index = self.get_node_index(current_node_id) is_node_visited[current_node_index] = True current_node = self.nodes[current_node_index] for skill_id in current_node.acquired_skill_ids: simulated_skill_ids.append(skill_id) for node_id in current_node.destination_node_ids: node_index = self.get_node_index(node_id) # The following condition checks whether the destination # node for a particular node, has already been visited, in # which case the story would have loops, which are not # allowed. if is_node_visited[node_index]: raise utils.ValidationError( 'Loops are not allowed in stories.') destination_node = self.nodes[node_index] if not ( set( destination_node.prerequisite_skill_ids ).issubset(simulated_skill_ids)): raise utils.ValidationError( 'The prerequisite skills ' + ' '.join( set(destination_node.prerequisite_skill_ids) - set(simulated_skill_ids)) + ' were not completed before the node with id %s' ' was unlocked.' % node_id) nodes_queue.append(node_id) def get_node_index(self, node_id): """Returns the index of the story node with the given node id, or None if the node id is not in the story contents dict. Args: node_id: str. The id of the node. Returns: int or None. The index of the corresponding node, or None if there is no such node. 
""" for ind, node in enumerate(self.nodes): if node.id == node_id: return ind return None def get_ordered_nodes(self): """Returns a list of nodes ordered by how they would appear sequentially to a learner. NOTE: Currently, this function assumes only a linear arrangement of nodes. Returns: list(StoryNode). The ordered list of nodes. """ initial_index = self.get_node_index(self.initial_node_id) current_node = self.nodes[initial_index] ordered_nodes_list = [current_node] while current_node.destination_node_ids: next_node_id = current_node.destination_node_ids[0] current_node = self.nodes[self.get_node_index(next_node_id)] ordered_nodes_list.append(current_node) return ordered_nodes_list def get_all_linked_exp_ids(self): """Returns a list of exploration id linked to each of the nodes of story content. Returns: list(str). A list of exploration ids. """ exp_ids = [] for node in self.nodes: if node.exploration_id is not None: exp_ids.append(node.exploration_id) return exp_ids def get_node_with_corresponding_exp_id(self, exp_id): """Returns the node object which corresponds to a given exploration ids. Returns: StoryNode or None. The StoryNode object of the corresponding exploration id if exist else None. """ for node in self.nodes: if node.exploration_id == exp_id: return node raise Exception('Unable to find the exploration id in any node: %s' % ( exp_id)) def to_dict(self): """Returns a dict representing this StoryContents domain object. Returns: dict. A dict, mapping all fields of StoryContents instance. """ return { 'nodes': [ node.to_dict() for node in self.nodes ], 'initial_node_id': self.initial_node_id, 'next_node_id': self.next_node_id } @classmethod def from_dict(cls, story_contents_dict): """Return a StoryContents domain object from a dict. Args: story_contents_dict: dict. The dict representation of StoryContents object. Returns: StoryContents. The corresponding StoryContents domain object. 
""" story_contents = cls( [ StoryNode.from_dict(story_node_dict) for story_node_dict in story_contents_dict['nodes'] ], story_contents_dict['initial_node_id'], story_contents_dict['next_node_id'] ) return story_contents class Story(python_utils.OBJECT): """Domain object for an Oppia Story.""" def __init__( self, story_id, title, thumbnail_filename, thumbnail_bg_color, description, notes, story_contents, story_contents_schema_version, language_code, corresponding_topic_id, version, url_fragment, meta_tag_content, created_on=None, last_updated=None): """Constructs a Story domain object. Args: story_id: str. The unique ID of the story. title: str. The title of the story. description: str. The high level description of the story. notes: str. A set of notes, that describe the characters, main storyline, and setting. To be provided in html form. story_contents: StoryContents. The StoryContents instance representing the contents (like nodes) that are part of the story. story_contents_schema_version: int. The schema version for the story contents object. language_code: str. The ISO 639-1 code for the language this story is written in. corresponding_topic_id: str. The id of the topic to which the story belongs. version: int. The version of the story. created_on: datetime.datetime. Date and time when the story is created. last_updated: datetime.datetime. Date and time when the story was last updated. thumbnail_filename: str|None. The thumbnail filename of the story. thumbnail_bg_color: str|None. The thumbnail background color of the story. url_fragment: str. The url fragment for the story. meta_tag_content: str. The meta tag content in the topic viewer page. 
""" self.id = story_id self.title = title self.thumbnail_filename = thumbnail_filename self.thumbnail_bg_color = thumbnail_bg_color self.description = description self.notes = html_cleaner.clean(notes) self.story_contents = story_contents self.story_contents_schema_version = story_contents_schema_version self.language_code = language_code self.corresponding_topic_id = corresponding_topic_id self.created_on = created_on self.last_updated = last_updated self.version = version self.url_fragment = url_fragment self.meta_tag_content = meta_tag_content @classmethod def require_valid_thumbnail_filename(cls, thumbnail_filename): """Checks whether the thumbnail filename of the story is a valid one. Args: thumbnail_filename: str. The thumbnail filename to validate. """ utils.require_valid_thumbnail_filename(thumbnail_filename) @classmethod def require_valid_thumbnail_bg_color(cls, thumbnail_bg_color): """Checks whether the thumbnail background color of the story is a valid one. Args: thumbnail_bg_color: str. The thumbnail background color to validate. Returns: bool. Whether the thumbnail background color is valid or not. """ return thumbnail_bg_color in constants.ALLOWED_THUMBNAIL_BG_COLORS[ 'story'] def validate(self): """Validates various properties of the story object. Raises: ValidationError. One or more attributes of story are invalid. 
""" self.require_valid_title(self.title) if not isinstance(self.description, python_utils.BASESTRING): raise utils.ValidationError( 'Expected description to be a string, received %s' % self.description) if self.url_fragment is not None: utils.require_valid_url_fragment( self.url_fragment, 'Story Url Fragment', constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) utils.require_valid_meta_tag_content(self.meta_tag_content) self.require_valid_thumbnail_filename(self.thumbnail_filename) if self.thumbnail_bg_color is not None and not ( self.require_valid_thumbnail_bg_color(self.thumbnail_bg_color)): raise utils.ValidationError( 'Story thumbnail background color %s is not supported.' % ( self.thumbnail_bg_color)) if self.thumbnail_bg_color and self.thumbnail_filename is None: raise utils.ValidationError( 'Story thumbnail image is not provided.') if self.thumbnail_filename and self.thumbnail_bg_color is None: raise utils.ValidationError( 'Story thumbnail background color is not specified.') if not isinstance(self.notes, python_utils.BASESTRING): raise utils.ValidationError( 'Expected notes to be a string, received %s' % self.notes) if not isinstance(self.story_contents_schema_version, int): raise utils.ValidationError( 'Expected story contents schema version to be an integer, ' 'received %s' % self.story_contents_schema_version) if (self.story_contents_schema_version != feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION): raise utils.ValidationError( 'Expected story contents schema version to be %s, ' 'received %s' % ( feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION, self.story_contents_schema_version)) if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not utils.is_valid_language_code(self.language_code): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code) if not isinstance( self.corresponding_topic_id, python_utils.BASESTRING): raise 
utils.ValidationError( 'Expected corresponding_topic_id should be a string, received: ' '%s' % self.corresponding_topic_id) self.story_contents.validate() @classmethod def require_valid_story_id(cls, story_id): """Checks whether the story id is a valid one. Args: story_id: str. The story id to validate. """ if not isinstance(story_id, python_utils.BASESTRING): raise utils.ValidationError( 'Story id should be a string, received: %s' % story_id) if len(story_id) != 12: raise utils.ValidationError('Invalid story id.') @classmethod def require_valid_title(cls, title): """Checks whether the story title is a valid one. Args: title: str. The title to validate. """ if not isinstance(title, python_utils.BASESTRING): raise utils.ValidationError('Title should be a string.') if title == '': raise utils.ValidationError('Title field should not be empty') title_limit = android_validation_constants.MAX_CHARS_IN_STORY_TITLE if len(title) > title_limit: raise utils.ValidationError( 'Story title should be less than %d chars, received %s' % (title_limit, title)) def get_acquired_skill_ids_for_node_ids(self, node_ids): """Returns the acquired skill ids of the nodes having the given node ids. Args: node_ids: list(str). The list of IDs of the nodes inside the story. Returns: list(str). The union of the acquired skill IDs corresponding to each of the node IDs. """ acquired_skill_ids = [] for node in self.story_contents.nodes: if node.id in node_ids: for skill_id in node.acquired_skill_ids: if skill_id not in acquired_skill_ids: acquired_skill_ids.append(skill_id) return acquired_skill_ids def get_prerequisite_skill_ids_for_exp_id(self, exp_id): """Returns the prerequisite skill ids of the node having the given exploration id. Args: exp_id: str. The ID of the exploration linked to the story. Returns: list(str)|None. The list of prerequisite skill ids for the exploration or None, if no node is linked to it. 
""" for node in self.story_contents.nodes: if node.exploration_id == exp_id: return node.prerequisite_skill_ids return None def has_exploration(self, exp_id): """Checks whether an exploration is present in the story. Args: exp_id: str. The ID of the exploration linked to the story. Returns: bool. Whether the exploration is linked to the story. """ for node in self.story_contents.nodes: if node.exploration_id == exp_id: return True return False def to_dict(self): """Returns a dict representing this Story domain object. Returns: dict. A dict, mapping all fields of Story instance. """ return { 'id': self.id, 'title': self.title, 'description': self.description, 'notes': self.notes, 'language_code': self.language_code, 'story_contents_schema_version': self.story_contents_schema_version, 'corresponding_topic_id': self.corresponding_topic_id, 'version': self.version, 'story_contents': self.story_contents.to_dict(), 'thumbnail_filename': self.thumbnail_filename, 'thumbnail_bg_color': self.thumbnail_bg_color, 'url_fragment': self.url_fragment, 'meta_tag_content': self.meta_tag_content } @classmethod def deserialize(cls, json_string): """Returns a Story domain object decoded from a JSON string. Args: json_string: str. A JSON-encoded utf-8 string that can be decoded into a dictionary representing a Story. Only call on strings that were created using serialize(). Returns: Story. The corresponding Story domain object. """ story_dict = json.loads(json_string.decode('utf-8')) created_on = ( utils.convert_string_to_naive_datetime_object( story_dict['created_on']) if 'created_on' in story_dict else None) last_updated = ( utils.convert_string_to_naive_datetime_object( story_dict['last_updated']) if 'last_updated' in story_dict else None) story = cls.from_dict( story_dict, story_version=story_dict['version'], story_created_on=created_on, story_last_updated=last_updated) return story def serialize(self): """Returns the object serialized as a JSON string. Returns: str. 
JSON-encoded utf-8 string encoding all of the information composing the object. """ story_dict = self.to_dict() # The only reason we add the version parameter separately is that our # yaml encoding/decoding of this object does not handle the version # parameter. # NOTE: If this changes in the future (i.e the version parameter is # added as part of the yaml representation of this object), all YAML # files must add a version parameter to their files with the correct # version of this object. The line below must then be moved to # to_dict(). story_dict['version'] = self.version if self.created_on: story_dict['created_on'] = utils.convert_naive_datetime_to_string( self.created_on) if self.last_updated: story_dict['last_updated'] = utils.convert_naive_datetime_to_string( self.last_updated) return json.dumps(story_dict).encode('utf-8') @classmethod def from_dict( cls, story_dict, story_version=0, story_created_on=None, story_last_updated=None): """Returns a Story domain object from a dictionary. Args: story_dict: dict. The dictionary representation of story object. story_version: int. The version of the story. story_created_on: datetime.datetime. Date and time when the story is created. story_last_updated: datetime.datetime. Date and time when the story was last updated. Returns: Story. The corresponding Story domain object. """ story = cls( story_dict['id'], story_dict['title'], story_dict['thumbnail_filename'], story_dict['thumbnail_bg_color'], story_dict['description'], story_dict['notes'], StoryContents.from_dict(story_dict['story_contents']), story_dict['story_contents_schema_version'], story_dict['language_code'], story_dict['corresponding_topic_id'], story_version, story_dict['url_fragment'], story_dict['meta_tag_content'], story_created_on, story_last_updated) return story @classmethod def create_default_story( cls, story_id, title, description, corresponding_topic_id, url_fragment): """Returns a story domain object with default values. 
This is for the frontend where a default blank story would be shown to the user when the story is created for the first time. Args: story_id: str. The unique id of the story. title: str. The title for the newly created story. description: str. The high level description of the story. corresponding_topic_id: str. The id of the topic to which the story belongs. url_fragment: str. The url fragment of the story. Returns: Story. The Story domain object with the default values. """ # Initial node id for a new story. initial_node_id = '%s1' % NODE_ID_PREFIX story_contents = StoryContents([], None, initial_node_id) return cls( story_id, title, None, None, description, feconf.DEFAULT_STORY_NOTES, story_contents, feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION, constants.DEFAULT_LANGUAGE_CODE, corresponding_topic_id, 0, url_fragment, '') @classmethod def _convert_story_contents_v1_dict_to_v2_dict(cls, story_contents_dict): """Converts old Story Contents schema to the modern v2 schema. v2 schema introduces the thumbnail_filename and thumbnail_bg_color fields for Story Nodes. Args: story_contents_dict: dict. A dict used to initialize a Story Contents domain object. Returns: dict. The converted story_contents_dict. """ for index in python_utils.RANGE(len(story_contents_dict['nodes'])): story_contents_dict['nodes'][index]['thumbnail_filename'] = None story_contents_dict['nodes'][index]['thumbnail_bg_color'] = None return story_contents_dict @classmethod def _convert_story_contents_v2_dict_to_v3_dict(cls, story_contents_dict): """Converts v2 Story Contents schema to the v3 schema. v3 schema introduces the description field for Story Nodes. Args: story_contents_dict: dict. A dict used to initialize a Story Contents domain object. Returns: dict. The converted story_contents_dict. 
""" for node in story_contents_dict['nodes']: node['description'] = '' return story_contents_dict @classmethod def _convert_story_contents_v3_dict_to_v4_dict(cls, story_contents_dict): """Converts v3 Story Contents schema to the v4 schema. v4 schema introduces the new schema for Math components. Args: story_contents_dict: dict. A dict used to initialize a Story Contents domain object. Returns: dict. The converted story_contents_dict. """ for node in story_contents_dict['nodes']: node['outline'] = ( html_validation_service.add_math_content_to_math_rte_components( node['outline'])) return story_contents_dict @classmethod def update_story_contents_from_model( cls, versioned_story_contents, current_version): """Converts the story_contents blob contained in the given versioned_story_contents dict from current_version to current_version + 1. Note that the versioned_story_contents being passed in is modified in-place. Args: versioned_story_contents: dict. A dict with two keys: - schema_version: str. The schema version for the story_contents dict. - story_contents: dict. The dict comprising the story contents. current_version: int. The current schema version of story_contents. """ versioned_story_contents['schema_version'] = current_version + 1 conversion_fn = getattr( cls, '_convert_story_contents_v%s_dict_to_v%s_dict' % ( current_version, current_version + 1)) versioned_story_contents['story_contents'] = conversion_fn( versioned_story_contents['story_contents']) def update_title(self, title): """Updates the title of the story. Args: title: str. The new title of the story. """ self.title = title def update_thumbnail_filename(self, thumbnail_filename): """Updates the thumbnail filename of the story. Args: thumbnail_filename: str|None. The new thumbnail filename of the story. """ self.thumbnail_filename = thumbnail_filename def update_thumbnail_bg_color(self, thumbnail_bg_color): """Updates the thumbnail background color of the story. Args: thumbnail_bg_color: str|None. 
The new thumbnail background color of the story. """ self.thumbnail_bg_color = thumbnail_bg_color def update_description(self, description): """Updates the description of the story. Args: description: str. The new description of the story. """ self.description = description def update_notes(self, notes): """Updates the notes of the story. Args: notes: str. The new notes of the story. """ self.notes = notes def update_language_code(self, language_code): """Updates the language code of the story. Args: language_code: str. The new language code of the story. """ self.language_code = language_code def update_url_fragment(self, url_fragment): """Updates the url fragment of the story. Args: url_fragment: str. The new url fragment of the story. """ self.url_fragment = url_fragment def update_meta_tag_content(self, new_meta_tag_content): """Updates the meta tag content of the story. Args: new_meta_tag_content: str. The updated meta tag content for the story. """ self.meta_tag_content = new_meta_tag_content def add_node(self, desired_node_id, node_title): """Adds a new default node with the id as story_contents.next_node_id. Args: desired_node_id: str. The node id to be given to the new node in the story. node_title: str. The title for the new story node. Raises: Exception. The desired_node_id differs from story_contents.next_node_id. """ if self.story_contents.next_node_id != desired_node_id: raise Exception( 'The node id %s does not match the expected ' 'next node id for the story.' % desired_node_id) self.story_contents.nodes.append( StoryNode.create_default_story_node(desired_node_id, node_title)) self.story_contents.next_node_id = ( StoryNode.get_incremented_node_id(self.story_contents.next_node_id)) if self.story_contents.initial_node_id is None: self.story_contents.initial_node_id = desired_node_id def _check_exploration_id_already_present(self, exploration_id): """Returns whether a node with the given exploration id is already present in story_contents. 
Args: exploration_id: str. The id of the exploration. Returns: bool. Whether a node with the given exploration ID is already present. """ for node in self.story_contents.nodes: if node.exploration_id == exploration_id: return True return False def delete_node(self, node_id): """Deletes a node with the given node_id. Args: node_id: str. The id of the node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) if node_id == self.story_contents.initial_node_id: if len(self.story_contents.nodes) == 1: self.story_contents.initial_node_id = None else: raise ValueError( 'The node with id %s is the starting node for the story, ' 'change the starting node before deleting it.' % node_id) for node in self.story_contents.nodes: if node_id in node.destination_node_ids: node.destination_node_ids.remove(node_id) del self.story_contents.nodes[node_index] def update_node_outline(self, node_id, new_outline): """Updates the outline field of a given node. Args: node_id: str. The id of the node. new_outline: str. The new outline of the given node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline = new_outline def update_node_title(self, node_id, new_title): """Updates the title field of a given node. Args: node_id: str. The id of the node. new_title: str. The new title of the given node. Raises: ValueError. The node is not part of the story. 
""" node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].title = new_title def update_node_description(self, node_id, new_description): """Updates the description field of a given node. Args: node_id: str. The id of the node. new_description: str. The new description of the given node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].description = new_description def update_node_thumbnail_filename(self, node_id, new_thumbnail_filename): """Updates the thumbnail filename field of a given node. Args: node_id: str. The id of the node. new_thumbnail_filename: str|None. The new thumbnail filename of the given node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].thumbnail_filename = ( new_thumbnail_filename) def update_node_thumbnail_bg_color(self, node_id, new_thumbnail_bg_color): """Updates the thumbnail background color field of a given node. Args: node_id: str. The id of the node. new_thumbnail_bg_color: str|None. The new thumbnail background color of the given node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].thumbnail_bg_color = ( new_thumbnail_bg_color) def mark_node_outline_as_finalized(self, node_id): """Updates the outline_is_finalized field of the node with the given node_id as True. Args: node_id: str. 
The id of the node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline_is_finalized = True def mark_node_outline_as_unfinalized(self, node_id): """Updates the outline_is_finalized field of the node with the given node_id as False. Args: node_id: str. The id of the node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].outline_is_finalized = False def update_node_acquired_skill_ids(self, node_id, new_acquired_skill_ids): """Updates the acquired skill ids field of a given node. Args: node_id: str. The id of the node. new_acquired_skill_ids: list(str). The updated acquired skill id list. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].acquired_skill_ids = ( new_acquired_skill_ids) def update_node_prerequisite_skill_ids( self, node_id, new_prerequisite_skill_ids): """Updates the prerequisite skill ids field of a given node. Args: node_id: str. The id of the node. new_prerequisite_skill_ids: list(str). The updated prerequisite skill id list. Raises: ValueError. The node is not part of the story. 
""" node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].prerequisite_skill_ids = ( new_prerequisite_skill_ids) def update_node_destination_node_ids( self, node_id, new_destination_node_ids): """Updates the destination_node_ids field of a given node. Args: node_id: str. The id of the node. new_destination_node_ids: list(str). The updated destination node id list. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story' % node_id) self.story_contents.nodes[node_index].destination_node_ids = ( new_destination_node_ids) def rearrange_node_in_story(self, from_index, to_index): """Rearranges or moves a node in the story content. Args: from_index: int. The index of the node to move. to_index: int. The index at which to insert the moved node. Raises: Exception. Invalid input. 
""" if not isinstance(from_index, int): raise Exception( 'Expected from_index value to be a number, ' 'received %s' % from_index) if not isinstance(to_index, int): raise Exception( 'Expected to_index value to be a number, ' 'received %s' % to_index) if from_index == to_index: raise Exception( 'Expected from_index and to_index values to be different.') story_content_nodes = self.story_contents.nodes if from_index >= len(story_content_nodes) or from_index < 0: raise Exception('Expected from_index value to be with-in bounds.') if to_index >= len(story_content_nodes) or to_index < 0: raise Exception('Expected to_index value to be with-in bounds.') story_node_to_move = copy.deepcopy(story_content_nodes[from_index]) del story_content_nodes[from_index] story_content_nodes.insert(to_index, story_node_to_move) def update_node_exploration_id( self, node_id, new_exploration_id): """Updates the exploration id field of a given node. Args: node_id: str. The id of the node. new_exploration_id: str. The updated exploration id for a node. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story.' % node_id) if ( self.story_contents.nodes[node_index].exploration_id == new_exploration_id): return if ( new_exploration_id is not None and self._check_exploration_id_already_present(new_exploration_id)): raise ValueError( 'A node with exploration id %s already exists.' % new_exploration_id) self.story_contents.nodes[node_index].exploration_id = ( new_exploration_id) def update_initial_node(self, new_initial_node_id): """Updates the starting node of the story. Args: new_initial_node_id: str. The new starting node id. Raises: ValueError. The node is not part of the story. """ node_index = self.story_contents.get_node_index(new_initial_node_id) if node_index is None: raise ValueError( 'The node with id %s is not part of this story.' 
% new_initial_node_id) self.story_contents.initial_node_id = new_initial_node_id class StorySummary(python_utils.OBJECT): """Domain object for Story Summary.""" def __init__( self, story_id, title, description, language_code, version, node_titles, thumbnail_bg_color, thumbnail_filename, url_fragment, story_model_created_on, story_model_last_updated): """Constructs a StorySummary domain object. Args: story_id: str. The unique id of the story. title: str. The title of the story. description: str. The description of the story. language_code: str. The language code of the story. version: int. The version of the story. node_titles: list(str). The titles of nodes present in the story. thumbnail_bg_color: str|None. The thumbnail background color of the story. thumbnail_filename: str|None. The thumbnail filename of the story. url_fragment: str. The url fragment for the story. story_model_created_on: datetime.datetime. Date and time when the story model is created. story_model_last_updated: datetime.datetime. Date and time when the story model was last updated. """ self.id = story_id self.title = title self.description = description self.language_code = language_code self.version = version self.node_titles = node_titles self.thumbnail_bg_color = thumbnail_bg_color self.thumbnail_filename = thumbnail_filename self.url_fragment = url_fragment self.story_model_created_on = story_model_created_on self.story_model_last_updated = story_model_last_updated def validate(self): """Validates various properties of the story summary object. Raises: ValidationError. One or more attributes of story summary are invalid. 
""" if self.url_fragment is not None: utils.require_valid_url_fragment( self.url_fragment, 'Story Url Fragment', constants.MAX_CHARS_IN_STORY_URL_FRAGMENT) if not isinstance(self.title, python_utils.BASESTRING): raise utils.ValidationError( 'Expected title to be a string, received %s' % self.title) if self.title == '': raise utils.ValidationError('Title field should not be empty') if not isinstance(self.description, python_utils.BASESTRING): raise utils.ValidationError( 'Expected description to be a string, received %s' % self.description) if not isinstance(self.node_titles, list): raise utils.ValidationError( 'Expected node_titles to be a list, received \'%s\'' % ( self.node_titles)) for title in self.node_titles: if not isinstance(title, python_utils.BASESTRING): raise utils.ValidationError( 'Expected each chapter title to be a string, received %s' % title) utils.require_valid_thumbnail_filename(self.thumbnail_filename) if ( self.thumbnail_bg_color is not None and not ( Story.require_valid_thumbnail_bg_color( self.thumbnail_bg_color))): raise utils.ValidationError( 'Story thumbnail background color %s is not supported.' % ( self.thumbnail_bg_color)) if self.thumbnail_bg_color and self.thumbnail_filename is None: raise utils.ValidationError( 'Story thumbnail image is not provided.') if self.thumbnail_filename and self.thumbnail_bg_color is None: raise utils.ValidationError( 'Story thumbnail background color is not specified.') if not isinstance(self.language_code, python_utils.BASESTRING): raise utils.ValidationError( 'Expected language code to be a string, received %s' % self.language_code) if not utils.is_valid_language_code(self.language_code): raise utils.ValidationError( 'Invalid language code: %s' % self.language_code) def to_dict(self): """Returns a dictionary representation of this domain object. Returns: dict. A dict representing this StorySummary object. 
""" return { 'id': self.id, 'title': self.title, 'description': self.description, 'language_code': self.language_code, 'version': self.version, 'node_titles': self.node_titles, 'thumbnail_filename': self.thumbnail_filename, 'thumbnail_bg_color': self.thumbnail_bg_color, 'url_fragment': self.url_fragment, 'story_model_created_on': utils.get_time_in_millisecs( self.story_model_created_on), 'story_model_last_updated': utils.get_time_in_millisecs( self.story_model_last_updated) } def to_human_readable_dict(self): """Returns a dictionary representation of this domain object. Returns: dict. A dict representing this StorySummary object. """ return { 'id': self.id, 'title': self.title, 'description': self.description, 'node_titles': self.node_titles, 'thumbnail_bg_color': self.thumbnail_bg_color, 'thumbnail_filename': self.thumbnail_filename, 'url_fragment': self.url_fragment }
{ "content_hash": "695f19bdcfc3d412d910aa013670da36", "timestamp": "", "source": "github", "line_count": 1625, "max_line_length": 80, "avg_line_length": 39.76, "alnum_prop": 0.5954186658412011, "repo_name": "prasanna08/oppia", "id": "06511bb1cfc2ed4177cec13a32de62b58bb8b090", "size": "65215", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "core/domain/story_domain.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "97795" }, { "name": "HTML", "bytes": "1128491" }, { "name": "JavaScript", "bytes": "733121" }, { "name": "Python", "bytes": "9362251" }, { "name": "Shell", "bytes": "10639" }, { "name": "TypeScript", "bytes": "6077851" } ], "symlink_target": "" }
""" jsonapi.base.handler.base ========================= """ # local from ..response import Response from ..errors import MethodNotAllowed class BaseHandler(object): """ The base class for a request handler. :arg jsonapi.base.api.API api: :arg jsonapi.base.database.Session db: :arg jsonapi.base.request.Request request: """ def __init__(self, api, db, request): """ """ self.api = api self.request = request self.response = Response() self.db = db return None def prepare(self): """ Called directly before :meth:`handle`. """ return None def handle(self): """ Handles a requested. """ if self.request.method == "head": return self.head() elif self.request.method == "get": return self.get() elif self.request.method == "post": return self.post() elif self.request.method == "patch": return self.patch() elif self.request.method == "delete": return self.delete() raise MethodNotAllowed() def head(self): """ Handles a HEAD request. """ raise MethodNotAllowed() def get(self): """ Handles a GET request. """ raise MethodNotAllowed() def post(self): """ Handles a POST request. """ raise MethodNotAllowed() def patch(self): """ Handles a PATCH request. """ raise MethodNotAllowed() def delete(self): """ Handles a DELETE request. """ raise MethodNotAllowed()
{ "content_hash": "c8e970cb8c825ccf8e8265c7d03ba0d1", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 46, "avg_line_length": 21.569620253164558, "alnum_prop": 0.5140845070422535, "repo_name": "benediktschmitt/py-jsonapi", "id": "8f41a7b5b7b4fb08f2e902bc0a1799449f32dd53", "size": "2850", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jsonapi/base/handler/base.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "266616" } ], "symlink_target": "" }
from torndb import Connection from config import configs _CONNS_ = {} def get_conn(db_id): if db_id in _CONNS_: return _CONNS_[db_id] _CONNS_[db_id] = db = Connection(**configs['db'][db_id]) db._db_args.pop('init_command', None) db.execute("SET TIME_ZONE = 'SYSTEM'") return db
{ "content_hash": "bf2c184fa1eff6d647f3a57b6c46300b", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 60, "avg_line_length": 22.142857142857142, "alnum_prop": 0.6129032258064516, "repo_name": "Geew/issue-task", "id": "46e695587e6ecc9204b81b0cc60bf824b49f06a6", "size": "327", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "util/db.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "19631" }, { "name": "HTML", "bytes": "45075" }, { "name": "JavaScript", "bytes": "31341" }, { "name": "Python", "bytes": "67593" } ], "symlink_target": "" }
from django.conf.global_settings import * # noqa DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": "mem_db", } } INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.messages", "django.contrib.sessions", "django.contrib.sites", "useful.django", "tests.testapp.apps.TestAppConfig", ] SECRET_KEY = 'justfortests' ROOT_URLCONF = 'tests.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ] } } ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } }
{ "content_hash": "e138a3b86e30a164e8558d079f0456d4", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 70, "avg_line_length": 25.37735849056604, "alnum_prop": 0.6475836431226766, "repo_name": "tuttle/python-useful", "id": "40453ee464ef103580ab66342f3987fed70f5f54", "size": "1345", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/settings.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "528" }, { "name": "Python", "bytes": "102029" } ], "symlink_target": "" }
# Tornado command-line option definitions and the URL routing table for the
# rendering service.
from tornado.options import define

from handler.HealthCheck import HealthCheckHandler
from handler.Img import ImgHandler
from handler.Pdf import PdfHandler
from handler.Resize import ResizeHandler
from handler.Watermark import WatermarkHandler

define("port", default=33005, help="Application port")
# NOTE(review): some options have empty help strings — consider describing
# them for `--help` output.
define("max_buffer_size", default=50 * 1024**2, help="")
define("autoreload", default=False, help="Autoreload server on change")
define("log_dir", default="/var/log", help="Logger directory")
define("log_file", default="rendering-service.log", help="Logger file name")
# Default font used for text rendering; path assumes a Debian/Ubuntu-style
# liberation-fonts install.
define("font_location", default="/usr/share/fonts/truetype/liberation/LiberationSerif-Bold.ttf", help="")

# URL path -> handler mapping consumed by the tornado Application.
routing = [
    (r"/", HealthCheckHandler),
    (r"/img", ImgHandler),
    (r"/pdf", PdfHandler),
    (r"/resize", ResizeHandler),
    (r"/watermark", WatermarkHandler)
]
{ "content_hash": "b53cc65917eec186854fde1da7796830", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 105, "avg_line_length": 36.91304347826087, "alnum_prop": 0.7432273262661955, "repo_name": "jiss-software/jiss-rendering-service", "id": "c0099cd0a021a5ef31281ed65c44be18dc4dd126", "size": "849", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "17262" }, { "name": "Shell", "bytes": "272" } ], "symlink_target": "" }
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
                                       webapi_response_errors,
                                       webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
                                   NOT_LOGGED_IN, PERMISSION_DENIED)
from djblets.webapi.fields import ResourceFieldType

from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_review_general_comment import \
    BaseReviewGeneralCommentResource
from reviewboard.webapi.resources.review_general_comment import \
    ReviewGeneralCommentResource


class ReviewReplyGeneralCommentResource(BaseReviewGeneralCommentResource):
    """Provides information on replies to general comments.

    If the reply is a draft, then comments can be added, deleted, or
    changed on this list. However, if the reply is already published,
    then no changes can be made.
    """
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
    policy_id = 'review_reply_general_comment'
    model_parent_key = 'review'
    uri_template_name = 'review_reply_general_comment'
    # Extend the base resource's fields with a link back to the comment
    # this reply addresses.
    fields = dict({
        'reply_to': {
            'type': ResourceFieldType,
            'resource': ReviewGeneralCommentResource,
            'description': 'The comment being replied to.',
        },
    }, **BaseReviewGeneralCommentResource.fields)

    mimetype_list_resource_name = 'review-reply-general-comments'
    mimetype_item_resource_name = 'review-reply-general-comment'

    def get_queryset(self, request, review_id, reply_id, *args, **kwargs):
        """Return comments on the given reply, where that reply is a reply
        to the review identified by review_id.
        """
        q = super(ReviewReplyGeneralCommentResource, self).get_queryset(
            request, *args, **kwargs)
        q = q.filter(review=reply_id, review__base_reply_to=review_id)
        return q

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA, NOT_LOGGED_IN,
                            PERMISSION_DENIED)
    @webapi_request_fields(
        required=BaseReviewGeneralCommentResource.REPLY_REQUIRED_CREATE_FIELDS,
        optional=BaseReviewGeneralCommentResource.REPLY_OPTIONAL_CREATE_FIELDS,
        allow_unknown=True,
    )
    def create(self, request, reply_to_id, *args, **kwargs):
        """Creates a reply to a general comment on a review.

        This will create a reply to a general comment on a review.
        The new comment will contain the same dimensions of the comment
        being replied to, but may contain new text.

        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
        """
        # Resolve the parent review request and the draft reply; both must
        # exist for a comment reply to be attached.
        try:
            resources.review_request.get_object(request, *args, **kwargs)
            reply = resources.review_reply.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        if not resources.review_reply.has_modify_permissions(request, reply):
            return self.get_no_access_error(request)

        # The comment being replied to is client-supplied; an unknown ID is
        # a form-validation error rather than a 404.
        try:
            comment = resources.review_general_comment.get_object(
                request,
                comment_id=reply_to_id,
                *args,
                **kwargs)
        except ObjectDoesNotExist:
            return INVALID_FORM_DATA, {
                'fields': {
                    'reply_to_id': ['This is not a valid general comment ID'],
                }
            }

        return self.create_or_update_comment_reply(
            request=request,
            comment=comment,
            reply=reply,
            comments_m2m=reply.general_comments,
            *args,
            **kwargs)

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional=BaseReviewGeneralCommentResource.REPLY_OPTIONAL_UPDATE_FIELDS,
        allow_unknown=True,
    )
    def update(self, request, *args, **kwargs):
        """Updates a reply to a general comment.

        This can only update the text in the comment. The comment being
        replied to cannot change.

        Extra data can be stored for later lookup. See
        :ref:`webapi2.0-extra-data` for more information.
        """
        try:
            resources.review_request.get_object(request, *args, **kwargs)
            reply = resources.review_reply.get_object(request, *args, **kwargs)
            general_comment = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        return self.update_comment(request=request,
                                   review=reply,
                                   comment=general_comment,
                                   is_reply=True,
                                   **kwargs)

    @augment_method_from(BaseReviewGeneralCommentResource)
    def delete(self, *args, **kwargs):
        """Deletes a general comment from a draft reply.

        This will remove the comment from the reply. This cannot be undone.

        Only comments on draft replies can be deleted. Attempting to delete
        a published comment will return a Permission Denied error.

        Instead of a payload response, this will return :http:`204`.
        """
        pass

    @augment_method_from(BaseReviewGeneralCommentResource)
    def get(self, *args, **kwargs):
        """Returns information on a reply to a general comment.

        Much of the information will be identical to that of the comment
        being replied to.
        """
        pass

    @augment_method_from(BaseReviewGeneralCommentResource)
    def get_list(self, *args, **kwargs):
        """Returns the list of replies to general comments made on a review."""
        pass


review_reply_general_comment_resource = \
    ReviewReplyGeneralCommentResource()
{ "content_hash": "920850af6b7eebe44fd0976b03b03cb5", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 79, "avg_line_length": 39.30718954248366, "alnum_prop": 0.6371799135350849, "repo_name": "reviewboard/reviewboard", "id": "e78b2b85d7f633e9f7bfcd2383c4656e3eb3e683", "size": "6014", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "reviewboard/webapi/resources/review_reply_general_comment.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "10167" }, { "name": "Dockerfile", "bytes": "7721" }, { "name": "HTML", "bytes": "226489" }, { "name": "JavaScript", "bytes": "3991608" }, { "name": "Less", "bytes": "438017" }, { "name": "Python", "bytes": "9186415" }, { "name": "Shell", "bytes": "3855" } ], "symlink_target": "" }
from . import MemoryMixin


class UnwrapperMixin(MemoryMixin):
    """
    This mixin processes SimActionObjects by passing on their .ast field.

    Each overridden method runs its arguments through ``_raw_ast`` before
    delegating to the next mixin in the MRO, so lower layers only ever see
    plain ASTs.
    """
    def store(self, addr, data, size=None, condition=None, **kwargs):
        return super().store(_raw_ast(addr),
                      _raw_ast(data),
                      size=_raw_ast(size),
                      condition=_raw_ast(condition),
                      **kwargs)

    def load(self, addr, size=None, condition=None, fallback=None,
            **kwargs):
        return super().load(_raw_ast(addr),
                     size=_raw_ast(size),
                     condition=_raw_ast(condition),
                     fallback=_raw_ast(fallback),
                     **kwargs)

    def find(self, addr, what, max_search, default=None, **kwargs):
        return super().find(_raw_ast(addr),
                     _raw_ast(what),
                     max_search,
                     default=_raw_ast(default),
                     **kwargs)

    def copy_contents(self, dst, src, size, condition=None, **kwargs):
        return super().copy_contents(_raw_ast(dst), _raw_ast(src), _raw_ast(size), _raw_ast(condition), **kwargs)

# Imported at module bottom — presumably to avoid a circular import with the
# state_plugins package; confirm before moving this to the top of the file.
from ...state_plugins.sim_action_object import _raw_ast
{ "content_hash": "20ae8dc6b68369b0b9972a5d038e338b", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 113, "avg_line_length": 38.857142857142854, "alnum_prop": 0.6056985294117647, "repo_name": "angr/angr", "id": "60733582ced698b6a17f52e8417f92b9fc524fe3", "size": "1088", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "angr/storage/memory_mixins/unwrapper_mixin.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "6694" }, { "name": "C++", "bytes": "146292" }, { "name": "Makefile", "bytes": "946" }, { "name": "Python", "bytes": "27717304" } ], "symlink_target": "" }
import ssl

from pg8000.dbapi import connect


def test_md5_ssl(db_kwargs):
    """Connecting over SSL with md5 auth succeeds when cert checks are off."""
    # Build an SSL context that skips hostname and certificate verification,
    # since the test server uses a self-signed certificate.
    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE

    db_kwargs["ssl_context"] = ssl_context

    # Opening (and cleanly closing) the connection is the whole test.
    with connect(**db_kwargs):
        pass
{ "content_hash": "06313725905591fb50e3e720cadd3761", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 42, "avg_line_length": 23.083333333333332, "alnum_prop": 0.6823104693140795, "repo_name": "tlocke/pg8000", "id": "6f065a7abce77b7118f02efe21d2fb226b2c7688", "size": "277", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "test/dbapi/auth/test_md5_ssl.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "331262" } ], "symlink_target": "" }
from synapse.api.errors import SynapseError
from synapse.types import StreamToken

import logging


logger = logging.getLogger(__name__)


class SourcePaginationConfig(object):
    """A configuration object which stores pagination parameters for a
    specific event source.

    ``direction`` is normalized to 'f' (forward) or 'b' (backward); any
    value other than 'f' is treated as 'b'. ``limit`` is coerced to int
    when provided.
    """

    def __init__(self, from_key=None, to_key=None, direction='f',
                 limit=None):
        self.from_key = from_key
        self.to_key = to_key
        self.direction = 'f' if direction == 'f' else 'b'
        self.limit = int(limit) if limit is not None else None

    def __repr__(self):
        return (
            "StreamConfig(from_key=%r, to_key=%r, direction=%r, limit=%r)"
        ) % (self.from_key, self.to_key, self.direction, self.limit)


class PaginationConfig(object):
    """A configuration object which stores pagination parameters."""

    def __init__(self, from_token=None, to_token=None, direction='f',
                 limit=None):
        self.from_token = from_token
        self.to_token = to_token
        self.direction = 'f' if direction == 'f' else 'b'
        self.limit = int(limit) if limit is not None else None

    @classmethod
    def from_request(cls, request, raise_invalid_params=True,
                     default_limit=None):
        """Build a PaginationConfig from a request's query parameters.

        Reads ``dir``, ``from``, ``to`` and ``limit``; raises SynapseError
        (400) for any malformed value.
        """
        def get_param(name, default=None):
            # Each parameter may appear at most once in the query string.
            lst = request.args.get(name, [])
            if len(lst) > 1:
                raise SynapseError(
                    400, "%s must be specified only once" % (name,)
                )
            elif len(lst) == 1:
                return lst[0]
            else:
                return default

        direction = get_param("dir", 'f')
        if direction not in ['f', 'b']:
            raise SynapseError(400, "'dir' parameter is invalid.")

        from_tok = get_param("from")
        to_tok = get_param("to")

        # BUG FIX: the bare `except:` clauses below previously swallowed
        # SystemExit/KeyboardInterrupt too; narrowed to Exception. The
        # error messages also misspelled "parameter".
        try:
            if from_tok == "END":
                from_tok = None  # For backwards compat.
            elif from_tok:
                from_tok = StreamToken.from_string(from_tok)
        except Exception:
            raise SynapseError(400, "'from' parameter is invalid")

        try:
            if to_tok:
                to_tok = StreamToken.from_string(to_tok)
        except Exception:
            raise SynapseError(400, "'to' parameter is invalid")

        limit = get_param("limit", None)
        if limit is not None and not limit.isdigit():
            raise SynapseError(400, "'limit' parameter must be an integer.")

        if limit is None:
            limit = default_limit

        try:
            return PaginationConfig(from_tok, to_tok, direction, limit)
        except Exception:
            logger.exception("Failed to create pagination config")
            raise SynapseError(400, "Invalid request.")

    def __repr__(self):
        return (
            "PaginationConfig(from_tok=%r, to_tok=%r,"
            " direction=%r, limit=%r)"
        ) % (self.from_token, self.to_token, self.direction, self.limit)

    def get_source_config(self, source_name):
        """Derive the per-source pagination config for *source_name*.

        Looks up the "<source_name>_key" attribute on the stream tokens.
        """
        keyname = "%s_key" % source_name

        return SourcePaginationConfig(
            from_key=getattr(self.from_token, keyname),
            to_key=getattr(self.to_token, keyname) if self.to_token else None,
            direction=self.direction,
            limit=self.limit,
        )
{ "content_hash": "251bcb74442a1854cb1c2854a1a7bb50", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 78, "avg_line_length": 32.613861386138616, "alnum_prop": 0.5622343655130541, "repo_name": "iot-factory/synapse", "id": "167bfe0de3bfaf78dad93b104fac4df4163bb15b", "size": "3903", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "synapse/streams/config.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "2000" }, { "name": "HTML", "bytes": "2905" }, { "name": "JavaScript", "bytes": "176441" }, { "name": "Perl", "bytes": "31842" }, { "name": "Python", "bytes": "1879672" }, { "name": "Shell", "bytes": "4548" } ], "symlink_target": "" }
import numpy as np

from baseline_constants import BYTES_WRITTEN_KEY, BYTES_READ_KEY, LOCAL_COMPUTATIONS_KEY


class Server:
    """Federated-learning server: selects clients, aggregates their updates
    (FedAvg-style weighted average), and holds the global model params.
    """

    def __init__(self, client_model):
        # client_model doubles as the template model; the server keeps its
        # parameters as the current global model.
        self.client_model = client_model
        self.model = client_model.get_params()
        self.selected_clients = []
        self.updates = []

    def select_clients(self, my_round, possible_clients, num_clients=20):
        """Selects num_clients clients randomly from possible_clients.
        
        Note that within function, num_clients is set to
            min(num_clients, len(possible_clients)).

        Args:
            possible_clients: Clients from which the server can select.
            num_clients: Number of clients to select; default 20
        Return:
            list of (num_train_samples, num_test_samples)
        """
        num_clients = min(num_clients, len(possible_clients))
        # Reseeds NumPy's *global* RNG with the round number so selection is
        # reproducible per round (note this affects all np.random callers).
        np.random.seed(my_round)
        self.selected_clients = np.random.choice(possible_clients, num_clients, replace=False)

        return [(c.num_train_samples, c.num_test_samples) for c in self.selected_clients]

    def train_model(self, num_epochs=1, batch_size=10, minibatch=None, clients=None):
        """Trains self.model on given clients.
        
        Trains model on self.selected_clients if clients=None;
        each client's data is trained with the given number of epochs
        and batches.

        Args:
            clients: list of Client objects.
            num_epochs: Number of epochs to train.
            batch_size: Size of training batches.
            minibatch: fraction of client's data to apply minibatch sgd,
                None to use FedAvg
        Return:
            bytes_written: number of bytes written by each client to server 
                dictionary with client ids as keys and integer values.
            client computations: number of FLOPs computed by each client
                dictionary with client ids as keys and integer values.
            bytes_read: number of bytes read by each client from server
                dictionary with client ids as keys and integer values.
        """
        if clients is None:
            clients = self.selected_clients
        sys_metrics = {
            c.id: {BYTES_WRITTEN_KEY: 0,
                   BYTES_READ_KEY: 0,
                   LOCAL_COMPUTATIONS_KEY: 0} for c in clients}
        for c in clients:
            # Each client starts from the current global parameters.
            c.model.set_params(self.model)
            comp, num_samples, update = c.train(num_epochs, batch_size, minibatch)

            sys_metrics[c.id][BYTES_READ_KEY] += c.model.size
            sys_metrics[c.id][BYTES_WRITTEN_KEY] += c.model.size
            sys_metrics[c.id][LOCAL_COMPUTATIONS_KEY] = comp

            # Buffer (num_samples, params) pairs for the next update_model().
            self.updates.append((num_samples, update))

        return sys_metrics

    def update_model(self):
        # FedAvg: average buffered client params weighted by sample count,
        # then clear the buffer.
        total_weight = 0.
        base = [0] * len(self.updates[0][1])
        for (client_samples, client_model) in self.updates:
            total_weight += client_samples
            for i, v in enumerate(client_model):
                base[i] += (client_samples * v.astype(np.float64))
        averaged_soln = [v / total_weight for v in base]

        self.model = averaged_soln
        self.updates = []

    def test_model(self, clients_to_test, set_to_use='test'):
        """Tests self.model on given clients.

        Tests model on self.selected_clients if clients_to_test=None.

        Args:
            clients_to_test: list of Client objects.
            set_to_use: dataset to test on. Should be in ['train', 'test'].
        Return:
            dictionary mapping client id to that client's metrics.
        """
        metrics = {}

        if clients_to_test is None:
            clients_to_test = self.selected_clients

        for client in clients_to_test:
            client.model.set_params(self.model)
            c_metrics = client.test(set_to_use)
            metrics[client.id] = c_metrics

        return metrics

    def get_clients_info(self, clients):
        """Returns the ids, hierarchies and num_samples for the given clients.

        Returns info about self.selected_clients if clients=None;

        Args:
            clients: list of Client objects.
        """
        if clients is None:
            clients = self.selected_clients

        ids = [c.id for c in clients]
        groups = {c.id: c.group for c in clients}
        num_samples = {c.id: c.num_samples for c in clients}
        return ids, groups, num_samples

    def save_model(self, path):
        """Saves the server model to the given checkpoint *path*."""
        # Push the aggregated global params into the template model before
        # writing the checkpoint.
        self.client_model.set_params(self.model)
        model_sess = self.client_model.sess
        return self.client_model.saver.save(model_sess, path)

    def close_model(self):
        # Releases the underlying TF session held by the client model.
        self.client_model.close()
{ "content_hash": "e1f7e61400a68a1e855bd5cc59147d15", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 94, "avg_line_length": 37.48031496062992, "alnum_prop": 0.6023109243697479, "repo_name": "TalwalkarLab/leaf", "id": "6d01f357e64f106228c2b69284a4a04b438754ba", "size": "4760", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "models/server.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Jupyter Notebook", "bytes": "86983" }, { "name": "Python", "bytes": "138744" }, { "name": "Shell", "bytes": "20564" } ], "symlink_target": "" }
""" Local Normalization Operation. Easy-to-understand (but slow) naive numpy implementation. """ # Authors: Nicolas Pinto <nicolas.pinto@gmail.com> # # License: Proprietary __all__ = ['LNormScipyNaive'] # -- Imports import numpy as np from pythor3.operation.common.lsum import lsum from pythor3.operation.lnorm_.plugins import LNormPlugin # -- Small epsilon from pythor3.operation.lnorm_ import EPSILON # -- Contracts from pythor3.operation.lnorm_ import ( assert_postconditions_on_properties, assert_postconditions_on_data, ) class LNormScipyNaive(LNormPlugin): def run(self): """XXX: docstring""" arr_in = self.arr_in #assert_preconditions_on_data(arr_in) if arr_in.ndim == 2: _arr_in = arr_in[:, :, None] else: _arr_in = arr_in arr_out = self.arr_out if arr_out.ndim == 2: _arr_out = arr_out[:, :, None] else: _arr_out = arr_out ker_h, ker_w = inker_shape = self.inker_shape outker_shape = self.outker_shape dtype = self.arr_in.dtype out_shape = _arr_out.shape remove_mean = self.remove_mean div_method = self.div_method threshold = self.threshold stretch = self.stretch # input (min/max) arr_src = _arr_in[:].copy() # --------------------------------------------------------------------- # compute corresponding numerator (arr_num) and divisor (arr_div) # --------------------------------------------------------------------- # -- handle outker_shape=inker_shape (full) if outker_shape == inker_shape: # -- sum kernel in_d = _arr_in.shape[-1] kshape = list(inker_shape) + [in_d] ker = np.ones(kshape, dtype=dtype) size = float(ker.size) # -- compute sum-of-square arr_sq = arr_src ** 2. 
assert np.isfinite(arr_sq).all() arr_ssq = lsum(arr_sq, kshape, mode='valid').astype(dtype) assert np.isfinite(arr_ssq).all() # -- compute arr_num and arr_div # preparation ys = inker_shape[0] / 2 xs = inker_shape[1] / 2 arr_out_h, arr_out_w, arr_out_d = out_shape[-3:] hs = arr_out_h ws = arr_out_w # compute 'euclidean' (magnitude) divisor (norm = 1) if div_method == 'euclidean': # with mean substraction if remove_mean: arr_sum = lsum(arr_src, kshape, mode='valid').astype(dtype) arr_num = arr_src[ys:ys + hs, xs:xs + ws] \ - (arr_sum / size) val = (arr_ssq - (arr_sum ** 2.) / size) # to avoid sqrt of negative numbers np.putmask(val, val < 0, 0) arr_div = np.sqrt(val) + EPSILON # without mean substraction else: arr_num = arr_src[ys:ys + hs, xs:xs + ws] # arr_ssq should not have any value < 0 # however, it can happen (e.g. with fftconvolve) # so we ensure to set these values to 0 np.putmask(arr_ssq, arr_ssq < 0., 0.) arr_div = np.sqrt(arr_ssq) + EPSILON # or compute 'std' (standard deviation) divisor (var = 1) elif div_method == 'std': arr_sum = lsum(arr_src, kshape, mode='valid').astype(dtype) # with mean substraction if remove_mean: arr_num = arr_src[ys:ys + hs, xs:xs + ws] \ - (arr_sum / size) # without mean substraction else: arr_num = arr_src[ys:ys + hs, xs:xs + ws] val = (arr_ssq / size - (arr_sum / size) ** 2.) # to avoid sqrt of a negative number np.putmask(val, val < 0., 0.) 
arr_div = np.sqrt(val) + EPSILON else: raise ValueError("div_method='%s' not understood" % div_method) # --------------------------------------------------------------------- # -- handle outker_shape=(0,0) (per depth dim) *NOT TESTED* elif outker_shape == (0, 0): # -- output shape in_h, in_w, in_d = _arr_in.shape[-3:] kin_h, kin_w = inker_shape arr_out_h = (in_h - kin_h + 1) arr_out_w = (in_w - kin_w + 1) arr_out_d = in_d arr_out_shape = arr_out_h, arr_out_w, arr_out_d # -- sum kernel ker = np.ones(inker_shape, dtype=dtype) size = float(ker.size) # -- compute sum-of-square arr_sq = arr_src ** 2. arr_ssq = lsum(arr_sq, inker_shape + (1,), mode='valid') arr_ssq = arr_ssq.astype(dtype) # -- compute arr_num and arr_div # preparation ys = inker_shape[0] / 2 xs = inker_shape[1] / 2 arr_out_h, arr_out_w, arr_out_d = out_shape[-3:] hs = arr_out_h ws = arr_out_w def get_arr_sum(): arr_sum = np.empty(arr_out_shape, dtype=dtype) for d in xrange(in_d): slice2d = lsum(arr_src[:, :, d], inker_shape, mode='valid') slice2d = slice2d.astype(dtype) arr_sum[:, :, d] = slice2d return arr_sum # compute 'euclidean' (magnitude) divisor (norm = 1) if div_method == 'euclidean': # with mean substraction if remove_mean: arr_sum = get_arr_sum() arr_num = arr_src[ys:ys + hs, xs:xs + ws] \ - (arr_sum / size) val = (arr_ssq - (arr_sum ** 2.) / size) # to avoid sqrt of a negative number np.putmask(val, val < 0., 0.) arr_div = np.sqrt(val) + EPSILON # without mean substraction else: arr_num = arr_src[ys:ys + hs, xs:xs + ws] arr_div = np.sqrt(arr_ssq) + EPSILON # or compute 'std' (standard deviation) divisor (var = 1) elif div_method == 'std': arr_sum = get_arr_sum() # with mean substraction if remove_mean: arr_num = arr_src[ys:ys + hs, xs:xs + ws] \ - (arr_sum / size) # without mean substraction else: arr_num = arr_src[ys:ys + hs, xs:xs + ws] val = (arr_ssq / size - (arr_sum / size) ** 2.) # to avoid sqrt of a negative number np.putmask(val, val < 0., 0.) 
arr_div = np.sqrt(val) + EPSILON else: raise ValueError("div_method '%s' not understood" % div_method) else: raise ValueError( 'inker_shape=%s and outker_shape=%s not understood' % (inker_shape, outker_shape) ) # --------------------------------------------------------------------- # apply normalization # --------------------------------------------------------------------- if stretch != 1: arr_num *= stretch arr_div *= stretch # volume threshold assert np.isfinite(arr_div).all() np.putmask(arr_div, arr_div < (threshold + EPSILON), 1.) # output (min/max) assert np.isfinite(arr_num).all() assert np.isfinite(arr_div).all() _arr_out[:] = (arr_num / arr_div) if arr_in.ndim == 2: _arr_out.shape = _arr_out.shape[:2] # -- Contracts: postconditions assert_postconditions_on_properties(_arr_in, _arr_out, inker_shape) assert_postconditions_on_data(_arr_out) return _arr_out
{ "content_hash": "e374057ca9971609a09f74614d24c350", "timestamp": "", "source": "github", "line_count": 226, "max_line_length": 79, "avg_line_length": 35.836283185840706, "alnum_prop": 0.4554883318928263, "repo_name": "npinto/sthor", "id": "b5856bf3bfd26ac456d1d2bf3b2e877a8c86a4b3", "size": "8099", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sthor/operation/lnorm.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "51978" } ], "symlink_target": "" }
import os

from tori.cli.command import Command
from jinja2 import Template


class FlaskCreateSkeletonApp(Command):
    """ Create a skeleton app """

    def define_arguments(self, argument_parser):
        """Register the command's CLI arguments on *argument_parser*."""
        argument_parser.add_argument('name', help='The name of the app and the app module (e.g. piano_and_violin)')
        argument_parser.add_argument('-p', '--port', type=int, help='the initial port number', default=8000)
        # BUG FIX: this store_true flag previously carried default=8000
        # (copy-pasted from --port), which made args.force truthy even when
        # the flag was absent; store_true's implicit default is False.
        argument_parser.add_argument('-f', '--force', action='store_true', help='force / override the existing files')
        argument_parser.add_argument('-o', '--output', help='the base output path (default: the current directory)', default='')

    def execute(self, args):
        """Generate the skeleton app from the parsed *args*."""
        self.app_name = args.name
        self.force_mode = args.force
        self.base_path = args.output
        self.init_port = args.port

        # Create and enter the output directory if one was requested.
        if self.base_path:
            if not os.path.exists(self.base_path):
                os.makedirs(self.base_path)

            os.chdir(self.base_path)

        self._generate_directories([
            'static/js',
            'static/css',
            'static/scss',
            'static/image',
            'templates'
        ])

        self._copy_resource('Makefile', 'tori_Makefile')
        self._copy_resource('templates/index.html', 'flask_template_index.html')
        self._write_resource('server.py', 'flask_server.py')

    def _copy_resource(self, where, origin):
        """Copy the raw resource *origin* to path *where* (no templating)."""
        # NOTE(review): `resources` is not defined in this module — assumed
        # to be a module-level dict of template bodies provided elsewhere;
        # confirm.
        content = resources[origin]

        self._write(where, content)

    def _write_resource(self, where, template_name, **contexts):
        """Render the Jinja2 resource *template_name* to path *where*."""
        template = Template(resources[template_name])

        contexts.update({
            'name': self.app_name,
            'port': self.init_port
        })

        self._write(where, template.render(**contexts))

    def _write(self, where, content):
        """Write *content* to *where*; overwrite only in force mode."""
        if os.path.exists(where):
            if not self.force_mode:
                return

            os.unlink(where)

        with open(where, 'w') as f:
            f.write(content)

    def _generate_directories(self, directories):
        """Create each directory in *directories* if it does not exist."""
        for directory in directories:
            if os.path.exists(directory):
                continue

            os.makedirs(directory)
from tornado.websocket import WebSocketHandler import json import os class DebugHandler(WebSocketHandler): def check_origin(self, origin): return True def open(self, backtest_id): self.uuid = backtest_id print(backtest_id) print("[DebugHandler] WebSocket opened") def on_message(self, message): print "[DebugHandler] message:{}".format(message) m = json.loads(message) def on_close(self): print("[DebugHandler] WebSocket closed")
{ "content_hash": "e512e09d52764267e65cfbfc607974c3", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 57, "avg_line_length": 24.142857142857142, "alnum_prop": 0.6627218934911243, "repo_name": "mequanta/z-dyno", "id": "2f577a03ddf89c9f27c9f3dfa93f176cf6cea9f2", "size": "507", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dyno/debug/handlers.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "18072" }, { "name": "Shell", "bytes": "8547" } ], "symlink_target": "" }
from cassandra.metadata import Metadata from cassandra.cluster import Cluster from multicorn import TableDefinition, ColumnDefinition from cassandra.auth import PlainTextAuthProvider import types_mapper import logger from logger import WARNING, ERROR from properties import ISDEBUG def import_schema(schema, srv_options, options, restriction_type, restricts): if ISDEBUG: logger.log(u"import schema {0} requiested with options {1}; restriction type: {2}; restrictions: {3}".format(schema, options, restriction_type, restricts)) if "hosts" not in srv_options: logger.log("The hosts parameter is needed, setting to localhost.", WARNING) hosts = srv_options.get("hosts", "localhost").split(",") if "port" not in srv_options: logger.log("The port parameter is needed, setting to 9042.", WARNING) port = srv_options.get("port", "9042") username = srv_options.get("username", None) password = srv_options.get("password", None) with_row_id = options.get('with_row_id', 'True') == 'True' names_mapping = options.get('mapping', '').split(';') mapping_dict = {} mapping_dict_backward = {} for s in names_mapping: kp = s.split('=') if len(kp) != 2: continue key = kp[0].strip() value = kp[1].strip() mapping_dict[key] = value mapping_dict_backward[value] = key cluster = Cluster(hosts) if(username is not None): cluster.auth_provider = PlainTextAuthProvider(username=username, password=password) # Cassandra connection init session = cluster.connect() keyspace = cluster.metadata.keyspaces[schema] cassandra_tables = [] tables = keyspace.tables views = keyspace.views if restriction_type is None: for t in tables: if t in tables: cassandra_tables.append(tables[t]) else: cassandra_tables.append(views[t]) elif restriction_type == 'limit': for r in restricts: t_name = r if t_name in mapping_dict_backward: t_name = mapping_dict_backward[t_name] if t_name in tables: cassandra_tables.append(tables[t_name]) else: cassandra_tables.append(views[t_name]) elif restriction_type == 'except': for t in tables: if t 
not in restricts: if t in tables: cassandra_tables.append(tables[t]) else: cassandra_tables.append(views[t]) pg_tables = [] for c_table in cassandra_tables: if ISDEBUG: logger.log("Importing table {0}...".format(c_table.name)) pg_table_name = c_table.name if pg_table_name in mapping_dict: if ISDEBUG: logger.log("Cassandra table name '{0}' maps to PostgreSQL table name '{1}'".format(pg_table_name, mapping_dict[pg_table_name])) pg_table_name = mapping_dict[pg_table_name] pg_table = TableDefinition(pg_table_name) pg_table.options['keyspace'] = schema pg_table.options['columnfamily'] = c_table.name for c_column_name in c_table.columns: cql_type = c_table.columns[c_column_name].cql_type pg_type = types_mapper.get_pg_type(cql_type) if ISDEBUG: logger.log("Adding column {0} with PostgreSQL type {2} (CQL type {1})".format(c_column_name, cql_type, pg_type)) pg_table.columns.append(ColumnDefinition(c_column_name, type_name=pg_type)) if with_row_id: pg_table.columns.append(ColumnDefinition('__rowid__', type_name='text')) pg_tables.append(pg_table) if ISDEBUG: logger.log("Table imported: {0}".format(c_table.name)) session.shutdown() return pg_tables
{ "content_hash": "55b4431aaee593d192bb106a81e73191", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 163, "avg_line_length": 43.08988764044944, "alnum_prop": 0.6153846153846154, "repo_name": "rankactive/cassandra-fdw", "id": "e8fc0653529cfe9c2a0f918a4aef3cfa9f772759", "size": "3835", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cassandra-fdw/schema_importer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "44263" } ], "symlink_target": "" }
''' FCorr: tests module. Meant for use with py.test. Organize tests into files, each named xxx_test.py Read more here: http://pytest.org/ Copyright 2014, Dmytro Fishman Licensed under MIT '''
{ "content_hash": "477962610efb6ab701fcb2261979ec28", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 49, "avg_line_length": 19.3, "alnum_prop": 0.7461139896373057, "repo_name": "skyfallen/FCorr", "id": "a033ffded69a1b4caa101c10ebcaf9fe07ed5c9b", "size": "193", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "11450" } ], "symlink_target": "" }
from torch.autograd import Variable from utils import MAX_LENGTH import torch.nn as nn class AttnDecoderRNN(nn.Module): def __init__(self, hidden_size, output_size, n_layers, dropout_p=0.1, max_length=MAX_LENGTH): super(AttnDecoderRNN, self).__init__() self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout_p = dropout_p self.max_length = max_length self.embedding = nn.Embedding(self.output_size, self.hidden_size) self.attn = nn.Linear(self.hidden_size * 2, self.max_length) self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size) self.dropout = nn.Dropout(self.dropout_p) self.gru = nn.GRU(self.hidden_size, self.hidden_size) self.out = nn.Linear(self.hidden_size, self.output_size) def forward(self, input, hidden, encoder_output, encoder_outputs): embedded = self.embedding(input).view(1, 1, -1) embedded = self.dropout(embedded)
{ "content_hash": "b4ccd06b82852a7f0400491ad084de53", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 97, "avg_line_length": 42.458333333333336, "alnum_prop": 0.662414131501472, "repo_name": "dikshant2210/Neural-Machine-Translation", "id": "1091a81296244a44044b9582531c19019e6f9763", "size": "1019", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "models/attn_decoder.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13856" } ], "symlink_target": "" }
import pytest from scout.exceptions import IntegrityError ######################################################### ################### Hpo tests ####################### ######################################################### def test_add_hpo_term(adapter): ## GIVEN a empty adapter assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) ## WHEN loading a hpo term adapter.load_hpo_term(hpo_term) ## THEN assert that the term have been loaded assert len([term for term in adapter.hpo_terms()]) == 1 def test_add_hpo_term_twice(adapter): ## GIVEN a empty adapter assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) ## WHEN loading a hpo term adapter.load_hpo_term(hpo_term) with pytest.raises(IntegrityError): adapter.load_hpo_term(hpo_term) def test_fetch_term(adapter): ## GIVEN a adapter with one hpo term assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) ## WHEN fetching the hpo terms res = adapter.hpo_term(hpo_term["_id"]) ## THEN assert the term was fetched assert res["_id"] == hpo_term["_id"] def test_fetch_non_existing_hpo_term(adapter): ## GIVEN a adapter with one hpo term assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) ## WHEN fetching the hpo terms res = adapter.hpo_term("non existing") ## THEN assert resut is None assert res is None def test_fetch_all_hpo_terms(adapter): ## GIVEN a adapter with one hpo 
term assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) ## WHEN fetching the hpo terms res = [term for term in adapter.hpo_terms()] ## THEN assert the term was fetched assert len(res) == 1 def test_fetch_all_hpo_terms_query(real_adapter): adapter = real_adapter ## GIVEN a adapter with one hpo term assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) hpo_term2 = dict( _id="HP2", # Same as hpo_id hpo_id="HP2", # Required description="Second term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term2) ## WHEN fetching the hpo terms with partial query res = [term for term in adapter.hpo_terms(query="1")] ## THEN assert only one term was matched assert len(res) == 1 def test_fetch_all_hpo_terms_query_description(real_adapter): adapter = real_adapter ## GIVEN a adapter with one hpo term assert len([term for term in adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) hpo_term2 = dict( _id="HP2", # Same as hpo_id hpo_id="HP2", # Required description="Second term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term2) ## WHEN fetching the hpo terms with partial query res = [term for term in adapter.hpo_terms(query="second")] ## THEN assert only one term was matched assert len(res) == 1 for term in res: assert term["_id"] == "HP2" def test_fetch_all_hpo_terms_query_description_term(real_adapter): adapter = real_adapter ## GIVEN a adapter with one hpo term assert len([term for term in 
adapter.hpo_terms()]) == 0 hpo_term = dict( _id="HP1", # Same as hpo_id hpo_id="HP1", # Required description="First term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term) hpo_term2 = dict( _id="HP2", # Same as hpo_id hpo_id="HP2", # Required description="Second term", genes=[1], # List with integers that are hgnc_ids ) adapter.load_hpo_term(hpo_term2) ## WHEN fetching the hpo terms with partial query res = adapter.hpo_terms(query="term") ## THEN assert only one term was matched assert len([term for term in res]) == 2
{ "content_hash": "57b347bef4c1fbe1862bd7f3a49ad40d", "timestamp": "", "source": "github", "line_count": 183, "max_line_length": 66, "avg_line_length": 29.027322404371585, "alnum_prop": 0.5858433734939759, "repo_name": "Clinical-Genomics/scout", "id": "a2cc572e2371033a8ced6e1f667c6ed09fe9a1cb", "size": "5312", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/adapter/mongo/test_hpo_handler.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "12516" }, { "name": "Dockerfile", "bytes": "1451" }, { "name": "HTML", "bytes": "911931" }, { "name": "JavaScript", "bytes": "32692" }, { "name": "Makefile", "bytes": "1046" }, { "name": "Python", "bytes": "2419990" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('person', '0003_auto_20150223_2310'), ] operations = [ migrations.CreateModel( name='Branch', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('branch', models.CharField(max_length=100)), ], options={ 'db_table': 'branch', }, bases=(models.Model,), ), migrations.CreateModel( name='Profession', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('profession', models.CharField(max_length=100)), ('profession_branch', models.ForeignKey(to='person.Branch')), ], options={ 'db_table': 'profession', }, bases=(models.Model,), ), migrations.CreateModel( name='Region', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('region', models.CharField(max_length=50)), ], options={ 'db_table': 'region', }, bases=(models.Model,), ), migrations.AddField( model_name='person', name='person_branch', field=models.CharField(default=1, max_length=50), preserve_default=False, ), migrations.AddField( model_name='person', name='person_profession', field=models.CharField(default=1, max_length=100), preserve_default=False, ), migrations.AlterField( model_name='person', name='person_position', field=models.CharField(max_length=100), preserve_default=True, ), ]
{ "content_hash": "4a3a2f7111e6b0a2b538868f16b36696", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 114, "avg_line_length": 32.38461538461539, "alnum_prop": 0.505938242280285, "repo_name": "Timurdov/Python", "id": "a00a73b6d48a4451e5c1de0e2dea3533a78d5a4c", "size": "2129", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "resume/person/migrations/0004_auto_20150226_1357.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "2530" }, { "name": "HTML", "bytes": "29623" }, { "name": "JavaScript", "bytes": "4384" }, { "name": "Python", "bytes": "26426" } ], "symlink_target": "" }
import unittest import uuid from collections import namedtuple from mock import Mock, patch from spoppy import menus, responses from . import utils MockLoader = namedtuple('Loader', ('results', )) class TestOptions(unittest.TestCase): def setUp(self): self.dct = { '1': menus.MenuValue('A', Mock()), '2': menus.MenuValue('B', Mock()), '3': menus.MenuValue('C', Mock()), 's': menus.MenuValue('Search', Mock()), 'kk': menus.MenuValue('pp', Mock()), 'ko': menus.MenuValue('p', Mock()), 'q': menus.MenuValue('o', Mock()), 'o': menus.MenuValue('q', Mock()), } self.op = menus.Options(self.dct) def test_options_filter_empty(self): self.assertEqual(self.op, self.op.filter('')) def test_filter_by_key(self): tc = self.op.filter('1') self.assertEqual(len(tc), 1) self.assertIn('1', tc) tc = self.op.filter('k') self.assertEqual(len(tc), 2) self.assertIn('kk', tc) self.assertIn('ko', tc) tc = self.op.filter('s') self.assertEqual(len(tc), 1) self.assertIn('s', tc) def test_filter_by_name(self): tc = self.op.filter('p') self.assertEqual(len(tc), 2) self.assertIn('kk', tc) self.assertIn('ko', tc) tc = self.op.filter('pp') self.assertEqual(len(tc), 1) self.assertIn('kk', tc) tc = self.op.filter('Sea') self.assertEqual(len(tc), 1) self.assertIn('s', tc) def test_fuzzy_filter(self): dct = { '1': menus.MenuValue('This is a playlist', Mock()) } op = menus.Options(dct) should_match = ( 'ThisIsAPlaylist', 'ThisPlaylist', 'tiaplay', ) for _filter in should_match: self.assertEqual(len(op.filter(_filter)), 1) def test_filter_is_case_insensitive(self): self.assertEqual(self.op.filter('Search'), self.op.filter('search')) def test_filter_returns_empty_if_no_match(self): self.assertEqual(len(self.op.filter('asdf')), 0) def test_get_possibilities_from_name_and_key(self): tc = self.op.get_possibilities('q') self.assertEqual(len(tc), 2) self.assertEqual(sorted(tc), sorted(['q', 'o'])) def test_possibility_not_duplicated(self): tc = self.op.get_possibilities('s') self.assertEqual(len(tc), 1) 
self.assertIn('s', tc) def test_possiblities_with_spaces(self): op = menus.Options({ ' a': menus.MenuValue('p', Mock()), 'b ': menus.MenuValue('p', Mock()), ' c ': menus.MenuValue('p', Mock()), }) for key in 'a', 'b', 'c': tc = op.get_possibilities(key) self.assertEqual(len(tc), 1) self.assertNotEqual(tc, [key]) self.assertIn(key, tc[0]) def test_matches_by_correct_key(self): op = menus.Options({ 'k': menus.MenuValue('1', Mock()), 'kk': menus.MenuValue('2', Mock()), 'kkk': menus.MenuValue('3', Mock()), }) best = op.match_best_or_none('k') self.assertEqual(best.name, '1') best = op.match_best_or_none('kk') self.assertEqual(best.name, '2') best = op.match_best_or_none('kkk') self.assertEqual(best.name, '3') def test_matches_by_correct_padded_key(self): op = menus.Options({ ' a': menus.MenuValue('1', Mock()), 'b ': menus.MenuValue('2', Mock()), ' c ': menus.MenuValue('3', Mock()), ' s i ': menus.MenuValue('4', Mock()), }) best = op.match_best_or_none('a') self.assertEqual(best.name, '1') best = op.match_best_or_none('b') self.assertEqual(best.name, '2') best = op.match_best_or_none('c') self.assertEqual(best.name, '3') best = op.match_best_or_none('si') self.assertEqual(best.name, '4') def test_check_unique_keys(self): with self.assertRaises(TypeError): menus.Options({ 'a': menus.MenuValue('p', Mock()), ' a': menus.MenuValue('k', Mock()) }) with self.assertRaises(TypeError): self.op[' 1'] = menus.MenuValue('1', Mock()) class MenuTests(unittest.TestCase): def setUp(self): self.navigator = Mock() self.navigator.get_ui_height.return_value = 100 class SubMenu(menus.Menu): def get_options(self): return {} self.submenu = SubMenu(self.navigator) def test_must_be_subclassed(self): m = menus.Menu(self.navigator) with self.assertRaises(NotImplementedError): m.get_options() def test_global_options_correct(self): self.submenu.INCLUDE_UP_ITEM = False self.navigator.player.has_been_loaded.return_value = False self.submenu.initialize() included_items = [ value.destination for value 
in self.submenu._options.values() ] self.assertEqual(len(included_items), 1) self.assertIn(responses.QUIT, included_items) self.assertNotIn(responses.UP, included_items) self.assertNotIn(responses.PLAYER, included_items) self.submenu.INCLUDE_UP_ITEM = True self.navigator.player.has_been_loaded.return_value = False self.submenu.initialize() included_items = [ value.destination for value in self.submenu._options.values() ] self.assertEqual(len(included_items), 2) self.assertIn(responses.QUIT, included_items) self.assertIn(responses.UP, included_items) self.assertNotIn(responses.PLAYER, included_items) self.submenu.INCLUDE_UP_ITEM = False self.navigator.player.has_been_loaded.return_value = True self.submenu.initialize() included_items = [ value.destination for value in self.submenu._options.values() ] self.assertEqual(len(included_items), 2) self.assertIn(responses.QUIT, included_items) self.assertNotIn(responses.UP, included_items) self.assertIn(responses.PLAYER, included_items) self.submenu.INCLUDE_UP_ITEM = True self.navigator.player.has_been_loaded.return_value = True self.submenu.initialize() included_items = [ value.destination for value in self.submenu._options.values() ] self.assertEqual(len(included_items), 3) self.assertIn(responses.QUIT, included_items) self.assertIn(responses.UP, included_items) self.assertIn(responses.PLAYER, included_items) def test_filter_initialized_correctly(self): self.assertFalse(hasattr(self.submenu, 'filter')) self.submenu.initialize() self.assertTrue(hasattr(self.submenu, 'filter')) self.assertEqual(self.submenu.filter, '') @patch('spoppy.menus.single_char_with_timeout') def test_pagination_keys(self, patched_chargetter): self.assertEqual(self.submenu.PAGE, 0) patched_chargetter.return_value = menus.Menu.DOWN_ARROW self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.PAGE, 1) patched_chargetter.return_value = menus.Menu.UP_ARROW self.assertEqual(self.submenu.get_response(), responses.NOOP) 
self.assertEqual(self.submenu.PAGE, 0) patched_chargetter.return_value = menus.Menu.UP_ARROW self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.PAGE, 0) @patch('spoppy.menus.single_char_with_timeout') def test_backspace(self, patched_chargetter): self.submenu.initialize() patched_chargetter.return_value = b'a' self.assertEqual(self.submenu.filter, '') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'a') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'aa') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'aaa') patched_chargetter.return_value = menus.Menu.BACKSPACE self.assertEqual(self.submenu.filter, 'aaa') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'aa') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'a') self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, '') @patch('spoppy.menus.Menu.is_valid_response') @patch('spoppy.menus.single_char_with_timeout') def test_return(self, patched_chargetter, patched_is_valid): destination = 'DESTINATION' patched_is_valid.return_value = menus.MenuValue('TEST', destination) self.submenu.initialize() patched_chargetter.return_value = b'\n' self.assertEqual(self.submenu.get_response(), destination) patched_is_valid.assert_called_once_with() @patch('spoppy.menus.single_char_with_timeout') def test_checks_for_end_of_track(self, patched_chargetter): patched_chargetter.side_effect = [None, None, b'a'] self.submenu.initialize() self.assertEqual(self.submenu.get_response(), responses.NOOP) self.assertEqual(self.submenu.filter, 'a') self.assertEqual( self.navigator.player.check_end_of_track.call_count, 3 ) @patch('spoppy.menus.Options.match_best_or_none') def 
test_is_valid_uses_options(self, patched_match_best_or_none): patched_match_best_or_none.return_value = 'RETVAL' self.submenu.initialize() self.submenu.filter = 'ASDF' self.assertEqual(self.submenu.is_valid_response(), 'RETVAL') patched_match_best_or_none.assert_called_once_with('ASDF') @patch('spoppy.menus.Options.filter') def test_ui_filters_items(self, patched_filter): self.submenu.initialize() patched_filter.return_value = self.submenu._options self.submenu.get_ui() patched_filter.assert_not_called() self.submenu.filter = 'a' self.submenu.get_ui() patched_filter.assert_called_once_with('a') @patch('spoppy.menus.sorted_menu_items') def test_no_matches_warning_shown(self, patched_sorter): self.submenu.initialize() self.submenu.filter = '' patched_sorter.return_value = [] ui = self.submenu.get_ui() has_filter_in_line = [line for line in ui if 'No matches' in line] self.assertEqual(len(has_filter_in_line), 1) @patch('spoppy.menus.Menu.get_menu_item') def test_uses_get_menu_item(self, patched_get_menu_item): self.submenu.initialize() self.submenu.filter = '' patched_get_menu_item.return_value = 'OHAI' ui = self.submenu.get_ui() self.assertEqual( patched_get_menu_item.call_count, len([line for line in ui if line == 'OHAI']) ) def test_shows_indicator_if_one_match(self): self.submenu.filter = 'a' self.submenu.get_options = Mock() self.submenu.get_options.return_value = menus.Options({ 'the_key': menus.MenuValue('sindri', Mock()), 'foo': menus.MenuValue('foo', Mock()), 'bar': menus.MenuValue('bar', Mock()), }) self.submenu.initialize() ui = self.submenu.get_ui() self.assertEqual(len([line for line in ui if 'sindri' in line]), 1) self.submenu.filter = 'the_key' ui = self.submenu.get_ui() self.assertEqual(len([line for line in ui if 'sindri' in line]), 2) def test_pagination_ui(self): option_indicator = 'THIS IS AN OPTION' random_options = { str(uuid.uuid4()): menus.MenuValue(option_indicator, Mock()) for i in range(1000) } get_options = Mock() get_options.return_value = 
random_options self.submenu.get_options = get_options self.submenu.initialize() seen_options = 0 last_page = -1 while last_page != self.submenu.PAGE: ui = self.submenu.get_ui() if self.submenu.PAGE == last_page: break seen_options += len([ line for line in ui if option_indicator in line ]) last_page = self.submenu.PAGE self.submenu.PAGE += 1 self.assertEqual(seen_options, len(random_options)) class TestSubMenus(unittest.TestCase): def setUp(self): self.navigator = Mock() self.navigator.session.playlist_container = [] def get_playlist_selected(self): ps = menus.PlayListSelected(self.navigator) tracks = [ utils.Track('Lazarus', ['David Bowie']), utils.Track('Best song ever', ['Sindri'], False), utils.Track('Blackstar', ['David Bowie']), utils.Track('Ziggy Stardust', ['David Bowie']), ] ps.playlist = utils.Playlist('Playlist', tracks) ps.disable_loader() return ps def test_playlist_overview_shows_all_playlists(self): self.playlists = [ utils.Playlist('A', [utils.Track('foo', ['bar'])]), utils.Playlist('B', [utils.Track('foo', ['bar'])]), utils.Playlist('C', [utils.Track('foo', ['bar'])]), ] class Session(object): playlist_container = self.playlists self.navigator.session = Session() pov = menus.PlayListOverview(self.navigator) pov.disable_loader() pov.loader = MockLoader([item, {}] for item in self.playlists) options = menus.Options(pov.get_options()) self.assertTrue( all( isinstance(value.destination, menus.PlayListSelected) for value in options.values() ) ) for playlist in self.playlists: self.assertIsNotNone(options.match_best_or_none(playlist.name)) def test_playlist_overview_shows_invalid_playlists_as_well(self): self.playlists = [ utils.Playlist('', []), utils.Playlist('A', [utils.Track('foo', ['bar'])]), utils.Playlist('B', []), utils.Playlist( 'C', [utils.Track('foo', ['bar'], available=False)] ), utils.Playlist('D', []), ] del self.playlists[1].link class Session(object): playlist_container = self.playlists self.navigator.session = Session() pov = 
menus.PlayListOverview(self.navigator) pov.disable_loader() pov.loader = MockLoader([[item, {}] for item in self.playlists]) options = menus.Options(pov.get_options()) self.assertEqual(len(options), 5) all_playlist_options = [ t.destination.playlist for t in list(options.values()) ] for playlist in self.playlists: self.assertIn(playlist, all_playlist_options) def test_playlist_selected_does_not_fail_on_empty_playlist(self): ps = menus.PlayListSelected(self.navigator) ps.playlist = utils.Playlist('asdf', []) ps.disable_loader() self.navigator.session.playlist_container = [ps.playlist] # Only delete and radio available self.assertEqual(len(ps.get_options()), 2) self.navigator.spotipy_client = None # Only delete available self.assertEqual(len(ps.get_options()), 1) def test_playlist_selected_contains_only_valid_tracks(self): ps = self.get_playlist_selected() options = menus.Options(ps.get_options()) self.assertIsNotNone(options.match_best_or_none('1')) self.assertIsNotNone(options.match_best_or_none('2')) self.assertIsNotNone(options.match_best_or_none('3')) self.assertIsNone(options.match_best_or_none('4')) def test_shows_shuffle_play(self): ps = self.get_playlist_selected() options = menus.Options(ps.get_options()) destinations = [value.destination for value in options.values()] self.assertIn(ps.shuffle_play, destinations) def test_shows_add_to_queue_if_playing(self): ps = self.get_playlist_selected() self.navigator.player.is_playing.return_value = False options = menus.Options(ps.get_options()) destinations = [value.destination for value in options.values()] self.assertNotIn(ps.add_to_queue, destinations) self.navigator.player.is_playing.return_value = True options = menus.Options(ps.get_options()) destinations = [value.destination for value in options.values()] self.assertIn(ps.add_to_queue, destinations) def test_select_song(self): ps = self.get_playlist_selected() song_selected = ps.select_song(0) self.navigator.player.is_playing.return_value = False 
self.assertIsInstance(song_selected(), menus.SongSelectedWhilePlaying) self.navigator.player.play_track.assert_not_called() self.navigator.player.is_playing.return_value = True song_selected_result = song_selected() self.assertIsInstance( song_selected_result, menus.SongSelectedWhilePlaying ) self.assertEqual(song_selected_result.playlist, ps.playlist) self.assertEqual(song_selected_result.track, ps.playlist.tracks[0]) self.navigator.player.play_track.assert_not_called() class TestSearch(unittest.TestCase): def setUp(self): self.navigator = Mock() @patch('spoppy.menus.Menu.get_response') def test_uses_parent_get_response(self, patched_get_response): for cls in ( menus.TrackSearchResults, menus.AlbumSearchResults, menus.TrackSearch, menus.AlbumSearch ): patched_get_response.reset_mock() patched_get_response.return_value = 'foobar' menu = cls(self.navigator) self.assertEqual(menu.get_response(), 'foobar') patched_get_response.assert_called_once_with() @patch('spoppy.menus.TrackSearchResults.update_cache') def test_updates_cache_on_init(self, patched_update): search = 'foobar' menu = menus.TrackSearchResults(self.navigator) menu.set_initial_results(search) patched_update.assert_called_once_with() def test_get_update_cache(self): search = 'foobar' menu = menus.TrackSearchResults(self.navigator) self.assertEqual(len(menu.get_cache()), 0) menu.search = search menu.update_cache() self.assertIn(search, menu.get_cache()) @patch('spoppy.menus.TrackSearchResults.search') def test_resets_paginating(self, patched_search): patched_search.loaded_event.wait.return_value = True menu = menus.TrackSearchResults(self.navigator) menu.paginating = True self.assertEqual(menu.get_response(), menu) self.assertFalse(menu.paginating) patched_search.loaded_event.wait.assert_called_once_with() @patch('spoppy.menus.TrackSearchResults.update_cache') @patch('spoppy.menus.search') @patch('spoppy.menus.TrackSearchResults.get_cache') def test_go_to_from_cache( self, patched_cache, patched_search, 
patched_update ): patched_cache.return_value = [Mock(), Mock()] menu = menus.TrackSearchResults(self.navigator) menu.search = patched_cache.return_value[0] # next_page callback = menu.go_to(1) self.assertEqual(callback(), menu) self.assertEqual(menu.search, patched_cache.return_value[1]) self.assertTrue(menu.paginating) patched_update.assert_not_called() patched_search.assert_not_called() # previous_page callback = menu.go_to(-1) self.assertTrue(callable(callback)) self.assertEqual(callback(), menu) self.assertEqual(menu.search, patched_cache.return_value[0]) self.assertTrue(menu.paginating) patched_update.assert_not_called() patched_search.assert_not_called() @patch('spoppy.menus.TrackSearchResults.update_cache') @patch('spoppy.menus.search') @patch('spoppy.menus.TrackSearchResults.get_cache') def test_go_to_from_search( self, patched_cache, patched_search, patched_update ): patched_cache.return_value = [Mock()] patched_search.return_value = Mock() menu = menus.TrackSearchResults(self.navigator) menu.search = patched_cache.return_value[0] callback = menu.go_to(1) self.assertTrue(callable(callback)) self.assertEqual(callback(), menu) self.assertEqual(menu.search, patched_search.return_value) self.assertTrue(menu.paginating) patched_update.assert_called_once_with() # Don't check for how it was called, at least not at the moment self.assertEqual(patched_search.call_count, 1) def test_mock_playlist_contains_term_in_search(self): menu = menus.TrackSearchResults(self.navigator) menu.search = Mock() menu.search.results.term = 'foobar' self.assertIn('foobar', menu.get_mock_playlist_name()) @patch('spoppy.menus.TrackSearchResults.search') def test_select_song_while_playing(self, patched_self_search): patched_self_search.results.results = ['foo'] self.navigator.player.is_playing.return_value = True menu = menus.TrackSearchResults(self.navigator) callback = menu.select_song(0) self.assertTrue(callable(callback)) res = callback() self.assertIsInstance(res, 
menus.SongSelectedWhilePlaying) self.assertEqual(res.track, 'foo') @patch('spoppy.menus.TrackSearchResults.search') def test_select_song_while_paused(self, patched_self_search): patched_self_search.results.results = ['foo'] self.navigator.player.is_playing.return_value = False menu = menus.TrackSearchResults(self.navigator) callback = menu.select_song(0) self.assertTrue(callable(callback)) res = callback() # self.assertEqual(res, self.navigator.player) self.assertIsInstance(res, menus.SongSelectedWhilePlaying) self.assertEqual(res.track, 'foo') # self.navigator.player.clear.assert_called_once_with() # self.navigator.player.add_to_queue.assert_called_once_with('foo') # self.navigator.player.play_track.assert_called_once_with(0) @patch('spoppy.menus.TrackSearchResults.search') def test_get_res_idx(self, patched_self_search): menu = menus.TrackSearchResults(self.navigator) for i in range(0, 5, 20): patched_self_search.results.offset = i self.assertEqual(menu.get_res_idx(0), i+1) @patch('spoppy.menus.Menu.get_ui') def test_returns_different_ui_while_paginating(self, patched_get_ui): menu = menus.TrackSearchResults(self.navigator) first_one = menu.get_ui() menu.paginating = True second_one = menu.get_ui() self.assertNotEqual(first_one, second_one) patched_get_ui.assert_called_once_with() def test_get_options(self): menu = menus.TrackSearchResults(self.navigator) menu.search = Mock() menu.search.results.previous_page = True menu.search.results.next_page = True menu.search.results.offset = 1 menu.search.results.results = [] menu.paginating = True self.assertEqual(len(menu.get_options()), 0) menu.paginating = False # Last page, next page self.assertEqual(len(menu.get_options()), 2) menu.search.results.previous_page = False # Only next page self.assertEqual(len(menu.get_options()), 1) menu.search.results.next_page = False self.assertEqual(len(menu.get_options()), 0) menu.search.results.results = [utils.Track('foo', 'bar')] # Shuffle and the song itself 
        self.assertEqual(len(menu.get_options()), 2)


class TestPlaylistSaver(unittest.TestCase):
    """Tests for the SavePlaylist menu and the Spotipy OAuth login menu."""

    def setUp(self):
        # A bare Mock stands in for the navigator; attribute access on it
        # (spotipy_client, lifecycle, ...) creates child mocks on demand.
        self.navigator = Mock()

    def test_returns_different_ui_while_paginating(self):
        """get_ui() output changes once the menu enters saving mode."""
        menu = menus.SavePlaylist(self.navigator)
        menu.filter = ''
        menu.song_list = []
        first_one = menu.get_ui()
        menu.is_saving = True
        menu.new_playlist_name = ''
        second_one = menu.get_ui()
        self.assertNotEqual(first_one, second_one)

    @patch('spoppy.menus.Menu.get_response')
    def test_uses_parent_get_response(self, patched_get_response):
        """SavePlaylist delegates get_response() to the base Menu class."""
        patched_get_response.reset_mock()
        patched_get_response.return_value = 'foobar'
        menu = menus.SavePlaylist(self.navigator)
        self.assertEqual(menu.get_response(), 'foobar')
        patched_get_response.assert_called_once_with()

    @patch('spoppy.menus.Playlist')
    def test_saves_playlist(self, patched_playlist):
        """With no existing playlist of that name, a new playlist is
        created, all song URIs are added, and the callback receives the
        loaded Playlist wrapper."""
        class MockSong(object):
            # Minimal stand-in exposing only .link.uri, which is what the
            # save path reads from each song.
            class Link(object):
                pass

            def __init__(self, id):
                self.link = MockSong.Link()
                self.link.uri = id
        patched_playlist.return_value = Mock()
        spotipy = self.navigator.spotipy_client
        spotipy.current_user_playlists = Mock()
        # No playlists exist yet, so the create path must be taken.
        spotipy.current_user_playlists.return_value = {
            'items': [],
        }
        spotipy.user_playlist_create = Mock()
        spotipy.user_playlist_create.return_value = {
            'id': 'some-id',
            'uri': 'some-uri',
        }
        spotipy.user_playlist_add_tracks = Mock()
        self.navigator.spotipy_me = {
            'id': 'sindrig',
        }
        menu = menus.SavePlaylist(self.navigator)
        menu.is_saving = True
        menu.new_playlist_name = 'foobar'
        menu.song_list = [MockSong(1), MockSong(2), MockSong(3)]
        menu.callback = Mock()
        self.assertEqual(menu.get_response(), responses.UP)
        spotipy.user_playlist_create.assert_called_once_with(
            user='sindrig',
            name='foobar',
        )
        spotipy.user_playlist_add_tracks.assert_called_once_with(
            user='sindrig',
            playlist_id='some-id',
            tracks=[1, 2, 3]
        )
        menu.callback.assert_called_once_with(patched_playlist.return_value)
        patched_playlist.assert_called_once_with(
            self.navigator.session,
            'some-uri',
        )
        patched_playlist.return_value.load.assert_called_once_with()

    @patch('spoppy.menus.Playlist')
    def test_edits_playlist(self, patched_playlist):
        """With a playlist of the same name already present, its tracks are
        replaced in place instead of creating a new playlist."""
        class MockSong(object):
            class Link(object):
                pass

            def __init__(self, id):
                self.link = MockSong.Link()
                self.link.uri = id
        patched_playlist.return_value = Mock()
        spotipy = self.navigator.spotipy_client
        spotipy.current_user_playlists = Mock()
        # An existing playlist named 'foobar' forces the replace path.
        spotipy.current_user_playlists.return_value = {
            'items': [{
                'id': 'some-id',
                'name': 'foobar',
                'uri': 'some-uri',
            }],
        }
        spotipy.user_playlist_create = Mock()
        spotipy.user_playlist_add_tracks = Mock()
        spotipy.user_playlist_replace_tracks = Mock()
        self.navigator.spotipy_me = {
            'id': 'sindrig',
        }
        menu = menus.SavePlaylist(self.navigator)
        menu.is_saving = True
        menu.new_playlist_name = 'foobar'
        menu.song_list = [MockSong(1), MockSong(2), MockSong(3)]
        menu.callback = Mock()
        self.assertEqual(menu.get_response(), responses.UP)
        spotipy.user_playlist_create.assert_not_called()
        spotipy.user_playlist_add_tracks.assert_not_called()
        spotipy.user_playlist_replace_tracks.assert_called_once_with(
            user='sindrig',
            playlist_id='some-id',
            tracks=[1, 2, 3]
        )
        menu.callback.assert_called_once_with(patched_playlist.return_value)
        patched_playlist.assert_called_once_with(
            self.navigator.session,
            'some-uri',
        )
        patched_playlist.return_value.load.assert_called_once_with()

    @patch('spoppy.menus.threading')
    @patch('spoppy.menus.webbrowser')
    @patch('spoppy.menus.oAuthServerThread')
    def test_spotipy_initialization(
        self, patched_server, patched_browser, patched_threading
    ):
        """initialize() starts the OAuth server and opens the authorize URL
        in a browser; if the server fails to start (server is None) no
        browser is opened and a message is set for the user."""
        sp_oauth = Mock()
        self.navigator.lifecycle.get_spotipy_oauth.return_value = sp_oauth
        sp_oauth.get_authorize_url.return_value = 'http://irdn.is/'
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.initialize()
        sp_oauth.get_authorize_url.assert_called_once_with()
        patched_server().start.assert_called_once_with()
        patched_browser.open.assert_called_once_with(
            sp_oauth.get_authorize_url.return_value
        )
        self.assertIsNone(menu.message_from_spotipy)

        patched_server.reset_mock()
        patched_browser.reset_mock()
        sp_oauth.reset_mock()

        # Simulate the oauth server failing to bind.
        patched_server().server = None
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.initialize()
        sp_oauth.get_authorize_url.assert_called_once_with()
        patched_server().start.assert_called_once_with()
        patched_browser.open.assert_not_called()
        self.assertIsNotNone(menu.message_from_spotipy)

    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_up(self, patched_chargetter):
        """Pressing 'q' or 'u' shuts the OAuth server down and goes UP."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        for quitchar in b'q', b'u':
            patched_chargetter.return_value = quitchar
            self.assertEqual(menu.get_response(), responses.UP)
            menu.oauth_server.shutdown.assert_called_once_with()
            menu.oauth_server.reset_mock()

    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_code(
        self, patched_chargetter
    ):
        """A 'code' response exchanges the code for an access token,
        refreshes the spotipy client and navigates UP."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'code': [
                'foobar'
            ]
        }
        self.assertEqual(menu.get_response(), responses.UP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_called_once_with(
            menu.sp_oauth.get_access_token('foobar')
        )
        self.navigator.refresh_spotipy_client.assert_called_once_with()

    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_error(
        self, patched_chargetter
    ):
        """An 'error' response stays on the menu (NOOP), never stores a
        token, and surfaces the error text to the user."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'error': [
                'foobar'
            ]
        }
        self.assertEqual(menu.get_response(), responses.NOOP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_not_called()
        self.assertIn('foobar', menu.message_from_spotipy)

    @patch('spoppy.menus.single_char_with_timeout')
    def test_spotipy_get_response_response_parts_invalid(
        self, patched_chargetter
    ):
        """An unrecognized response payload stays on the menu (NOOP) and
        echoes the payload values back in the message."""
        menu = menus.LogIntoSpotipy(self.navigator)
        menu.oauth_server = Mock()
        menu.sp_oauth = Mock()
        patched_chargetter.return_value = None
        menu._spotipy_response_parts = {
            'foobar': [
                'hallo', 'madur'
            ]
        }
        self.assertEqual(menu.get_response(), responses.NOOP)
        menu.oauth_server.shutdown.assert_called_once_with()
        self.navigator.lifecycle.set_spotipy_token.assert_not_called()
        self.assertIn('hallo', menu.message_from_spotipy)
        self.assertIn('madur', menu.message_from_spotipy)
{ "content_hash": "9adb57d73497e168453319697a3bb2ac", "timestamp": "", "source": "github", "line_count": 894, "max_line_length": 78, "avg_line_length": 36.355704697986575, "alnum_prop": 0.612362316165159, "repo_name": "sindrig/spoppy", "id": "c67608ec59dab60cfbd550c5c3de7d31e5bd5cf5", "size": "32502", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_menus.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "453" }, { "name": "Python", "bytes": "171954" }, { "name": "Shell", "bytes": "301" } ], "symlink_target": "" }
from lxml import etree
import webob

from nova.api.openstack.compute.contrib import quota_classes
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import test
from nova.tests.unit.api.openstack import fakes


def quota_set(class_name):
    """Return the expected quota-class-set response body for `class_name`,
    using the default quota values."""
    return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
                                'ram': 51200, 'floating_ips': 10,
                                'fixed_ips': -1, 'instances': 10,
                                'injected_files': 5, 'cores': 20,
                                'injected_file_content_bytes': 10240,
                                'security_groups': 10,
                                'security_group_rules': 20,
                                'key_pairs': 100,
                                'injected_file_path_bytes': 255}}


class QuotaClassSetsTest(test.TestCase):
    """Tests for the os-quota-class-sets API controller."""

    def setUp(self):
        super(QuotaClassSetsTest, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = quota_classes.QuotaClassSetsController(self.ext_mgr)

    def test_format_quota_set(self):
        """_format_quota_set wraps the raw values and injects the class id."""
        raw_quota_set = {
            'instances': 10,
            'cores': 20,
            'ram': 51200,
            'floating_ips': 10,
            'fixed_ips': -1,
            'metadata_items': 128,
            'injected_files': 5,
            'injected_file_path_bytes': 255,
            'injected_file_content_bytes': 10240,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100,
        }

        quota_set = self.controller._format_quota_set('test_class',
                                                      raw_quota_set)
        qs = quota_set['quota_class_set']

        self.assertEqual(qs['id'], 'test_class')
        self.assertEqual(qs['instances'], 10)
        self.assertEqual(qs['cores'], 20)
        self.assertEqual(qs['ram'], 51200)
        self.assertEqual(qs['floating_ips'], 10)
        self.assertEqual(qs['fixed_ips'], -1)
        self.assertEqual(qs['metadata_items'], 128)
        self.assertEqual(qs['injected_files'], 5)
        self.assertEqual(qs['injected_file_path_bytes'], 255)
        self.assertEqual(qs['injected_file_content_bytes'], 10240)
        self.assertEqual(qs['security_groups'], 10)
        self.assertEqual(qs['security_group_rules'], 20)
        self.assertEqual(qs['key_pairs'], 100)

    def test_quotas_show_as_admin(self):
        """An admin context is allowed to read a quota class."""
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        res_dict = self.controller.show(req, 'test_class')
        self.assertEqual(res_dict, quota_set('test_class'))

    def test_quotas_show_as_unauthorized_user(self):
        """A non-admin show request is rejected with 403 Forbidden."""
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 'test_class')

    def test_quotas_update_as_admin(self):
        """An admin update echoes the updated quota class back."""
        body = {'quota_class_set': {'instances': 50, 'cores': 50,
                                    'ram': 51200, 'floating_ips': 10,
                                    'fixed_ips': -1, 'metadata_items': 128,
                                    'injected_files': 5,
                                    'injected_file_content_bytes': 10240,
                                    'injected_file_path_bytes': 255,
                                    'security_groups': 10,
                                    'security_group_rules': 20,
                                    'key_pairs': 100}}

        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        res_dict = self.controller.update(req, 'test_class', body)

        self.assertEqual(res_dict, body)

    def test_quotas_update_as_user(self):
        """A non-admin update request is rejected with 403 Forbidden."""
        body = {'quota_class_set': {'instances': 50, 'cores': 50,
                                    'ram': 51200, 'floating_ips': 10,
                                    'fixed_ips': -1, 'metadata_items': 128,
                                    'injected_files': 5,
                                    'injected_file_content_bytes': 10240,
                                    'security_groups': 10,
                                    'security_group_rules': 20,
                                    'key_pairs': 100,
                                    }}

        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'test_class', body)

    def test_quotas_update_with_empty_body(self):
        """An empty request body is rejected with 400 Bad Request."""
        body = {}
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'test_class', body)

    def test_quotas_update_with_non_integer(self):
        """Non-integer quota values (string, float, unicode text) are all
        rejected with 400 Bad Request."""
        body = {'quota_class_set': {'instances': "abc"}}
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'test_class', body)

        body = {'quota_class_set': {'instances': 50.5}}
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, 'test_class', body)

        body = {'quota_class_set': {
                'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
        req = fakes.HTTPRequest.blank(
            '/v2/fake4/os-quota-class-sets/test_class',
            use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, 'test_class', body)


class QuotaTemplateXMLSerializerTest(test.TestCase):
    """XML (de)serialization round-trips of quota class sets."""

    def setUp(self):
        super(QuotaTemplateXMLSerializerTest, self).setUp()
        self.serializer = quota_classes.QuotaClassTemplate()
        self.deserializer = wsgi.XMLDeserializer()

    def test_serializer(self):
        exemplar = dict(quota_class_set=dict(
                id='test_class',
                metadata_items=10,
                injected_file_path_bytes=255,
                injected_file_content_bytes=20,
                ram=50,
                floating_ips=60,
                fixed_ips=-1,
                instances=70,
                injected_files=80,
                security_groups=10,
                security_group_rules=20,
                key_pairs=100,
                cores=90))
        text = self.serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('quota_class_set', tree.tag)
        self.assertEqual('test_class', tree.get('id'))
        # 'id' is rendered as an attribute rather than a child element,
        # hence the "- 1" when comparing against the exemplar keys.
        self.assertEqual(len(exemplar['quota_class_set']) - 1, len(tree))
        for child in tree:
            self.assertIn(child.tag, exemplar['quota_class_set'])
            self.assertEqual(int(child.text),
                             exemplar['quota_class_set'][child.tag])

    def test_deserializer(self):
        # Deserialized values stay as strings; conversion is a later step.
        exemplar = dict(quota_class_set=dict(
                metadata_items='10',
                injected_file_content_bytes='20',
                ram='50',
                floating_ips='60',
                fixed_ips='-1',
                instances='70',
                injected_files='80',
                security_groups='10',
                security_group_rules='20',
                key_pairs='100',
                cores='90'))
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<quota_class_set>'
                  '<metadata_items>10</metadata_items>'
                  '<injected_file_content_bytes>20'
                  '</injected_file_content_bytes>'
                  '<ram>50</ram>'
                  '<floating_ips>60</floating_ips>'
                  '<fixed_ips>-1</fixed_ips>'
                  '<instances>70</instances>'
                  '<injected_files>80</injected_files>'
                  '<cores>90</cores>'
                  '<security_groups>10</security_groups>'
                  '<security_group_rules>20</security_group_rules>'
                  '<key_pairs>100</key_pairs>'
                  '</quota_class_set>')

        result = 
self.deserializer.deserialize(intext)['body'] self.assertEqual(result, exemplar)
{ "content_hash": "f5c5ab8dca625f1a81b676cbbcdf5f63", "timestamp": "", "source": "github", "line_count": 207, "max_line_length": 78, "avg_line_length": 41.68599033816425, "alnum_prop": 0.5158187507243017, "repo_name": "luzheqi1987/nova-annotation", "id": "228b44f369ce02f49d3dcfbd94d7de0528a9e766", "size": "9265", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "15206909" }, { "name": "Shell", "bytes": "18273" } ], "symlink_target": "" }
from collections import defaultdict

# 3rd party
from boto.s3.connection import S3Connection
import simplejson as json

# project
from checks import AgentCheck
from config import _is_affirmative


def multidict(ordered_pairs):
    """Convert duplicate keys values to lists."""
    # read all values into lists
    d = defaultdict(list)
    for k, v in ordered_pairs:
        d[k].append(v)
    # unpack lists that have only 1 item
    for k, v in d.items():
        if len(v) == 1:
            d[k] = v[0]
    return dict(d)


class RiakCs(AgentCheck):
    """Agent check that reads the statistics Riak CS publishes in the
    ``riak-cs/stats`` S3 object and reports them as gauges.

    Connectivity is reported through the ``riakcs.can_connect`` service
    check, tagged with ``aggregation_key:<host>:<port>``.
    """

    STATS_BUCKET = 'riak-cs'
    STATS_KEY = 'stats'
    SERVICE_CHECK_NAME = 'riakcs.can_connect'

    def check(self, instance):
        """Entry point: connect, download the stats, emit an OK service
        check and then one gauge per statistic."""
        s3, aggregation_key, tags = self._connect(instance)

        stats = self._get_stats(s3, aggregation_key)

        self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
                           tags=["aggregation_key:{0}".format(aggregation_key)])

        self.process_stats(stats, tags)

    def process_stats(self, stats, tags):
        """Emit a gauge for every (stat key, legend entry) pair.

        The stats payload carries a "legend" section; the legend that
        applies to a value list is looked up by matching lengths.
        Raises if the payload is empty.
        """
        if not stats:
            raise Exception("No stats were collected")

        # Map legend length -> legend, so each value list can find the
        # legend of matching arity.
        legends = dict([(len(k), k) for k in stats["legend"]])
        del stats["legend"]
        for key, values in stats.iteritems():
            legend = legends[len(values)]
            for i, value in enumerate(values):
                metric_name = "riakcs.{0}.{1}".format(key, legend[i])
                self.gauge(metric_name, value, tags=tags)

    def _connect(self, instance):
        """Build an S3 connection from the instance config.

        Raises if the required credentials are missing; emits a CRITICAL
        service check (and re-raises) when the connection cannot be made.
        Returns (connection, aggregation_key, tags).
        """
        for e in ("access_id", "access_secret"):
            if e not in instance:
                raise Exception("{0} parameter is required.".format(e))

        s3_settings = {
            "aws_access_key_id": instance.get('access_id', None),
            "aws_secret_access_key": instance.get('access_secret', None),
            "proxy": instance.get('host', 'localhost'),
            "proxy_port": int(instance.get('port', 8080)),
            "is_secure": _is_affirmative(instance.get('is_secure', True))
        }

        if instance.get('s3_root'):
            s3_settings['host'] = instance['s3_root']

        aggregation_key = s3_settings['proxy'] + ":" + str(s3_settings['proxy_port'])

        try:
            s3 = S3Connection(**s3_settings)
        except Exception, e:
            self.log.error("Error connecting to {0}: {1}".format(aggregation_key, e))
            self.service_check(
                self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                tags=["aggregation_key:{0}".format(aggregation_key)],
                message=str(e))
            raise

        tags = instance.get("tags", [])
        tags.append("aggregation_key:{0}".format(aggregation_key))

        return s3, aggregation_key, tags

    def _get_stats(self, s3, aggregation_key):
        """Download and decode the stats JSON object.

        Emits a CRITICAL service check (and re-raises) on any failure.
        """
        try:
            bucket = s3.get_bucket(self.STATS_BUCKET, validate=False)
            key = bucket.get_key(self.STATS_KEY)
            stats_str = key.get_contents_as_string()
            stats = self.load_json(stats_str)
        except Exception, e:
            self.log.error("Error retrieving stats from {0}: {1}".format(aggregation_key, e))
            self.service_check(
                self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                tags=["aggregation_key:{0}".format(aggregation_key)],
                message=str(e))
            raise

        return stats

    # We need this as the riak cs stats page returns json with duplicate keys
    @classmethod
    def load_json(cls, text):
        return json.JSONDecoder(object_pairs_hook=multidict).decode(text)
{ "content_hash": "fcf8fadaf0fc13becf9ea260426c909c", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 93, "avg_line_length": 32.94444444444444, "alnum_prop": 0.5865654862282181, "repo_name": "pmav99/praktoras", "id": "b47087dbef36356428b9cab520c227ca011d3084", "size": "3750", "binary": false, "copies": "1", "ref": "refs/heads/conmon-13", "path": "checks.d/riakcs.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "2717" }, { "name": "Go", "bytes": "2389" }, { "name": "HTML", "bytes": "9060" }, { "name": "Nginx", "bytes": "3908" }, { "name": "PowerShell", "bytes": "2661" }, { "name": "Python", "bytes": "2179610" }, { "name": "Ruby", "bytes": "103726" }, { "name": "Shell", "bytes": "58242" }, { "name": "XSLT", "bytes": "2222" } ], "symlink_target": "" }
from dateutil.parser import parse as dateutil_parse
import json
import logging
import re

from django.conf import settings

from libya_elections.constants import INCOMING

from .models import VumiLog

logger = logging.getLogger(__name__)

LOG_PATTERN = {
    'smpp_inbound': re.compile(
        r'(?P<date>[\d\-\+\:\sT]+) .*'
        r'Processed inbound message .*?'
        r': (?P<message>.*)'),
    'smpp_outbound': re.compile(
        r'(?P<date>[\d\-\+\:\sT]+) .*'
        r'Processed outbound message .*?'
        r': (?P<message>.*)'),
}


class LogParser(object):
    """Extract Vumi TransportUserMessages from log files and persist each
    matching entry in the :class:`VumiLog` table.

    A matching log line starts with an ISO-ish timestamp, contains
    "Processed inbound message" or "Processed outbound message" depending
    on direction, and ends with a JSON payload carrying the message fields
    (``from_addr``, ``to_addr``, ``content``, ``message_id`` and, for
    outbound messages, a ``helper_metadata`` block with the rapidsms
    message id).
    """

    def __init__(self, direction):
        # The direction picks which of the two compiled patterns applies.
        self.direction = direction
        self.vumi_logs = settings.VUMI_LOGS
        pattern_key = 'smpp_inbound' if direction == INCOMING else 'smpp_outbound'
        self.log_pattern = LOG_PATTERN.get(pattern_key)

    def parse(self):
        """Open every configured log file and parse it line by line."""
        for log_path in self.vumi_logs:
            try:
                with open(log_path) as log_file:
                    for raw_line in log_file:
                        try:
                            self.parse_line(raw_line)
                        except IndexError:
                            # A malformed line must not abort parsing of
                            # the rest of the file.
                            logger.info("Exception in line: {0}".format(raw_line))
            except IOError as ex:
                # The configured file may simply not exist.
                logger.info(ex)

    def parse_line(self, line):
        """Match one line against the pattern and store it in the database.

        Lines that do not match (any other log output) are silently
        skipped.
        """
        match = self.log_pattern.match(line)
        if not match:
            return
        parts = match.groupdict()
        logged_at = dateutil_parse(parts['date'])
        payload = json.loads(parts['message'])
        logger.debug("parsing {0}".format(parts['message']))
        self.save(logged_at, line, **payload)

    def save(self, date, raw_text, **kwargs):
        """Persist one parsed log entry; return the VumiLog or None.

        Testing and production share a log file, so only entries whose
        short code belongs to the current environment are stored.
        """
        if self.direction == INCOMING:
            uuid = kwargs['message_id']
        else:
            # vumi stores the outgoing uuid in a different key
            uuid = kwargs['helper_metadata']['rapidsms']['rapidsms_msg_id']
        from_addr = kwargs.get('from_addr', '')
        to_addr = kwargs.get('to_addr', '')
        content = kwargs.get('content', '')
        short_codes = settings.SHORT_CODES
        if from_addr not in short_codes and to_addr not in short_codes:
            # This entry belongs to the other environment; ignore it.
            return
        log, _ = VumiLog.objects.get_or_create(
            uuid=uuid,
            direction=self.direction,
            defaults={'logged_date': date,
                      'raw_text': raw_text,
                      'from_addr': from_addr,
                      'to_addr': to_addr,
                      'content': content}
        )
        return log
{ "content_hash": "cfe7dcfbb1d4e6f1df718519d91e450b", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 97, "avg_line_length": 38.62068965517241, "alnum_prop": 0.5678571428571428, "repo_name": "SmartElect/SmartElect", "id": "ff9720c9c0eb143ae60f5731ddcf45ed01efdebc", "size": "4595", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "audit/parser.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "43928" }, { "name": "HTML", "bytes": "175822" }, { "name": "JavaScript", "bytes": "475284" }, { "name": "Python", "bytes": "1848271" }, { "name": "Shell", "bytes": "1834" } ], "symlink_target": "" }
from org.transcrypt.stubs.browser import __pragma__ import re from basictests import * def run (test): """ basic tests of the re engine. The point is to exercise most of the methods to make sure they behave as expected. These tests are expected to provide exhaustive coverage of the regex engine. """ checkFlagsExist(test) escapeTests(test) checkMatchProperties(test) checkRegexProperties(test) checkIgnoreCase(test) checkSearchWithGroups(test) checkMatchOps(test) checkMatchWithGroups(test) # checkMatchWithNamedGroups(test) # !!! @JdeH temporarily disabled this checkFullMatchOps(test) checkFindAllOps(test) checkSplitOps(test) checkSubOps(test) checkSyntaxErrors(test) checkConditionalGroups(test) checkCommentGroup(test) checkWithFlags(test) checkFindIter(test)
{ "content_hash": "be55bbe0773a9311e9c34a5c266eb419", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 85, "avg_line_length": 27.484848484848484, "alnum_prop": 0.6945975744211687, "repo_name": "QQuick/Transcrypt", "id": "05d433a2c3bd73cb00b30f8f61d82e1fe9afedaa", "size": "1031", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "transcrypt/development/automated_tests/re/basic_pyre.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "5571" }, { "name": "C++", "bytes": "603" }, { "name": "HTML", "bytes": "28940" }, { "name": "JavaScript", "bytes": "662845" }, { "name": "Makefile", "bytes": "5733" }, { "name": "Python", "bytes": "1347703" }, { "name": "Shell", "bytes": "471" } ], "symlink_target": "" }
import pytest
import numpy as np

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import assert_array_equal

from imblearn.ensemble import RUSBoostClassifier


@pytest.fixture
def imbalanced_dataset():
    """Three-class dataset with heavily skewed class frequencies
    (roughly 1% / 5% / 94%)."""
    return make_classification(
        n_samples=10000,
        n_features=3,
        n_informative=2,
        n_redundant=0,
        n_repeated=0,
        n_classes=3,
        n_clusters_per_class=1,
        weights=[0.01, 0.05, 0.94],
        class_sep=0.8,
        random_state=0,
    )


@pytest.mark.parametrize(
    "boosting_params, err_type, err_msg",
    [
        (
            {"n_estimators": "whatever"},
            TypeError,
            "n_estimators must be an instance of int, not str.",
        ),
        ({"n_estimators": -100}, ValueError, "n_estimators == -100, must be >= 1."),
    ],
)
def test_rusboost_error(imbalanced_dataset, boosting_params, err_type, err_msg):
    """Invalid ``n_estimators`` values raise with the expected message."""
    rusboost = RUSBoostClassifier(**boosting_params)
    with pytest.raises(err_type, match=err_msg):
        rusboost.fit(*imbalanced_dataset)


@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost(imbalanced_dataset, algorithm):
    """Fitting yields internally-consistent ensembles, well-shaped
    predictions and an acceptable score for both boosting algorithms."""
    X, y = imbalanced_dataset
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, random_state=1
    )
    classes = np.unique(y)

    n_estimators = 500
    rusboost = RUSBoostClassifier(
        n_estimators=n_estimators, algorithm=algorithm, random_state=0
    )
    rusboost.fit(X_train, y_train)
    assert_array_equal(classes, rusboost.classes_)

    # check that we have an ensemble of samplers and estimators with a
    # consistent size
    assert len(rusboost.estimators_) > 1
    assert len(rusboost.estimators_) == len(rusboost.samplers_)
    assert len(rusboost.pipelines_) == len(rusboost.samplers_)

    # each sampler in the ensemble should have different random state
    assert len({sampler.random_state for sampler in rusboost.samplers_}) == len(
        rusboost.samplers_
    )
    # each estimator in the ensemble should have different random state
    assert len({est.random_state for est in rusboost.estimators_}) == len(
        rusboost.estimators_
    )

    # check the consistency of the feature importances
    assert len(rusboost.feature_importances_) == imbalanced_dataset[0].shape[1]

    # check the consistency of the prediction outputs
    y_pred = rusboost.predict_proba(X_test)
    assert y_pred.shape[1] == len(classes)
    assert rusboost.decision_function(X_test).shape[1] == len(classes)

    score = rusboost.score(X_test, y_test)
    assert score > 0.6, f"Failed with algorithm {algorithm} and score {score}"

    y_pred = rusboost.predict(X_test)
    assert y_pred.shape == y_test.shape


@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost_sample_weight(imbalanced_dataset, algorithm):
    """All-ones sample weights match an unweighted fit, while random
    weights change the predictions."""
    X, y = imbalanced_dataset
    sample_weight = np.ones_like(y)
    rusboost = RUSBoostClassifier(algorithm=algorithm, random_state=0)

    # Predictions should be the same when sample_weight are all ones
    y_pred_sample_weight = rusboost.fit(X, y, sample_weight).predict(X)
    y_pred_no_sample_weight = rusboost.fit(X, y).predict(X)

    assert_array_equal(y_pred_sample_weight, y_pred_no_sample_weight)

    rng = np.random.RandomState(42)
    sample_weight = rng.rand(y.shape[0])
    y_pred_sample_weight = rusboost.fit(X, y, sample_weight).predict(X)

    # Non-uniform weights must produce a different prediction vector.
    with pytest.raises(AssertionError):
        assert_array_equal(y_pred_no_sample_weight, y_pred_sample_weight)
{ "content_hash": "38261c373b82f7afbbef12b4391a6def", "timestamp": "", "source": "github", "line_count": 107, "max_line_length": 84, "avg_line_length": 33.61682242990654, "alnum_prop": 0.6750069502363081, "repo_name": "scikit-learn-contrib/imbalanced-learn", "id": "e1394a2b58b3895a01ef601020aa380953df297b", "size": "3597", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "imblearn/ensemble/tests/test_weight_boosting.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "695" }, { "name": "Python", "bytes": "672318" }, { "name": "Shell", "bytes": "22112" }, { "name": "TeX", "bytes": "7014" } ], "symlink_target": "" }
import os import sys import vcr DIR = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(DIR, "../../limbo/plugins")) from cat import on_message def test_cat(): with vcr.use_cassette("test/fixtures/cat.yaml"): ret = on_message({"text": u"!cat"}, None) assert "https://cdn2.thecatapi.com/images/MTc3MDc5Ng.gif" in ret
{ "content_hash": "7db3b169c96e68e7776112b147a966b3", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 72, "avg_line_length": 24.466666666666665, "alnum_prop": 0.662125340599455, "repo_name": "llimllib/limbo", "id": "6486e59d1ed84b35f5c4740aa430a5c1f9965608", "size": "391", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_plugins/test_cat.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "627" }, { "name": "Makefile", "bytes": "1385" }, { "name": "Procfile", "bytes": "25" }, { "name": "Python", "bytes": "127466" }, { "name": "Shell", "bytes": "264" } ], "symlink_target": "" }
from . import read_table_bigquery def test_read_table(capsys): read_table_bigquery.read_table() out, _ = capsys.readouterr() assert "country_name" in out
{ "content_hash": "af0cde55dec912f76142690865ee0218", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 36, "avg_line_length": 24, "alnum_prop": 0.6904761904761905, "repo_name": "googleapis/python-bigquery-storage", "id": "c8301857108fb6ae4f86b983d623583b25a7f700", "size": "743", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "samples/to_dataframe/read_table_bigquery_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "1136897" }, { "name": "Shell", "bytes": "30690" } ], "symlink_target": "" }
import shutil import tempfile import os def _generate_class_single(a, b): class C(a, b): pass return C def generate_class_from_many(*args): current = args[0] args = args[1:] for argument in args: current = _generate_class_single(current, argument) return current class TempExtractArchive(object): def __init__(self, local_path): self.temp_directory = tempfile.mkdtemp() self.temp_dir_save = None if hasattr(local_path, "name"): self.temp_dir_save = tempfile.mkdtemp() path = os.path.join(self.temp_dir_save, local_path.name) with open(path, "wb") as fp: fp.write(local_path.read()) local_path = path self.local_path = local_path def __enter__(self): shutil.unpack_archive(self.local_path, self.temp_directory) return self.temp_directory def __exit__(self, exc_type, exc_val, exc_tb): shutil.rmtree(self.temp_directory) if self.temp_dir_save: shutil.rmtree(self.temp_dir_save) class TempArchive(object): def __init__(self, local_path, ext=None): if not ext: file_path, ext = os.path.splitext(local_path) else: file_path = local_path self.temp_directory = tempfile.mkdtemp() self.path = os.path.join(self.temp_directory, os.path.basename(file_path)) if ext.startswith("."): ext = ext[1:] shutil.make_archive(self.path, ext, local_path) self.path = "{0}.{1}".format(self.path, ext) self.fp = open(self.path, "rb") def __delete__(self, instance): self.fp.close() shutil.rmtree(self.temp_directory)
{ "content_hash": "513b2746e2817065e114aec6cbc95b63", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 82, "avg_line_length": 28.262295081967213, "alnum_prop": 0.5904872389791184, "repo_name": "jonatanSh/challenge-framework", "id": "72215116768283f0aa0d0f3aab3f24047f051257", "size": "1724", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "challenge_framework/challenge_framework/lib/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "165605" }, { "name": "Dockerfile", "bytes": "930" }, { "name": "HTML", "bytes": "977387" }, { "name": "JavaScript", "bytes": "195045" }, { "name": "Python", "bytes": "74080" }, { "name": "Shell", "bytes": "127" } ], "symlink_target": "" }
import renpy.display from renpy.display.render import render class Transition(renpy.display.core.Displayable): """ This is the base class of most transitions. It takes care of event dispatching. """ def __init__(self, delay, **properties): super(Transition, self).__init__(**properties) self.delay = delay self.events = True def event(self, ev, x, y, st): if self.events or ev.type == renpy.display.core.TIMEEVENT: return self.new_widget.event(ev, x, y, st) # E1101 else: return None def visit(self): return [ self.new_widget, self.old_widget ] # E1101 def null_render(d, width, height, st, at): d.events = True surf = renpy.display.render.render(d.new_widget, width, height, st, at) rv = renpy.display.render.Render(surf.width, surf.height) rv.blit(surf, (0, 0)) return rv class NoTransition(Transition): """ :doc: transition function :name: Pause :args: (delay) Returns a transition that only displays the new screen for `delay` seconds. It can be useful as part of a MultipleTransition. """ def __init__(self, delay, old_widget=None, new_widget=None, **properties): super(NoTransition, self).__init__(delay, **properties) self.old_widget = old_widget self.new_widget = new_widget self.events = True def render(self, width, height, st, at): return null_render(self, width, height, st, at) class MultipleTransition(Transition): """ :doc: transition function :args: (args) Returns a transition that allows multiple transitions to be displayed, one after the other. `args` A *list* containing an odd number of items. The first, third, and other odd-numbered items must be scenes, and the even items must be transitions. A scene can be one of: * A displayable. * False, to use the old scene. * True, to use the new scene. Almost always, the first argument will be False and the last True. The transitions in `args` are applied in order. For each transition, the old scene is the screen preceding it, and the new scene is the scene following it. 
For example:: define logodissolve = MultipleTransition([ False, Dissolve(0.5), "logo.jpg", Pause(1.0), "logo.jpg", dissolve, True]) This example will dissolve to logo.jpg, wait 1 second, and then dissolve to the new scene. """ def __init__(self, args, old_widget=None, new_widget=None, **properties): if len(args) % 2 != 1 or len(args) < 3: raise Exception("MultipleTransition requires an odd number of arguments, and at least 3 arguments.") self.transitions = [ ] # The screens that we use for the transition. self.screens = [ renpy.easy.displayable(i) for i in args[0::2] ] def oldnew(w): if w is False: return old_widget if w is True: return new_widget return w for old, trans, new in zip(self.screens[0:], args[1::2], self.screens[1:]): old = oldnew(old) new = oldnew(new) self.transitions.append(trans(old_widget=old, new_widget=new)) super(MultipleTransition, self).__init__(sum([i.delay for i in self.transitions]), **properties) self.new_widget = self.transitions[-1] self.events = False def visit(self): return [ i for i in self.screens if isinstance(i, renpy.display.core.Displayable)] + self.transitions def event(self, ev, x, y, st): if self.events or ev.type == renpy.display.core.TIMEEVENT: return self.transitions[-1].event(ev, x, y, st) else: return None def render(self, width, height, st, at): if renpy.game.less_updates: return null_render(self, width, height, st, at) for trans in self.transitions[:-1]: if trans.delay > st: break st -= trans.delay else: trans = self.transitions[-1] self.events = True if trans is not self.transitions[-1]: renpy.display.render.render(self.transitions[-1], width, height, 0, 0) surf = renpy.display.render.render(trans, width, height, st, at) width, height = surf.get_size() rv = renpy.display.render.Render(width, height) rv.blit(surf, (0, 0)) if st < trans.delay: renpy.display.render.redraw(self, trans.delay - st) return rv def Fade(out_time, hold_time, in_time, old_widget=None, new_widget=None, color=None, widget=None, alpha=False, ): """ 
:doc: transition function :args: (out_time, hold_time, in_time, color="#000") :name: Fade Returns a transition that takes `out_time` seconds to fade to a screen filled with `color`, holds at that screen for `hold_time` seconds, and then takes `in_time` to fade to then new screen. :: # Fade to black and back. define fade = Fade(0.5, 0.0, 0.5) # Hold at black for a bit. define fadehold = Fade(0.5, 1.0, 0.5) # Camera flash - quickly fades to white, then back to the scene. define flash = Fade(0.1, 0.0, 0.5, color="#fff") """ dissolve = renpy.curry.curry(Dissolve) notrans = renpy.curry.curry(NoTransition) widget = renpy.easy.displayable_or_none(widget) if color: widget = renpy.display.image.Solid(color) if not widget: widget = renpy.display.image.Solid((0, 0, 0, 255)) args = [ False, dissolve(out_time, alpha=alpha), widget ] if hold_time: args.extend([ notrans(hold_time), widget, ]) args.extend([dissolve(in_time, alpha=alpha), True ]) return MultipleTransition(args, old_widget=old_widget, new_widget=new_widget) class Pixellate(Transition): """ :doc: transition function :args: (time, steps) :name: Pixellate Returns a transition that pixellates out the old screen, and then pixellates in the new screen. `time` The total time the transition will take, in seconds. `steps` The number of steps that will occur, in each direction. Each step creates pixels about twice the size of those in the previous step, so a 5-step pixellation will create 32x32 pixels. 
""" def __init__(self, time, steps, old_widget=None, new_widget=None, **properties): time = float(time) super(Pixellate, self).__init__(time, **properties) self.time = time self.steps = steps self.old_widget = old_widget self.new_widget = new_widget self.events = False self.quantum = time / (2 * steps) def render(self, width, height, st, at): if renpy.game.less_updates: return null_render(self, width, height, st, at) if st >= self.time: self.events = True return render(self.new_widget, width, height, st, at) step = st // self.quantum + 1 visible = self.old_widget if step > self.steps: step = (self.steps * 2) - step + 1 visible = self.new_widget self.events = True rdr = render(visible, width, height, st, at) rv = renpy.display.render.Render(rdr.width, rdr.height) rv.blit(rdr, (0, 0)) rv.operation = renpy.display.render.PIXELLATE rv.operation_parameter = 2 ** step renpy.display.render.redraw(self, 0) return rv class Dissolve(Transition): """ :doc: transition function :args: (time, alpha=False, time_warp=None) :name: Dissolve Returns a transition that dissolves from the old scene to the new scene. `time` The time the dissolve will take. `alpha` If true, the dissolve will alpha-composite the result of the transition with the screen. If false, the result of the transition will replace the screen, which is more efficient. `time_warp` A function that adjusts the timeline. If not None, this should be a function that takes a fractional time between 0.0 and 1.0, and returns a number in the same range. 
""" __version__ = 1 def after_upgrade(self, version): if version < 1: self.alpha = False time_warp = None def __init__(self, time, old_widget=None, new_widget=None, alpha=False, time_warp=None, **properties): super(Dissolve, self).__init__(time, **properties) self.time = time self.old_widget = old_widget self.new_widget = new_widget self.events = False self.alpha = alpha self.time_warp = time_warp def render(self, width, height, st, at): if renpy.game.less_updates: return null_render(self, width, height, st, at) if st >= self.time: self.events = True return render(self.new_widget, width, height, st, at) complete = min(1.0, st / self.time) if self.time_warp is not None: complete = self.time_warp(complete) bottom = render(self.old_widget, width, height, st, at) top = render(self.new_widget, width, height, st, at) width = min(top.width, bottom.width) height = min(top.height, bottom.height) rv = renpy.display.render.Render(width, height, opaque=not self.alpha) rv.operation = renpy.display.render.DISSOLVE rv.operation_alpha = self.alpha rv.operation_complete = complete rv.blit(bottom, (0, 0), focus=False, main=False) rv.blit(top, (0, 0), focus=True, main=True) renpy.display.render.redraw(self, 0) return rv class ImageDissolve(Transition): """ :doc: transition function :args: (image, time, ramplen=8, reverse=False, alpha=True, time_warp=None) :name: ImageDissolve Returns a transition that dissolves the old scene into the new scene, using an image to control the dissolve process. This means that white pixels will dissolve in first, and black pixels will dissolve in last. `image` A control image to use. This must be either an image file or image manipulator. The control image should be the size of the scenes being dissolved. `time` The time the dissolve will take. `ramplen` The length of the ramp to use. This must be an integer power of 2. 
When this is the default value of 8, when a white pixel is fully dissolved, a pixel 8 shades of gray darker will have completed one step of dissolving in. `reverse` If true, black pixels will dissolve in before white pixels. `alpha` If true, the dissolve will alpha-composite the result of the transition with the screen. If false, the result of the transition will replace the screen, which is more efficient. `time_warp` A function that adjusts the timeline. If not None, this should be a function that takes a fractional time between 0.0 and 1.0, and returns a number in the same range. :: define circirisout = ImageDissolve("circiris.png", 1.0) define circirisin = ImageDissolve("circiris.png", 1.0, reverse=True) define circiristbigramp = ImageDissolve("circiris.png", 1.0, ramplen=256) """ __version__ = 1 def after_upgrade(self, version): if version < 1: self.alpha = False time_warp = None def __init__( self, image, time, ramplen=8, ramptype='linear', ramp=None, reverse=False, alpha=False, old_widget=None, new_widget=None, time_warp=None, **properties): # ramptype and ramp are now unused, but are kept for compatbility with # older code. super(ImageDissolve, self).__init__(time, **properties) self.old_widget = old_widget self.new_widget = new_widget self.events = False self.alpha = alpha self.time_warp = time_warp if not reverse: # Copies red -> alpha matrix = renpy.display.im.matrix( 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0) else: # Copies 1-red -> alpha matrix = renpy.display.im.matrix( 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, - 1, 0, 0, 0, 1) self.image = renpy.display.im.MatrixColor(image, matrix) if ramp is not None: ramplen = len(ramp) # The length of the ramp. 
self.ramplen = max(ramplen, 1) def visit(self): return super(ImageDissolve, self).visit() + [ self.image ] def render(self, width, height, st, at): if renpy.game.less_updates or renpy.display.less_imagedissolve: return null_render(self, width, height, st, at) if st >= self.delay: self.events = True return render(self.new_widget, width, height, st, at) image = render(self.image, width, height, st, at) bottom = render(self.old_widget, width, height, st, at) top = render(self.new_widget, width, height, st, at) width = min(bottom.width, top.width, image.width) height = min(bottom.height, top.height, image.height) rv = renpy.display.render.Render(width, height, opaque=not self.alpha) complete = st / self.delay if self.time_warp is not None: complete = self.time_warp(complete) rv.operation = renpy.display.render.IMAGEDISSOLVE rv.operation_alpha = self.alpha rv.operation_complete = complete rv.operation_parameter = self.ramplen rv.blit(image, (0, 0), focus=False, main=False) rv.blit(bottom, (0, 0), focus=False, main=False) rv.blit(top, (0, 0), focus=True, main=True) renpy.display.render.redraw(self, 0) return rv class AlphaDissolve(Transition): """ :doc: transition function :args: (control, delay=0.0, alpha=False, reverse=False) Returns a transition that uses a control displayable (almost always some sort of animated transform) to transition from one screen to another. The transform is evaluated. The new screen is used where the transform is opaque, and the old image is used when it is transparent. `control` The control transform. `delay` The time the transition takes, before ending. `alpha` If true, the image is composited with what's behind it. If false, the default, the image is opaque and overwrites what's behind it. `reverse` If true, the alpha channel is reversed. Opaque areas are taken from the old image, while transparent areas are taken from the new image. 
""" def __init__( self, control, delay=0.0, old_widget=None, new_widget=None, alpha=False, reverse=False, **properties): super(AlphaDissolve, self).__init__(delay, **properties) self.control = renpy.display.layout.Fixed() self.control.add(control) self.old_widget = renpy.easy.displayable(old_widget) self.new_widget = renpy.easy.displayable(new_widget) self.events = False self.alpha = alpha self.reverse = reverse def visit(self): return super(AlphaDissolve, self).visit() + [ self.control ] def render(self, width, height, st, at): if renpy.game.less_updates or renpy.display.less_imagedissolve: return null_render(self, width, height, st, at) if st >= self.delay: self.events = True bottom = render(self.old_widget, width, height, st, at) top = render(self.new_widget, width, height, st, at) width = min(bottom.width, top.width) height = min(bottom.height, top.height) control = render(self.control, width, height, st, at) rv = renpy.display.render.Render(width, height, opaque=not self.alpha) rv.operation = renpy.display.render.IMAGEDISSOLVE rv.operation_alpha = self.alpha rv.operation_complete = 256.0 / (256.0 + 256.0) rv.operation_parameter = 256 rv.blit(control, (0, 0), focus=False, main=False) if not self.reverse: rv.blit(bottom, (0, 0), focus=False, main=False) rv.blit(top, (0, 0), focus=True, main=True) else: rv.blit(top, (0, 0), focus=True, main=True) rv.blit(bottom, (0, 0), focus=False, main=False) return rv class CropMove(Transition): """ :doc: transition function :args: (time, mode="slideright", startcrop=(0.0, 0.0, 0.0, 1.0), startpos=(0.0, 0.0), endcrop=(0.0, 0.0, 1.0, 1.0), endpos=(0.0, 0.0), topnew=True) :name: CropMove Returns a transition that works by cropping a scene and positioning it on the screen. This can be used to implement a variety of effects, all of which involved changing rectangular slices of scenes. `time` The time the transition takes. `mode` The name of the mode of the transition. There are three groups of modes: wipes, slides, and other. 
This can also be "custom", to allow a custom mode to be defined. In a wipe, the image stays fixed, and more of it is revealed as the transition progresses. For example, in "wiperight", a wipe from left to right, first the left edge of the image is revealed at the left edge of the screen, then the center of the image, and finally the right side of the image at the right of the screen. Other supported wipes are "wipeleft", "wipedown", and "wipeup". In a slide, the image moves. So in a "slideright", the right edge of the image starts at the left edge of the screen, and moves to the right as the transition progresses. Other slides are "slideleft", "slidedown", and "slideup". There are also slideaways, in which the old image moves on top of the new image. Slideaways include "slideawayright", "slideawayleft", "slideawayup", and "slideawaydown". We also support a rectangular iris in with "irisin" and a rectangular iris out with "irisout". The following parameters are only respected if the mode is "custom". Positions are relative to the size of the screen, while the crops are relative to the size of the image. So a crop of (0.25, 0.0, 0.5, 1.0) takes the middle half of an image. `startcrop` The starting rectangle that is cropped out of the top image. A 4-element tuple containing x, y, width, and height. `startpos` The starting place that the top image is drawn to the screen at, a 2-element tuple containing x and y. `endcrop` The ending rectangle that is cropped out of the top image. A 4-element tuple containing x, y, width, and height. `endpos` The ending place that the top image is drawn to the screen at, a 2-element tuple containing x and y. `topnew` If true, the scene that is cropped and moved (and is on top of the other scene) is the new scene. If false, it is the old scene. 
:: define wiperight = CropMove(1.0, "wiperight") define wipeleft = CropMove(1.0, "wipeleft") define wipeup = CropMove(1.0, "wipeup") define wipedown = CropMove(1.0, "wipedown") define slideright = CropMove(1.0, "slideright") define slideleft = CropMove(1.0, "slideleft") define slideup = CropMove(1.0, "slideup") define slidedown = CropMove(1.0, "slidedown") define slideawayright = CropMove(1.0, "slideawayright") define slideawayleft = CropMove(1.0, "slideawayleft") define slideawayup = CropMove(1.0, "slideawayup") define slideawaydown = CropMove(1.0, "slideawaydown") define irisout = CropMove(1.0, "irisout") define irisin = CropMove(1.0, "irisin") """ def __init__(self, time, mode="slideright", startcrop=(0.0, 0.0, 0.0, 1.0), startpos=(0.0, 0.0), endcrop=(0.0, 0.0, 1.0, 1.0), endpos=(0.0, 0.0), topnew=True, old_widget=None, new_widget=None, **properties): super(CropMove, self).__init__(time, **properties) self.time = time if mode == "wiperight": startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 0.0, 1.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "wipeleft": startpos = (1.0, 0.0) startcrop = (1.0, 0.0, 0.0, 1.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "wipedown": startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 0.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "wipeup": startpos = (0.0, 1.0) startcrop = (0.0, 1.0, 1.0, 0.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "slideright": startpos = (0.0, 0.0) startcrop = (1.0, 0.0, 0.0, 1.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "slideleft": startpos = (1.0, 0.0) startcrop = (0.0, 0.0, 0.0, 1.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "slideup": startpos = (0.0, 1.0) startcrop = (0.0, 0.0, 1.0, 0.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "slidedown": startpos = (0.0, 0.0) 
startcrop = (0.0, 1.0, 1.0, 0.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "slideawayleft": endpos = (0.0, 0.0) endcrop = (1.0, 0.0, 0.0, 1.0) startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 1.0) topnew = False elif mode == "slideawayright": endpos = (1.0, 0.0) endcrop = (0.0, 0.0, 0.0, 1.0) startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 1.0) topnew = False elif mode == "slideawaydown": endpos = (0.0, 1.0) endcrop = (0.0, 0.0, 1.0, 0.0) startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 1.0) topnew = False elif mode == "slideawayup": endpos = (0.0, 0.0) endcrop = (0.0, 1.0, 1.0, 0.0) startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 1.0) topnew = False elif mode == "irisout": startpos = (0.5, 0.5) startcrop = (0.5, 0.5, 0.0, 0.0) endpos = (0.0, 0.0) endcrop = (0.0, 0.0, 1.0, 1.0) topnew = True elif mode == "irisin": startpos = (0.0, 0.0) startcrop = (0.0, 0.0, 1.0, 1.0) endpos = (0.5, 0.5) endcrop = (0.5, 0.5, 0.0, 0.0) topnew = False elif mode == "custom": pass else: raise Exception("Invalid mode %s passed into CropMove." % mode) self.delay = time self.time = time self.startpos = startpos self.endpos = endpos self.startcrop = startcrop self.endcrop = endcrop self.topnew = topnew self.old_widget = old_widget self.new_widget = new_widget self.events = False if topnew: self.bottom = old_widget self.top = new_widget else: self.bottom = new_widget self.top = old_widget def render(self, width, height, st, at): if renpy.game.less_updates: return null_render(self, width, height, st, at) time = 1.0 * st / self.time # Done rendering. if time >= 1.0: self.events = True return render(self.new_widget, width, height, st, at) # How we scale each element of a tuple. 
scales = (width, height, width, height) def interpolate_tuple(t0, t1): return tuple([ int(s * (a * (1.0 - time) + b * time)) for a, b, s in zip(t0, t1, scales) ]) crop = interpolate_tuple(self.startcrop, self.endcrop) pos = interpolate_tuple(self.startpos, self.endpos) top = render(self.top, width, height, st, at) bottom = render(self.bottom, width, height, st, at) width = min(bottom.width, width) height = min(bottom.height, height) rv = renpy.display.render.Render(width, height) rv.blit(bottom, (0, 0), focus=not self.topnew) ss = top.subsurface(crop, focus=self.topnew) rv.blit(ss, pos, focus=self.topnew) renpy.display.render.redraw(self, 0) return rv class PushMove(Transition): """ :doc: transition function :args: (time, mode="pushright") :name: PushMove Returns a transition that works by taking the new scene and using it to "push" the old scene off the screen. `time` The time the transition takes. `mode` There are four possible modes: "pushright", "pushleft", "pushup", and "pushdown", which push the old scene off the screen in the direction indicated. 
:: define pushright = PushMove(1.0, "pushright") define pushleft = PushMove(1.0, "pushleft") define pushup = PushMove(1.0, "pushup") define pushdown = PushMove(1.0, "pushdown") """ def __init__(self, time, mode="pushright", old_widget=None, new_widget=None, **properties): super(PushMove, self).__init__(time, **properties) self.time = time if mode == "pushright": self.new_startpos = (0.0, 0.0) self.new_startcrop = (1.0, 0.0, 0.0, 1.0) self.new_endpos = (0.0, 0.0) self.new_endcrop = (0.0, 0.0, 1.0, 1.0) self.old_endpos = (1.0, 0.0) self.old_endcrop = (0.0, 0.0, 0.0, 1.0) self.old_startpos = (0.0, 0.0) self.old_startcrop = (0.0, 0.0, 1.0, 1.0) elif mode == "pushleft": self.new_startpos = (1.0, 0.0) self.new_startcrop = (0.0, 0.0, 0.0, 1.0) self.new_endpos = (0.0, 0.0) self.new_endcrop = (0.0, 0.0, 1.0, 1.0) self.old_endpos = (0.0, 0.0) self.old_endcrop = (1.0, 0.0, 0.0, 1.0) self.old_startpos = (0.0, 0.0) self.old_startcrop = (0.0, 0.0, 1.0, 1.0) elif mode == "pushup": self.new_startpos = (0.0, 1.0) self.new_startcrop = (0.0, 0.0, 1.0, 0.0) self.new_endpos = (0.0, 0.0) self.new_endcrop = (0.0, 0.0, 1.0, 1.0) self.old_endpos = (0.0, 0.0) self.old_endcrop = (0.0, 1.0, 1.0, 0.0) self.old_startpos = (0.0, 0.0) self.old_startcrop = (0.0, 0.0, 1.0, 1.0) elif mode == "pushdown": self.new_startpos = (0.0, 0.0) self.new_startcrop = (0.0, 1.0, 1.0, 0.0) self.new_endpos = (0.0, 0.0) self.new_endcrop = (0.0, 0.0, 1.0, 1.0) self.old_endpos = (0.0, 1.0) self.old_endcrop = (0.0, 0.0, 1.0, 0.0) self.old_startpos = (0.0, 0.0) self.old_startcrop = (0.0, 0.0, 1.0, 1.0) else: raise Exception("Invalid mode %s passed into PushMove." % mode) self.delay = time self.time = time self.old_widget = old_widget self.new_widget = new_widget self.events = False def render(self, width, height, st, at): if renpy.game.less_updates: return null_render(self, width, height, st, at) time = 1.0 * st / self.time # Done rendering. 
if time >= 1.0: self.events = True return render(self.new_widget, width, height, st, at) # How we scale each element of a tuple. scales = (width, height, width, height) def interpolate_tuple(t0, t1): return tuple([ int(s * (a * (1.0 - time) + b * time)) for a, b, s in zip(t0, t1, scales) ]) new_crop = interpolate_tuple(self.new_startcrop, self.new_endcrop) new_pos = interpolate_tuple(self.new_startpos, self.new_endpos) old_crop = interpolate_tuple(self.old_startcrop, self.old_endcrop) old_pos = interpolate_tuple(self.old_startpos, self.old_endpos) new = render(self.new_widget, width, height, st, at) old = render(self.old_widget, width, height, st, at) rv = renpy.display.render.Render(width, height) old_ss = old.subsurface(old_crop, focus=True) rv.blit(old_ss, old_pos, focus=True) new_ss = new.subsurface(new_crop, focus=True) rv.blit(new_ss, new_pos, focus=True) renpy.display.render.redraw(self, 0) return rv def ComposeTransition(trans, before=None, after=None, new_widget=None, old_widget=None): """ :doc: transition function :args: (trans, before, after) Returns a transition that composes up to three transitions. If not None, the `before` and `after` transitions are applied to the old and new scenes, respectively. These updated old and new scenes are then supplied to the `trans` transition. :: # Move the images in and out while dissolving. (This is a fairly expensive transition.) define moveinoutdissolve = ComposeTransition(dissolve, before=moveoutleft, after=moveinright) """ if before is not None: old = before(new_widget=new_widget, old_widget=old_widget) else: old = old_widget if after is not None: new = after(new_widget=new_widget, old_widget=old_widget) else: new = new_widget return trans(new_widget=new, old_widget=old) def SubTransition(rect, trans, old_widget=None, new_widget=None, **properties): """ Applies a transition to a subset of the screen. Not documented. 
""" x, y, _w, _h = rect old = renpy.display.layout.LiveCrop(rect, old_widget) new = renpy.display.layout.LiveCrop(rect, new_widget) inner = trans(old_widget=old, new_widget=new) delay = inner.delay inner = renpy.display.layout.Position(inner, xpos=x, ypos=y, xanchor=0, yanchor=0) f = renpy.display.layout.MultiBox(layout='fixed') f.add(new_widget) f.add(inner) return NoTransition(delay, old_widget=f, new_widget=f)
{ "content_hash": "30b720e39d197596e75eaeac8674fe45", "timestamp": "", "source": "github", "line_count": 1015, "max_line_length": 151, "avg_line_length": 31.219704433497537, "alnum_prop": 0.5680068164604898, "repo_name": "kfcpaladin/sze-the-game", "id": "31ae488ae349023afde11fac05e992bd265def69", "size": "32997", "binary": false, "copies": "1", "ref": "refs/heads/orphan", "path": "renpy/display/transition.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3537204" }, { "name": "Ren'Py", "bytes": "943500" }, { "name": "Shell", "bytes": "2115" }, { "name": "Visual Basic", "bytes": "287" } ], "symlink_target": "" }
from bottle import * import auth import csv import psycopg2, psycopg2.extensions, psycopg2.extras psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # se znebimo problemov s sumniki ###################################################################### # priklopimo se na bazo conn = psycopg2.connect(database=auth.db, host=auth.host, user=auth.user, password=auth.password) conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # onemogocimo transakcije cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) slovar = {} ##with open('CSV1.csv', 'r') as f: ## csvfile = csv.reader(f) ## i = 0 ## for row in csvfile: ## if i != 0 and row != []: ## slovar[row[1]] = "" ## i += 1 ## ##for key in slovar: ## cur.execute("INSERT INTO hero (ime) VALUES (%s);", [key]) heros = {} heros["druid"] = 11 heros["hunter"] = 16 heros["mage"] = 12 heros["paladin"] = 10 heros["priest"] = 9 heros["rogue"] = 15 heros["shaman"] = 14 heros["warlock"] = 13 heros["warrior"] = 9 heros["vsi"] = 17 with open('pureHTML2_karte.csv', 'r') as f: csvfile = csv.reader(f,delimiter =';') for row in csvfile: cur.execute("INSERT INTO karte (ime,expansion,rarity,mana_cost,class) VALUES (%s,%s,%s,%s,%s);", [row[0],int(row[1]),int(row[2]),int(row[3]),heros[row[4]]])
{ "content_hash": "7fcf34307ca0839627d30ad79b611b40", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 164, "avg_line_length": 24.654545454545456, "alnum_prop": 0.6076696165191741, "repo_name": "JanezRadescek/HearthStone", "id": "689b548b524b5a37d480a3b5a402a9c161597973", "size": "1400", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "baza.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8159" }, { "name": "HTML", "bytes": "21304" }, { "name": "JavaScript", "bytes": "3869" }, { "name": "Python", "bytes": "176603" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('mapdata', '0012_rename_section_to_level'), ] operations = [ migrations.AlterField( model_name='locationslug', name='slug', field=models.SlugField(null=True, unique=True, verbose_name='Slug'), ), ]
{ "content_hash": "dc5739d311f59a35e2894ffdf9280617", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 80, "avg_line_length": 23.166666666666668, "alnum_prop": 0.6067146282973621, "repo_name": "c3nav/c3nav", "id": "ec37dd17b825786e4472fbc500f91d52c1f12160", "size": "490", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/c3nav/mapdata/migrations/0013_auto_20170618_1934.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "986" }, { "name": "HTML", "bytes": "89944" }, { "name": "JavaScript", "bytes": "179692" }, { "name": "Python", "bytes": "1061013" }, { "name": "SCSS", "bytes": "41200" }, { "name": "Sass", "bytes": "11121" }, { "name": "Shell", "bytes": "90" } ], "symlink_target": "" }
import zmq import random import time import zmq_ports as ports import zmq_topics as topic from websocket import create_connection RCV_DELAY=0.01 WSPORT = 9000 ########### WEBSOCKET EVENTS ########### WEBSOCKET EVENTS if __name__ == '__main__': #print("Starting comm") # IPC context = zmq.Context() # Publisher comm_publisher = context.socket(zmq.PUB) comm_publisher.bind("tcp://*:%s" % ports.COMM_PUB) # Subscribe to commander commander_subscriber = context.socket(zmq.SUB) commander_subscriber.connect("tcp://localhost:%s" % ports.COMMANDER_PUB) commander_subscriber.setsockopt_string(zmq.SUBSCRIBE, topic.SENSOR_TOPIC) # Subscribe to Tornado websocket server browser_subscriber = context.socket(zmq.SUB) browser_subscriber.connect("tcp://localhost:%s" % ports.TORNADO_PUB) browser_subscriber.setsockopt_string(zmq.SUBSCRIBE, topic.COMMAND_TOPIC) # Subscribe to gps gps_subscriber = context.socket(zmq.SUB) gps_subscriber.connect("tcp://localhost:%s" % ports.GPS_PUB) gps_subscriber.setsockopt_string(zmq.SUBSCRIBE, topic.GPS_TOPIC) # Web scoket for sending data to the browser ws = create_connection("ws://localhost:9000/ws/") # Open a browser #browser_controller.start() connected=False while True: #xcomm.startVideoStream() # from browser to commander while True: try: msg = browser_subscriber.recv_string(zmq.DONTWAIT) except zmq.Again: break # process task msg = msg.strip(str(topic.COMMAND_TOPIC) + " ") print("from browser:", msg) # Connection state if(msg[0]=="Q"): comm_publisher.send_string("%s %s" % (topic.CONNECTION_TOPIC, msg[1])) connected=msg[1] else: comm_publisher.send_string("%s %s" % (topic.COMMAND_TOPIC, msg)) # from commander to browser - sensor data while True: try: msg = commander_subscriber.recv_string(zmq.DONTWAIT) msg=msg.strip(str(topic.SENSOR_TOPIC)+" ") #print(msg) #xcomm.sendMsg(msg) except zmq.Again: break # process task if(connected): ws.send("_" + msg) #print("comm received:", msg) # from commander to browser - gps data while True: try: msg = 
gps_subscriber.recv_string(zmq.DONTWAIT) msg = msg.strip(topic.GPS_TOPIC + " ") except zmq.Again: break # process task if(connected): ws.send("_g," + msg) time.sleep(0.005) ws.close()
{ "content_hash": "bd2a58deaebd9757796c031396ee911c", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 86, "avg_line_length": 30.053763440860216, "alnum_prop": 0.5738819320214669, "repo_name": "jeryfast/piflyer", "id": "8ea49c780943ff1b79ca51773c0ebb81e220b130", "size": "2795", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "piflyer/zmq_comm.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "11037" }, { "name": "Python", "bytes": "95834" } ], "symlink_target": "" }
"""Poll a Ruby script once a minute via Naked's Ruby shell helper."""
import time
import threading
import subprocess  # NOTE(review): unused here -- confirm before removing
from Naked.toolshed.shell import execute_rb, muterun_rb


def foShizzleMyNizzlePollingTask():
    """Run foshizzlemynizzle.rb, then reschedule this task in 60 seconds.

    NOTE(review): each run spawns a fresh non-daemon Timer thread, so the
    process cannot exit on its own, and the success flag returned by
    execute_rb is ignored (the unused binding was removed) -- confirm both
    behaviours are intended.
    """
    print(time.ctime())
    execute_rb('foshizzlemynizzle.rb')
    threading.Timer(60, foShizzleMyNizzlePollingTask).start()


foShizzleMyNizzlePollingTask()
{ "content_hash": "99c43d6fb9e34a71653a472dba7ff12e", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 60, "avg_line_length": 24.916666666666668, "alnum_prop": 0.7759197324414716, "repo_name": "jusleg/storyflow", "id": "4cad2037c58b16da963d44793d02fd554bbd7166", "size": "299", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rubCaller.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "2928" }, { "name": "HTML", "bytes": "3784" }, { "name": "JavaScript", "bytes": "2825" }, { "name": "PHP", "bytes": "3007" }, { "name": "Python", "bytes": "299" }, { "name": "Ruby", "bytes": "9915" } ], "symlink_target": "" }
"""App Engine chat backend.

Stores messages in the Datastore, indexes them for search (via the
messageindex module), and broadcasts them to connected clients through
Firebase channels keyed by user id.
"""

try:
    from functools import lru_cache
except ImportError:
    from functools32 import lru_cache

import base64
import cgi
import datetime
import httplib2
import jinja2
import json
import messageindex
import os
import re
import time
import urllib
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import users
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials

JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)

_IDENTITY_ENDPOINT = ('https://identitytoolkit.googleapis.com/'
                      'google.identity.identitytoolkit.v1.IdentityToolkit')
_FIREBASE_SCOPES = [
    'https://www.googleapis.com/auth/firebase.database',
    'https://www.googleapis.com/auth/userinfo.email']

DEFAULT_TOPIC = 'chat'


def messages_key():
    """Constructs a Datastore key for the Messages table."""
    return ndb.Key('Messages', 'Public')


def sessions_key():
    """Constructs a Datastore key for the Sessions table."""
    return ndb.Key('Sessions', 'All')


class Author(ndb.Model):
    """Sub model for representing an author."""
    identity = ndb.StringProperty(indexed=False)
    nickname = ndb.StringProperty(indexed=False)
    email = ndb.StringProperty(indexed=False)


class Message(ndb.Model):
    """A main model for representing an individual sent Message."""
    author = ndb.StructuredProperty(Author)
    # Only the date is indexed: this table is used solely for displaying the
    # message stream; all searches go through the Search API (messageindex).
    date = ndb.DateTimeProperty()
    topic = ndb.StringProperty(indexed=False)
    content = ndb.StringProperty(indexed=False)


class Session(ndb.Model):
    """A main model for representing a user's session."""
    client_id = ndb.StringProperty(indexed=True)
    # Not read by the code; kept only to make manual administration easier.
    email = ndb.StringProperty(indexed=False)


def message_to_struct(message):
    """Transforms a Message into a simple structure for passing to HTML."""
    return {
        'id': cgi.escape(message.date.isoformat()),
        'nickname': cgi.escape(message.author.nickname),
        'email': cgi.escape(message.author.email),
        'date': cgi.escape(message.date.strftime('%x %X')),
        'topic': cgi.escape(message.topic),
        'content': cgi.escape(message.content).replace("\n", "<br>"),
    }


def create_custom_token(uid, valid_minutes=59):
    """Create a secure custom JWT for *uid*.

    The token is signed with the app's service account and passed to the
    client; *uid* becomes the Firebase channel id, which Firebase's security
    rules use to prevent unauthorized access.
    """
    # app_identity supplies the project's service account e-mail.
    client_email = app_identity.get_service_account_name()
    now = int(time.time())

    # Required claims per
    # https://firebase.google.com/docs/auth/server/create-custom-tokens
    payload = base64.b64encode(json.dumps({
        'iss': client_email,
        'sub': client_email,
        'aud': _IDENTITY_ENDPOINT,
        'uid': uid,  # the important parameter: it becomes the channel id
        'iat': now,
        'exp': now + (valid_minutes * 60),
    }))
    # Standard header identifying this as a JWT.
    header = base64.b64encode(json.dumps({'typ': 'JWT', 'alg': 'RS256'}))
    to_sign = '{}.{}'.format(header, payload)
    # Sign the JWT using the built-in app_identity service.
    return '{}.{}'.format(to_sign, base64.b64encode(
        app_identity.sign_blob(to_sign)[1]))


class MainPage(webapp2.RequestHandler):
    """Generates the main web page."""

    def get(self):
        user = users.get_current_user()
        if not user:
            # Defense in depth: App Engine should only run this handler for
            # signed-in users, but redirect to login just in case.
            self.redirect(users.create_login_url(self.request.uri))
            return

        # Record this user's id so broadcasts reach them.
        # NOTE: this table grows without bound as more people use the
        # system; a heartbeat-based expiry would keep it down to the set of
        # currently open sessions.
        query = Session.query(Session.client_id == user.user_id())
        # query.get() returns the first match or None -- replaces the
        # original's double query.iter() round-trip.
        session = query.get()
        if session is None:
            session = Session(parent=sessions_key())
            session.client_id = user.user_id()
            session.email = user.email()
            session.put()

        topic = self.request.get('topic', DEFAULT_TOPIC)

        # Send an encrypted custom token to the client; Firebase's data
        # security rules decrypt it and prevent unauthorized access.
        token = create_custom_token(session.client_id)

        template_values = {
            'user': user,
            'topic': urllib.quote_plus(topic),
            'token': token,
            'channel_id': user.user_id(),
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))


def safeStrToInt(s):
    """Parse *s* as an int, falling back to 10 on malformed input."""
    try:
        return int(s)
    except ValueError:
        return 10


class SearchPage(webapp2.RequestHandler):
    """Generates the search results page."""

    def get(self):
        # Searches submitted via GET behave identically to POST.
        self.post()

    def post(self):
        user = users.get_current_user()
        if not user:
            # Defense in depth, as in MainPage.get.
            self.redirect(users.create_login_url(self.request.uri))
            return

        query = self.request.get('query', '')
        num_results = safeStrToInt(self.request.get('num_results', '10'))

        # messageindex returns urlsafe Datastore keys of matching messages.
        urlsafe_keys = messageindex.find(query, num_results)
        results = []
        for urlsafe_key in urlsafe_keys:
            result = ndb.Key(urlsafe=urlsafe_key).get()
            if result:
                results.append(message_to_struct(result))

        template_values = {
            'query': query,
            'num_results': num_results,
            'results': results,
        }
        template = JINJA_ENVIRONMENT.get_template('search.html')
        self.response.write(template.render(template_values))


# Memoized to avoid re-parsing the code snippet on every call.
@lru_cache()
def _get_firebase_db_url():
    """Grab the databaseURL from the Firebase config snippet in index.html.

    The regex simply pulls the 'databaseURL' field out of the Firebase
    javascript snippet.
    """
    regex = re.compile(r'\bdatabaseURL\b.*?["\']([^"\']+)')
    cwd = os.path.dirname(__file__)
    try:
        with open(os.path.join(cwd, 'index.html')) as f:
            url = next(regex.search(line) for line in f if regex.search(
                line))
    except StopIteration:
        raise ValueError(
            'Error parsing databaseURL. Please copy Firebase web snippet '
            'into index.html')
    return url.group(1)


# Memoized to avoid fetching new access tokens on every call.
@lru_cache()
def _get_http():
    """Provides an http object authorized for Firebase REST calls."""
    http = httplib2.Http()
    # Application default credentials, scoped for Firebase -- see
    # https://firebase.google.com/docs/reference/rest/database/user-auth
    creds = GoogleCredentials.get_application_default().create_scoped(
        _FIREBASE_SCOPES)
    creds.authorize(http)
    return http


class MessagesBroadcast(object):
    """Broadcasts a list of messages to all users who have opened the UI."""

    def __init__(self, messages):
        self.messages = messages

    def encode_messages(self):
        """JSON-encode the messages as a list of display structs."""
        return json.dumps(
            [message_to_struct(m) for m in self.messages])

    def send_messages(self, dest):
        """PUT the encoded messages into *dest*'s Firebase channel."""
        str_message = self.encode_messages()
        url = '{}/channels/{}.json'.format(_get_firebase_db_url(), dest)
        _get_http().request(url, 'PUT', body=str_message)

    def send(self):
        """Attempt to forward the messages to every recorded session."""
        session_query = Session.query(ancestor=sessions_key())
        for session in session_query:
            self.send_messages(session.client_id)


class SendMessage(webapp2.RequestHandler):
    """Handler for the /send POST request."""

    def post(self):
        user = users.get_current_user()
        if not user:
            # Defense in depth, as in MainPage.get.
            self.redirect(users.create_login_url(self.request.uri))
            return

        # Store the Message.  All Messages share one parent key so they live
        # in a single entity group: queries over it are strongly consistent,
        # but the write rate should stay under ~1/second.
        message = Message(parent=messages_key())
        message.topic = self.request.get('topic', DEFAULT_TOPIC)
        message.author = Author(
            identity=user.user_id(),
            nickname=user.nickname(),
            email=user.email())
        message.content = self.request.get('content')
        message.date = datetime.datetime.now()
        message_key = message.put()

        # Index the message so it is available for future searches.
        messageindex.add(message_key.urlsafe(), message)

        # Broadcast the recorded message to all open clients.
        broadcast = MessagesBroadcast([message])
        broadcast.send()


class GetMessages(webapp2.RequestHandler):
    """Handler for the /get POST request: pages older messages to the caller."""

    def post(self):
        user = users.get_current_user()
        if not user:
            # Defense in depth, as in MainPage.get.
            self.redirect(users.create_login_url(self.request.uri))
            return

        older_than_id = self.request.get('older_than')
        older_than = datetime.datetime.strptime(older_than_id,
                                                "%Y-%m-%dT%H:%M:%S.%f")
        query = (Message.query(ancestor=messages_key())
                 .filter(Message.date < older_than)
                 .order(-Message.date))
        # Limit query to 50 messages:
        query_results = query.fetch(50)
        if len(query_results) > 0:
            # Deliver only to the requesting user's channel.
            broadcast = MessagesBroadcast(query_results)
            broadcast.send_messages(user.user_id())


app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/send', SendMessage),
    ('/get', GetMessages),
    ('/search', SearchPage),
], debug=True)
{ "content_hash": "7be9b954c9650a6e581d45c42119e780", "timestamp": "", "source": "github", "line_count": 339, "max_line_length": 87, "avg_line_length": 35.50737463126843, "alnum_prop": 0.6361219572983301, "repo_name": "colohan/dschat", "id": "9f8f5bfb7311c2a5d59c0ea7b240813b1e05036d", "size": "12037", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "appengine/dschat.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "89" }, { "name": "HTML", "bytes": "4908" }, { "name": "JavaScript", "bytes": "8489" }, { "name": "Python", "bytes": "21689" } ], "symlink_target": "" }
"""Generate a CSV of Zanata account details (name, e-mail) for the members
of a given role in each language team, based on a language-team YAML file.

Requires privileged access to Zanata's accounts API (zanata.ini credentials).
"""

import argparse
import csv
import io
import operator
import os
import sys

from oslo_log import log as logging
import yaml

from ZanataUtils import IniConfig
from ZanataUtils import ZanataRestService

LOG = logging.getLogger(__name__)


class ZanataAccounts(object):
    """Object that retrieves Zanata account information.

    Retrieve name and e-mail address using Zanata ID.

    Attributes:
        zconfig (IniConfig): zanata.ini values
        verify (Bool): True if communicating with non-SSL server
    """

    def __init__(self, zconfig, verify, **kwargs):
        accept = 'application/vnd.zanata.account+json'
        content_type = 'application/json'
        self.rest_service = ZanataRestService(zconfig, accept=accept,
                                              content_type=content_type,
                                              verify=verify)
        for key, value in kwargs.items():
            setattr(self, key, value)

    def get_account_data(self, zanata_id):
        """Return account details (incl. name and e-mail) for *zanata_id*
        via Zanata's REST API."""
        r = self.rest_service.query(
            '/rest/accounts/u/%s' % (zanata_id))
        return r.json()


def _make_language_team(name, team_info):
    """Build a language-team dict from one YAML entry."""
    return {
        'tag': 'language_team',
        'language_code': name,
        'language': team_info['language'],
        # A Zanata ID consisting only of digits is valid and is parsed as an
        # integer unless it is quoted in the YAML file, so stringify here.
        'translators': [str(i) for i in team_info['translators']],
        'reviewers': [str(i) for i in team_info.get('reviewers', [])],
        'coordinators': [str(i) for i in team_info.get('coordinators', [])],
    }


def _make_user(user_id, language_code, language):
    """Build a user record; name/email are filled in later from the API."""
    return {
        'user_id': user_id,
        'lang_code': language_code,
        'lang': language,
        'name': '',
        'email': ''
    }


def read_language_team_yaml(translation_team_uri, lang_list):
    """Load language teams from YAML, optionally restricted to *lang_list*.

    Exits the process with an error if a requested language is missing.
    """
    LOG.debug('Process list of language team from uri: %s',
              translation_team_uri)

    content = yaml.safe_load(io.open(translation_team_uri, 'r'))

    language_teams = {}

    if lang_list:
        lang_notfound = [lang_code for lang_code in lang_list
                         if lang_code not in content]
        if lang_notfound:
            # BUG FIX: message previously read 'not tound'.
            print('Language %s not found in %s.' %
                  (', '.join(lang_notfound),
                   translation_team_uri))
            sys.exit(1)

    for lang_code, team_info in content.items():
        if lang_list and lang_code not in lang_list:
            continue
        language_teams[lang_code] = _make_language_team(lang_code, team_info)

    return language_teams


def get_zanata_userdata(zc, verify, role, language_teams):
    """Collect user records for *role* members of every language team, then
    fill in name/e-mail from Zanata's accounts API."""
    print('Getting user data in Zanata...')
    accounts = ZanataAccounts(zc, verify)
    users = {}

    # Default role when none was supplied on the command line.
    if not role:
        role = 'translators'

    for language_code in language_teams:
        language_team = language_teams[language_code]
        language_name = language_team['language']
        for user in language_team[role]:
            users[user] = _make_user(user, language_code, language_name)

    for user_id in users:
        user = users.get(user_id)
        print('Getting user detail data for user %(user_id)s' %
              {'user_id': user_id})
        user_data = accounts.get_account_data(user_id)
        if user_data:
            user['name'] = user_data['name']
            user['email'] = user_data['email']

    return users


def write_userdata_to_file(users, output_file):
    """Write user records to *output_file* as CSV, sorted by language, id."""
    userdata = [user for user in
                sorted(users.values(),
                       key=operator.itemgetter('lang', 'user_id'))]
    _write_userdata_to_csvfile(userdata, output_file)
    print('Userdata has been written to %s' % output_file)


def _write_userdata_to_csvfile(userdata, output_file):
    # NOTE(review): on Python 3 this open() should pass newline='' per the
    # csv module docs (avoids blank rows on Windows); left unchanged because
    # the io.open usage above suggests the file may still target Python 2.
    with open(output_file, 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['user_id', 'lang_code', 'lang', 'name', 'email'])
        for data in userdata:
            d = [data['user_id'], data['lang_code'], data['lang'],
                 data['name'], data['email']]
            writer.writerow(d)


def _comma_separated_list(s):
    """argparse type: split a comma-separated string into a list."""
    return s.split(',')


def main():
    # Loads zanata.ini configuration file
    try:
        zc = IniConfig(os.path.expanduser('~/.config/zanata.ini'))
    except ValueError as e:
        sys.exit(e)

    # Parses command option(s)
    parser = argparse.ArgumentParser(description='Generate a csv file which '
                                     'contains the list of translators for '
                                     'a specified target role with name and '
                                     'e-mail address. Require a privilege '
                                     'to access Zanata accounts API.')
    parser.add_argument("-o", "--output-file",
                        help=("Specify the output file. "
                              "Default: zanata_userinfo_output.csv."))
    parser.add_argument("-r", "--role",
                        help=("Specify the target role. "
                              "Roles: coordinators, translators, reviewers."
                              "Default: translators."))
    parser.add_argument("-l", "--lang",
                        type=_comma_separated_list,
                        help=("Specify language(s). Comma-separated list. "
                              "Language code like zh-CN, ja needs to be used. "
                              "Otherwise all languages are processed."))
    parser.add_argument('--no-verify', action='store_false', dest='verify',
                        help='Do not perform HTTPS certificate verification')
    parser.add_argument("user_yaml",
                        help="YAML file of the user list")
    options = parser.parse_args()

    # Reads language team information
    language_teams = read_language_team_yaml(options.user_yaml, options.lang)

    users = get_zanata_userdata(zc, options.verify, options.role,
                                language_teams)

    output_file = (options.output_file or 'zanata_userinfo_output.csv')
    write_userdata_to_file(users, output_file)


if __name__ == '__main__':
    main()
{ "content_hash": "ed0da033bdc754fbe95dd46e64d3da6f", "timestamp": "", "source": "github", "line_count": 189, "max_line_length": 79, "avg_line_length": 34.17460317460318, "alnum_prop": 0.5706765753212572, "repo_name": "openstack/i18n", "id": "9c726312d9cefcf0212be1d71143e5361f549bdf", "size": "7028", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tools/zanata/zanata_userinfo.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Jinja", "bytes": "447" }, { "name": "Python", "bytes": "36231" }, { "name": "Shell", "bytes": "4599" } ], "symlink_target": "" }
"""Main script to run an experiment. Usage example (run from this directory): python run_experiment.py --config=configs/cifar10_wrn.py """ import functools from absl import app from absl import flags from jax_privacy.src.training.image_classification import experiment from jaxline import platform if __name__ == '__main__': flags.mark_flag_as_required('config') app.run(functools.partial(platform.main, experiment.Experiment))
{ "content_hash": "be65329e7b3dd453fe77947744239466", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 68, "avg_line_length": 25.823529411764707, "alnum_prop": 0.7608200455580866, "repo_name": "deepmind/jax_privacy", "id": "7b96889b5de898d72f5e26d4d77dd037f421a0b1", "size": "1049", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "experiments/image_classification/run_experiment.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "210177" }, { "name": "Shell", "bytes": "2187" }, { "name": "TeX", "bytes": "279" } ], "symlink_target": "" }
import json
import pymongo
from flask import request, abort, json, Flask, render_template
from flask.ext import restful
from flask.ext.restful import reqparse
from flask_rest_service import app, api, mongo
from bson.objectid import ObjectId
from bson.code import Code


class Root(restful.Resource):
    """API root resource: a health check for the service.

    Responds on / so clients can confirm the API is up and see which
    MongoDB database it is connected to.
    """

    def get(self):
        """GET / -- report status OK plus the MongoDB instance in use."""
        return {
            'status': 'OK',
            'mongo': str(mongo.db),
        }


api.add_resource(Root, '/')
{ "content_hash": "b427edfadad8af506b8727c3d3a81239", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 80, "avg_line_length": 26.19047619047619, "alnum_prop": 0.6618181818181819, "repo_name": "openfoodfacts/OpenFoodFacts-APIRestPython", "id": "05e3562ffa4fa6927a1f76539d7f31af9bd9aab1", "size": "550", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "flask_rest_service/resources_root.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "209" }, { "name": "HTML", "bytes": "5188" }, { "name": "JavaScript", "bytes": "4142" }, { "name": "Python", "bytes": "16190" } ], "symlink_target": "" }
"""Runs the Android 'dx' tool to produce .dex files.

Supports multidex (via main-dex class lists), incremental dexing, and
proguard-processed inputs.
"""

import logging
import optparse
import os
import sys
import tempfile
import zipfile

from util import build_utils


def _CreateCombinedMainDexList(main_dex_list_paths):
  """Concatenates the given main-dex list files into one newline-joined str."""
  main_dex_list = []
  for m in main_dex_list_paths:
    with open(m) as main_dex_list_file:
      main_dex_list.extend(l for l in main_dex_list_file if l)
  return '\n'.join(main_dex_list)


def _RemoveUnwantedFilesFromZip(dex_path):
  """Rewrites the zip at |dex_path| keeping only .dex entries.

  BUG FIX: the original never closed either ZipFile handle -- a resource
  leak, and the output zip's central directory is only guaranteed to be
  written on close().  Context managers close both before the rename.
  """
  tmp_dex_path = '%s.tmp.zip' % dex_path
  with zipfile.ZipFile(dex_path, 'r') as iz:
    with zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED) as oz:
      for name in iz.namelist():
        if name.endswith('.dex'):
          oz.writestr(name, iz.read(name))
  os.remove(dex_path)
  os.rename(tmp_dex_path, dex_path)


def _ParseArgs(args):
  """Parses command-line arguments; returns (options, input paths)."""
  args = build_utils.ExpandFileArgs(args)

  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)

  parser.add_option('--android-sdk-tools',
                    help='Android sdk build tools directory.')
  parser.add_option('--output-directory',
                    default=os.getcwd(),
                    help='Path to the output build directory.')
  parser.add_option('--dex-path', help='Dex output path.')
  parser.add_option('--configuration-name',
                    help='The build CONFIGURATION_NAME.')
  parser.add_option('--proguard-enabled',
                    help='"true" if proguard is enabled.')
  parser.add_option('--proguard-enabled-input-path',
                    help=('Path to dex in Release mode when proguard '
                          'is enabled.'))
  parser.add_option('--no-locals',
                    help='Exclude locals list from the dex file.')
  parser.add_option('--multi-dex', default=False, action='store_true',
                    help='Create multiple dex files.')
  parser.add_option('--incremental',
                    action='store_true',
                    help='Enable incremental builds when possible.')
  parser.add_option('--inputs', help='A list of additional input paths.')
  parser.add_option('--excluded-paths',
                    help='A list of paths to exclude from the dex file.')
  parser.add_option('--main-dex-list-paths',
                    help='A list of paths containing a list of the classes to '
                         'include in the main dex.')

  options, paths = parser.parse_args(args)

  required_options = ('android_sdk_tools',)
  build_utils.CheckOptions(options, parser, required=required_options)

  # --multi-dex and --main-dex-list-paths are only meaningful together.
  if options.multi_dex and not options.main_dex_list_paths:
    logging.warning('--multi-dex is unused without --main-dex-list-paths')
    options.multi_dex = False
  elif options.main_dex_list_paths and not options.multi_dex:
    logging.warning('--main-dex-list-paths is unused without --multi-dex')

  if options.main_dex_list_paths:
    options.main_dex_list_paths = build_utils.ParseGypList(
        options.main_dex_list_paths)

  if options.inputs:
    options.inputs = build_utils.ParseGypList(options.inputs)

  if options.excluded_paths:
    options.excluded_paths = build_utils.ParseGypList(options.excluded_paths)

  return options, paths


def _AllSubpathsAreClassFiles(paths, changes):
  """True iff every changed subpath within |paths| is a .class file."""
  for path in paths:
    if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
      return False
  return True


def _RunDx(changes, options, dex_cmd, paths):
  """Invokes dx, using --main-dex-list or --incremental as appropriate."""
  with build_utils.TempDir() as classes_temp_dir:
    # --multi-dex is incompatible with --incremental.
    if options.multi_dex:
      combined_main_dex_list = tempfile.NamedTemporaryFile(suffix='.txt')
      combined_main_dex_list.write(
          _CreateCombinedMainDexList(options.main_dex_list_paths))
      combined_main_dex_list.flush()
      dex_cmd.append('--main-dex-list=%s' % combined_main_dex_list.name)
    else:
      # --incremental tells dx to merge newly dex'ed .class files with
      # those that already exist in the output dex (existing classes are
      # replaced).  Safe only when files were added or modified, never
      # removed.
      if options.incremental and changes.AddedOrModifiedOnly():
        changed_inputs = set(changes.IterChangedPaths())
        changed_paths = [p for p in paths if p in changed_inputs]
        if not changed_paths:
          return
        # When merging in other dex files, there's no easy way to know if
        # classes were removed from them.
        if _AllSubpathsAreClassFiles(changed_paths, changes):
          dex_cmd.append('--incremental')
          for path in changed_paths:
            changed_subpaths = set(changes.IterChangedSubpaths(path))
            # Not a fundamental restriction, but it's the case right now
            # and it simplifies the logic to assume so.
            assert changed_subpaths, 'All inputs should be zip files.'
            build_utils.ExtractAll(path, path=classes_temp_dir,
                                   predicate=lambda p: p in changed_subpaths)
          paths = [classes_temp_dir]

    dex_cmd += paths
    build_utils.CheckOutput(dex_cmd, print_stderr=False)

  if options.dex_path.endswith('.zip'):
    _RemoveUnwantedFilesFromZip(options.dex_path)


def _OnStaleMd5(changes, options, dex_cmd, paths):
  """Re-dexes and records the list of inputs next to the output."""
  _RunDx(changes, options, dex_cmd, paths)
  build_utils.WriteJson(
      [os.path.relpath(p, options.output_directory) for p in paths],
      options.dex_path + '.inputs')


def main(args):
  options, paths = _ParseArgs(args)
  if (options.proguard_enabled == 'true'
      and options.configuration_name == 'Release'):
    paths = [options.proguard_enabled_input_path]

  if options.inputs:
    paths += options.inputs

  if options.excluded_paths:
    # Excluded paths are relative to the output directory.
    exclude_paths = options.excluded_paths
    paths = [p for p in paths
             if not os.path.relpath(p, options.output_directory)
             in exclude_paths]

  input_paths = list(paths)

  dx_binary = os.path.join(options.android_sdk_tools, 'dx')
  # See http://crbug.com/272064 for context on --force-jumbo.
  # See https://github.com/android/platform_dalvik/commit/dd140a22d for
  # --num-threads.
  dex_cmd = [dx_binary, '--num-threads=8', '--dex', '--force-jumbo',
             '--output', options.dex_path]
  if options.no_locals != '0':
    dex_cmd.append('--no-locals')

  if options.multi_dex:
    input_paths.extend(options.main_dex_list_paths)
    dex_cmd += [
        '--multi-dex',
        '--minimal-main-dex',
    ]

  output_paths = [
      options.dex_path,
      options.dex_path + '.inputs',
  ]

  # An escape hatch to be able to check if incremental dexing is causing
  # problems.
  force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))

  build_utils.CallAndWriteDepfileIfStale(
      lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
      options,
      input_paths=input_paths,
      input_strings=dex_cmd,
      output_paths=output_paths,
      force=force,
      pass_changes=True)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
{ "content_hash": "187a0667be587cf12a81c91141b7373a", "timestamp": "", "source": "github", "line_count": 192, "max_line_length": 80, "avg_line_length": 36.135416666666664, "alnum_prop": 0.6532141827616028, "repo_name": "Teamxrtc/webrtc-streaming-node", "id": "59425d705955f8116f23fbceea332f933191cb6e", "size": "7125", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "third_party/webrtc/src/chromium/src/build/android/gyp/dex.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "44" }, { "name": "C++", "bytes": "221840" }, { "name": "HTML", "bytes": "2383" }, { "name": "JavaScript", "bytes": "37396" }, { "name": "Python", "bytes": "2860" }, { "name": "Shell", "bytes": "104" } ], "symlink_target": "" }
"""Packaging script for Ariel's fork of Pinax."""
from setuptools import find_packages, setup

# Trove classifiers describing the project's maturity and target audience.
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Framework :: Django',
]

setup(
    name='pinax',
    version='0.7.0',
    description='Ariels fork of Pinax, a platform for rapidly developing websites.',
    author='Ariel Nunez',
    author_email='ingenieroariel@gmail.com',
    url='http://github.com/ingenieroariel/pinax/tree/master',
    packages=find_packages(),
    classifiers=CLASSIFIERS,
    # Ship every data file under version control (svn and CVS by default).
    include_package_data=True,
    # setuptools_git is fetched before setup runs so data files tracked by
    # Git are also discovered.
    setup_requires=['setuptools_git'],
    zip_safe=False,
)
{ "content_hash": "16687afdd622ad0afdc176ec19a66bd1", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 84, "avg_line_length": 36.55555555555556, "alnum_prop": 0.662613981762918, "repo_name": "ingenieroariel/pinax", "id": "bbcf196ad9857424d738fffe00f1b7889237475e", "size": "987", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "3140" }, { "name": "Python", "bytes": "520245" } ], "symlink_target": "" }
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data interchange format. :mod:`json` exposes an API familiar to users of the standard library :mod:`marshal` and :mod:`pickle` modules. It is derived from a version of the externally maintained simplejson library. Encoding basic Python object hierarchies:: >>> import json >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) '["foo", {"bar": ["baz", null, 1.0, 2]}]' >>> print(json.dumps("\"foo\bar")) "\"foo\bar" >>> print(json.dumps('\u1234')) "\u1234" >>> print(json.dumps('\\')) "\\" >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)) {"a": 0, "b": 0, "c": 0} >>> from io import StringIO >>> io = StringIO() >>> json.dump(['streaming API'], io) >>> io.getvalue() '["streaming API"]' Compact encoding:: >>> import json >>> from collections import OrderedDict >>> mydict = OrderedDict([('4', 5), ('6', 7)]) >>> json.dumps([1,2,3,mydict], separators=(',', ':')) '[1,2,3,{"4":5,"6":7}]' Pretty printing:: >>> import json >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)) { "4": 5, "6": 7 } Decoding JSON:: >>> import json >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}] >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj True >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar' True >>> from io import StringIO >>> io = StringIO('["streaming API"]') >>> json.load(io)[0] == 'streaming API' True Specializing JSON object decoding:: >>> import json >>> def as_complex(dct): ... if '__complex__' in dct: ... return complex(dct['real'], dct['imag']) ... return dct ... >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', ... object_hook=as_complex) (1+2j) >>> from decimal import Decimal >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') True Specializing JSON object encoding:: >>> import json >>> def encode_complex(obj): ... if isinstance(obj, complex): ... return [obj.real, obj.imag] ... 
raise TypeError(repr(obj) + " is not JSON serializable")
    ...
    >>> json.dumps(2 + 1j, default=encode_complex)
    '[2.0, 1.0]'
    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
    '[2.0, 1.0]'
    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
    '[2.0, 1.0]'


Using json.tool from the shell to validate and pretty-print::

    $ echo '{"json":"obj"}' | python -m json.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -m json.tool
    Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
]

__author__ = 'Bob Ippolito <bob@redivi.com>'

from .decoder import JSONDecoder, JSONDecodeError
from .encoder import JSONEncoder

# Module-level encoder shared by dump()/dumps(): when a caller passes only
# the default arguments we reuse this instance instead of building a fresh
# JSONEncoder on every call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)

def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
    instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the strings written to ``fp`` can
    contain non-ASCII characters if they appear in strings contained in
    ``obj``. Otherwise, all such characters are escaped in JSON strings.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.

    If specified, ``separators`` should be an ``(item_separator, key_separator)``
    tuple.  The default is ``(', ', ': ')`` if *indent* is ``None`` and
    ``(',', ': ')`` otherwise.  To get the most compact JSON representation,
    you should specify ``(',', ':')`` to eliminate whitespace.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *sort_keys* is true (default: ``False``), then the output of
    dictionaries will be sorted by key.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.

    """
    # cached encoder
    # Fast path: every option is at its default, so the shared module-level
    # encoder can be reused directly.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, default=default, sort_keys=sort_keys,
            **kw).iterencode(obj)
    # could accelerate with writelines in some versions of Python, at
    # a debuggability cost
    for chunk in iterable:
        fp.write(chunk)


def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    If ``skipkeys`` is true then ``dict`` keys that are not basic types
    (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
    instead of raising a ``TypeError``.

    If ``ensure_ascii`` is false, then the return value can contain non-ASCII
    characters if they appear in strings contained in ``obj``. Otherwise, all
    such characters are escaped in JSON strings.

    If ``check_circular`` is false, then the circular reference check
    for container types will be skipped and a circular reference will
    result in an ``OverflowError`` (or worse).

    If ``allow_nan`` is false, then it will be a ``ValueError`` to
    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance of the JSON specification, instead of using the
    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).

    If ``indent`` is a non-negative integer, then JSON array elements and
    object members will be pretty-printed with that indent level. An indent
    level of 0 will only insert newlines. ``None`` is the most compact
    representation.

    If specified, ``separators`` should be an ``(item_separator, key_separator)``
    tuple.  The default is ``(', ', ': ')`` if *indent* is ``None`` and
    ``(',', ': ')`` otherwise.  To get the most compact JSON representation,
    you should specify ``(',', ':')`` to eliminate whitespace.

    ``default(obj)`` is a function that should return a serializable version
    of obj or raise TypeError. The default simply raises TypeError.

    If *sort_keys* is true (default: ``False``), then the output of
    dictionaries will be sorted by key.

    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
    ``.default()`` method to serialize additional types), specify it with
    the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.

    """
    # cached encoder
    # Fast path mirroring dump(): defaults only -> reuse the shared encoder.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, default=default, sort_keys=sort_keys,
        **kw).encode(obj)


# Shared decoder used by loads() when no customization hooks are supplied.
_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)


def load(fp, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
    a JSON document) to a Python object.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``). The return value of
    ``object_hook`` will be used instead of the ``dict``. This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    ``object_pairs_hook`` is an optional function that will be called with the
    result of any object literal decoded with an ordered list of pairs.  The
    return value of ``object_pairs_hook`` will be used instead of the ``dict``.
    This feature can be used to implement custom decoders that rely on the
    order that the key and value pairs are decoded (for example,
    collections.OrderedDict will remember the order of insertion). If
    ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg; otherwise ``JSONDecoder`` is used.

    """
    # Delegates to loads(); the whole stream is read into memory first.
    return loads(fp.read(),
        cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
        **kw)


def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` instance containing a JSON
    document) to a Python object.

    ``object_hook`` is an optional function that will be called with the
    result of any object literal decode (a ``dict``). The return value of
    ``object_hook`` will be used instead of the ``dict``. This feature
    can be used to implement custom decoders (e.g. JSON-RPC class hinting).

    ``object_pairs_hook`` is an optional function that will be called with the
    result of any object literal decoded with an ordered list of pairs.  The
    return value of ``object_pairs_hook`` will be used instead of the ``dict``.
    This feature can be used to implement custom decoders that rely on the
    order that the key and value pairs are decoded (for example,
    collections.OrderedDict will remember the order of insertion). If
    ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.

    ``parse_float``, if specified, will be called with the string
    of every JSON float to be decoded. By default this is equivalent to
    float(num_str). This can be used to use another datatype or parser
    for JSON floats (e.g. decimal.Decimal).

    ``parse_int``, if specified, will be called with the string
    of every JSON int to be decoded. By default this is equivalent to
    int(num_str). This can be used to use another datatype or parser
    for JSON integers (e.g. float).

    ``parse_constant``, if specified, will be called with one of the
    following strings: -Infinity, Infinity, NaN.
    This can be used to raise an exception if invalid JSON numbers
    are encountered.

    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
    kwarg; otherwise ``JSONDecoder`` is used.

    The ``encoding`` argument is ignored and deprecated.

    """
    if not isinstance(s, str):
        raise TypeError('the JSON object must be str, not {!r}'.format(
                            s.__class__.__name__))
    # A BOM is not valid inside a JSON document; callers should decode
    # the bytes with 'utf-8-sig' before passing them here.
    if s.startswith(u'\ufeff'):
        raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
                              s, 0)
    # Fast path: no hooks or custom class requested -> shared decoder.
    if (cls is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    return cls(**kw).decode(s)
{ "content_hash": "3c497353d82275fcb0db1f840639a945", "timestamp": "", "source": "github", "line_count": 332, "max_line_length": 81, "avg_line_length": 40.05120481927711, "alnum_prop": 0.6406708280063173, "repo_name": "batermj/algorithm-challenger", "id": "ca2c611bd0e48ce4146117345687eca513eeac97", "size": "13297", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/json/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "655185" }, { "name": "Batchfile", "bytes": "127416" }, { "name": "C", "bytes": "33127630" }, { "name": "C++", "bytes": "1364796" }, { "name": "CSS", "bytes": "3163" }, { "name": "Common Lisp", "bytes": "48962" }, { "name": "DIGITAL Command Language", "bytes": "26402" }, { "name": "DTrace", "bytes": "2196" }, { "name": "Go", "bytes": "26248" }, { "name": "HTML", "bytes": "385719" }, { "name": "Haskell", "bytes": "33612" }, { "name": "Java", "bytes": "1084" }, { "name": "JavaScript", "bytes": "20754" }, { "name": "M4", "bytes": "403992" }, { "name": "Makefile", "bytes": "238185" }, { "name": "Objective-C", "bytes": "4934684" }, { "name": "PHP", "bytes": "3513" }, { "name": "PLSQL", "bytes": "45772" }, { "name": "Perl", "bytes": "649" }, { "name": "PostScript", "bytes": "27606" }, { "name": "PowerShell", "bytes": "21737" }, { "name": "Python", "bytes": "55270625" }, { "name": "R", "bytes": "29951" }, { "name": "Rich Text Format", "bytes": "14551" }, { "name": "Roff", "bytes": "292490" }, { "name": "Ruby", "bytes": "519" }, { "name": "Scala", "bytes": "846446" }, { "name": "Shell", "bytes": "491113" }, { "name": "Swift", "bytes": "881" }, { "name": "TeX", "bytes": "337654" }, { "name": "VBScript", "bytes": "140" }, { "name": "XSLT", "bytes": "153" } ], "symlink_target": "" }
from wtforms.validators import StopValidation, ValidationError

from flask_bombril.r import R
from extensions import db


class TestUser(db.Model):
    """Minimal user model used by the form-validator tests."""

    # The email address doubles as the primary key, so it is unique
    # by construction.
    email = db.Column(db.String(), primary_key=True, unique=True)


class AlwaysError(object):
    """Field validator that unconditionally fails with the stock message."""

    def __init__(self):
        pass

    def __call__(self, form, field):
        raise ValidationError(R.string.validators.always_error)


def raise_with_stop(validator, message=None):
    """Raise the appropriate wtforms error for *validator*.

    ``StopValidation`` is raised when ``validator.stop`` is set (halting the
    remaining validator chain), ``ValidationError`` otherwise.  A truthy
    *message* overrides ``validator.message``.
    """
    error_cls = StopValidation if validator.stop else ValidationError
    raise error_cls(message if message else validator.message)
{ "content_hash": "3d26ae252c8f7f19d65f9d177d488d85", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 65, "avg_line_length": 25.714285714285715, "alnum_prop": 0.6625, "repo_name": "marcoprado17/flask-bone", "id": "fa555bc4159b1f569f4962ce5683474ab847d195", "size": "1221", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/flask_bombril/form_validators/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3196" }, { "name": "HTML", "bytes": "10430" }, { "name": "JavaScript", "bytes": "3983" }, { "name": "Python", "bytes": "96101" }, { "name": "Shell", "bytes": "2801" } ], "symlink_target": "" }
""" Django settings for App_BackEnd project. Generated by 'django-admin startproject' using Django 1.10.4. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'judlafac92o^%*m9j&7_gfxm_70%#-@ydebt391f-v3lyr@pog' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'api', 'corsheaders', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', ] CORS_ORIGIN_ALLOW_ALL = True CORS_ALLOW_CREDENTIALS = True CORS_ALLOW_METHODS = ( 'GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS' ) ROOT_URLCONF = 'App_BackEnd.urls' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'static')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 
'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'App_BackEnd.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 100, 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.BasicAuthentication', 'rest_framework.authentication.SessionAuthentication', ) }
{ "content_hash": "4e96ca70f0b3aef41a8dac348ea590ad", "timestamp": "", "source": "github", "line_count": 149, "max_line_length": 91, "avg_line_length": 25.711409395973153, "alnum_prop": 0.679196032367528, "repo_name": "jiafengwu0301/App_BackEnd", "id": "53c1b4573a233ff34fef95b3bfd4b1fe0a116d11", "size": "3831", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "App_BackEnd/settings.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "6544" }, { "name": "JavaScript", "bytes": "8711" }, { "name": "Python", "bytes": "13360" } ], "symlink_target": "" }
from __future__ import ( absolute_import, print_function, division, unicode_literals ) import logging import re import sys from datetime import datetime logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) CURRENT_YEAR = datetime.today().year LICENSE_BLOB = """Copyright (c) %d The Jaeger Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.""" % CURRENT_YEAR LICENSE_BLOB_LINES_GO = [ ('// ' + l).strip() + '\n' for l in LICENSE_BLOB.split('\n') ] COPYRIGHT_RE = re.compile(r'Copyright \(c\) (\d+)', re.I) def update_go_license(name, force=False): with open(name) as f: orig_lines = list(f) lines = list(orig_lines) found = False changed = False for i, line in enumerate(lines[:5]): m = COPYRIGHT_RE.search(line) if not m: continue found = True year = int(m.group(1)) if year == CURRENT_YEAR: break new_line = COPYRIGHT_RE.sub('Copyright (c) %d' % CURRENT_YEAR, line) assert line != new_line, ('Could not change year in: %s' % line) lines[i] = new_line changed = True break if not found: if 'Code generated by' in lines[0]: lines[1:1] = ['\n'] + LICENSE_BLOB_LINES_GO + ['\n'] else: lines[0:0] = LICENSE_BLOB_LINES_GO + ['\n'] changed = True if changed: with open(name, 'w') as f: for line in lines: f.write(line) print(name) def main(): if len(sys.argv) == 1: print('USAGE: %s FILE ...' 
% sys.argv[0]) sys.exit(1) for name in sys.argv[1:]: if name.endswith('.go'): try: update_go_license(name) except Exception as error: logger.error('Failed to process file %s', name) logger.exception(error) raise error else: raise NotImplementedError('Unsupported file type: %s' % name) if __name__ == "__main__": main()
{ "content_hash": "39d7916c8daa7be4e4e25cf99531c043", "timestamp": "", "source": "github", "line_count": 91, "max_line_length": 76, "avg_line_length": 27.615384615384617, "alnum_prop": 0.5984878631118186, "repo_name": "NeoCN/jaeger", "id": "0fd5f8b4603704df7984bd3db1ab5969548ab890", "size": "2513", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/updateLicense.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "1017924" }, { "name": "HTML", "bytes": "140" }, { "name": "Makefile", "bytes": "9498" }, { "name": "Python", "bytes": "3515" }, { "name": "Shell", "bytes": "9702" } ], "symlink_target": "" }
"""UDP hole punching server.""" from twisted.internet.protocol import DatagramProtocol from twisted.internet import reactor import sys rendezvous_port = 7654 class ServerProtocol(DatagramProtocol): def __init__(self): """Initialize with empty address list.""" self.addresses = [] def addressString(self, address): """Return a string representation of an address.""" ip, port = address return ':'.join([ip, str(port)]) def datagramReceived(self, datagram, address): """Handle incoming datagram messages.""" print('Got connection') datagram = datagram.decode() if datagram == '0': print('Registration from %s:%d' % address) self.transport.write('ok'.encode(), address) self.addresses.append(address) if len(self.addresses) >= 2: msg_0 = self.addressString(self.addresses[1]) msg_1 = self.addressString(self.addresses[0]) self.transport.write(msg_0.encode(), self.addresses[0]) self.transport.write(msg_1.encode(), self.addresses[1]) self.addresses.pop(0) self.addresses.pop(0) print('Linked peers') def stopProtocol(self): print('Peers linked, disconnected both.') if __name__ == '__main__': reactor.listenUDP(rendezvous_port, ServerProtocol()) print('Listening on *:%d' % (rendezvous_port)) reactor.run()
{ "content_hash": "67ce95d63c4640851f62c353e28cc05f", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 71, "avg_line_length": 31.574468085106382, "alnum_prop": 0.601078167115903, "repo_name": "intangere/NewHope_X25519_XSalsa20_Poly1305", "id": "bcf85a047d788fd830b49db55cf86e4cc08438d4", "size": "1484", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "twisted_server.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "53614" } ], "symlink_target": "" }
import os
import subprocess
import sys


def is_out_of_date(infile, outfile):
    """Return True when *outfile* is missing or older than *infile*.

    An output is out of date when the input has been modified after it,
    i.e. outfile's mtime is strictly less than infile's.
    """
    if not os.path.isfile(outfile):
        return True
    # BUG FIX: the original compared with '>' which regenerated symbols when
    # the output was NEWER than the input and skipped regeneration when the
    # input had changed — the opposite of the stated intent.
    return os.stat(outfile).st_mtime < os.stat(infile).st_mtime


def main(argv):
    """Dump Breakpad symbols for a binary, then optionally strip it."""
    if len(argv) != 5:
        print("dump_app_syms.py <dump_syms_exe> <strip_binary>")
        print("                 <binary_with_symbols> <symbols_output>")
        return 1

    dumpsyms, strip_binary, infile, outfile = argv[1:5]

    # Dump only when the output file is out-of-date.
    if is_out_of_date(infile, outfile):
        with open(outfile, 'w') as outfileobj:
            subprocess.check_call([dumpsyms, '-r', infile],
                                  stdout=outfileobj)
        # '0' means "do not strip"; anything else strips the input binary.
        if strip_binary != '0':
            subprocess.check_call(['strip', infile])
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
{ "content_hash": "fcc48b0b6a696be19def31149df795d4", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 70, "avg_line_length": 28.181818181818183, "alnum_prop": 0.6709677419354839, "repo_name": "flutter/buildroot", "id": "f5c2c54a0dda440ddda5815f6f498ecee82040b3", "size": "873", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "build/linux/dump_app_syms.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "507" }, { "name": "C++", "bytes": "30195" }, { "name": "Python", "bytes": "291265" }, { "name": "Shell", "bytes": "85178" }, { "name": "sed", "bytes": "1677" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.core.cache import cache from django.db import models from django.contrib.contenttypes.models import ContentType from guardian.compat import get_user_model from guardian.core import ObjectPermissionChecker from guardian.exceptions import ObjectNotPersisted from guardian.models import Permission import warnings # TODO: consolidate UserObjectPermissionManager and GroupObjectPermissionManager class BaseObjectPermissionManager(models.Manager): def is_generic(self): try: self.model._meta.get_field('object_pk') return True except models.fields.FieldDoesNotExist: return False class UserObjectPermissionManager(BaseObjectPermissionManager): def assign_perm(self, perm, user, obj): """ Assigns permission with given ``perm`` for an instance ``obj`` and ``user``. """ if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) ctype = ContentType.objects.get_for_model(obj) permission = Permission.objects.get(content_type=ctype, codename=perm) kwargs = {'permission': permission, 'user': user} if self.is_generic(): kwargs['content_type'] = ctype kwargs['object_pk'] = obj.pk else: kwargs['content_object'] = obj obj_perm, created = self.get_or_create(**kwargs) # Add to cache check = ObjectPermissionChecker(user) key = check.get_local_cache_key(obj) cache_perms = cache.get(key) if cache_perms is not None and perm not in cache_perms: cache_perms.append(perm) cache.set(key, cache_perms) return obj_perm def assign(self, perm, user, obj): """ Depreciated function name left in for compatibility""" warnings.warn("UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 1.0.5 version.", DeprecationWarning) return self.assign_perm(perm, user, obj) def remove_perm(self, perm, user, obj): """ Removes permission ``perm`` for an instance ``obj`` and given ``user``. 
Please note that we do NOT fetch object permission from database - we use ``Queryset.delete`` method for removing it. Main implication of this is that ``post_delete`` signals would NOT be fired. """ if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) filters = { 'permission__codename': perm, 'permission__content_type': ContentType.objects.get_for_model(obj), 'user': user, } if self.is_generic(): filters['object_pk'] = obj.pk else: filters['content_object__pk'] = obj.pk self.filter(**filters).delete() #Remove for cache check = ObjectPermissionChecker(user) key = check.get_local_cache_key(obj) cache_perms = cache.get(key) if cache_perms is not None and perm in cache_perms: cache_index = 0 for cache_perm in cache_perms: if perm == cache_perm: cache_perms.pop(cache_index) break cache_index += 1 cache.set(key, cache_perms) def get_for_object(self, user, obj): if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) ctype = ContentType.objects.get_for_model(obj) perms = self.filter( content_type = ctype, user = user, ) return perms class GroupObjectPermissionManager(BaseObjectPermissionManager): def assign_perm(self, perm, group, obj): """ Assigns permission with given ``perm`` for an instance ``obj`` and ``group``. 
""" if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) ctype = ContentType.objects.get_for_model(obj) permission = Permission.objects.get(content_type=ctype, codename=perm) kwargs = {'permission': permission, 'group': group} if self.is_generic(): kwargs['content_type'] = ctype kwargs['object_pk'] = obj.pk else: kwargs['content_object'] = obj obj_perm, created = self.get_or_create(**kwargs) # Add to cache check = ObjectPermissionChecker(group) key = check.get_local_cache_key(obj) cache_perms = cache.get(key) if cache_perms is not None and perm not in cache_perms: cache_perms.append(perm) cache.set(key, cache_perms) User = get_user_model() users = User.objects.filter(groups = group) for user in users: check = ObjectPermissionChecker(user) key = check.get_local_cache_key(obj) cache.delete(key) return obj_perm def assign(self, perm, user, obj): """ Depreciated function name left in for compatibility""" warnings.warn("UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 1.0.5 version.", DeprecationWarning) return self.assign_perm(perm, user, obj) def remove_perm(self, perm, group, obj): """ Removes permission ``perm`` for an instance ``obj`` and given ``group``. 
""" if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) filters = { 'permission__codename': perm, 'permission__content_type': ContentType.objects.get_for_model(obj), 'group': group, } if self.is_generic(): filters['object_pk'] = obj.pk else: filters['content_object__pk'] = obj.pk self.filter(**filters).delete() #Remove for cache check = ObjectPermissionChecker(group) key = check.get_local_cache_key(obj) cache_perms = cache.get(key) if cache_perms is not None and perm in cache_perms: cache_index = 0 for cache_perm in cache_perms: if perm == cache_perm: cache_perms.pop(cache_index) break cache_index += 1 cache.set(key, cache_perms) User = get_user_model() users = User.objects.filter(groups = group) for user in users: check = ObjectPermissionChecker(user) key = check.get_local_cache_key(obj) cache.delete(key) def get_for_object(self, group, obj): if getattr(obj, 'pk', None) is None: raise ObjectNotPersisted("Object %s needs to be persisted first" % obj) ctype = ContentType.objects.get_for_model(obj) perms = self.filter( content_type = ctype, group = group, ) return perms
{ "content_hash": "0ac9ea261d453a52cfd34305dd48d9d1", "timestamp": "", "source": "github", "line_count": 201, "max_line_length": 202, "avg_line_length": 36.43283582089552, "alnum_prop": 0.5925167281168919, "repo_name": "mobstac/django-guardian", "id": "1e2eb87df2f747dd0002f28da0fb57aefe3a4313", "size": "7323", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "guardian/managers.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "13944" }, { "name": "JavaScript", "bytes": "498" }, { "name": "Python", "bytes": "270252" }, { "name": "Shell", "bytes": "3553" } ], "symlink_target": "" }
import os
import librosa
import numpy
from multiprocessing import Pool

# All reference and test clips are resampled to this rate before analysis.
sample_rate = 44100


def scale(arr):
    """Collapse *arr* to its mean and normalize from [20, 20000] to [0, 1].

    The 20–20000 range is the audible-frequency band the spectral features
    below are expected to fall into.
    """
    # get the average
    avg = numpy.average(arr)
    # scale from 20,20000 to 0,1
    return (avg - 20) / (20000 - 20)


def analyse(file):
    """Return normalized [centroid, bandwidth] features for *file*.

    Only the first 0.25 s of audio is analysed.
    """
    print(file)
    y, sr = librosa.load(file, sr=sample_rate, duration=0.25)
    centroid = scale(librosa.feature.spectral_centroid(y=y, sr=sr))
    bandwidth = scale(librosa.feature.spectral_bandwidth(y=y, sr=sr))
    # rolloff = scale(librosa.feature.spectral_rolloff(y=y, sr=sr))
    # contrast = scale(librosa.feature.spectral_contrast(y=y, sr=sr))
    return numpy.asarray([centroid, bandwidth])


# Reference feature vectors, computed once at import time.
kick_values = analyse("./ref_sounds/Kick.mp3")
snare_values = analyse("./ref_sounds/Snare.mp3")
hihat_values = analyse("./ref_sounds/Hihat.mp3")
open_values = analyse("./ref_sounds/Open.mp3")


def get_distance(test_values):
    """Euclidean distances from *test_values* to each reference drum sound.

    Returns [kick, snare, hihat, open] distances.
    """
    test_values = numpy.asarray(test_values)
    kick_dist = numpy.linalg.norm(test_values - kick_values)
    snare_dist = numpy.linalg.norm(test_values - snare_values)
    hihat_dist = numpy.linalg.norm(test_values - hihat_values)
    open_dist = numpy.linalg.norm(test_values - open_values)
    return [kick_dist, snare_dist, hihat_dist, open_dist]


def process(file_name):
    """Worker: distance vector for one file (top-level so Pool can pickle it)."""
    return get_distance(analyse(file_name))


if __name__ == "__main__":
    with open("./data/filenames.txt") as file_names:
        files = file_names.read().splitlines()

    limit = None
    # Fan the per-file analysis out across all CPU cores; the context
    # manager tears the pool down (the original leaked it).
    with Pool() as pool:
        results = pool.map(process, files[:limit])

    numpy.savetxt("data/analysis.tsv", results, delimiter="\t", fmt="%.6f")
{ "content_hash": "86532aab355054473599c92d409ff88e", "timestamp": "", "source": "github", "line_count": 51, "max_line_length": 79, "avg_line_length": 32.745098039215684, "alnum_prop": 0.6664670658682634, "repo_name": "googlecreativelab/aiexperiments-drum-machine", "id": "7437cdb5256fbf2b2bbf10993ede4184a296efb0", "size": "2240", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/analysis.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "29049" }, { "name": "HTML", "bytes": "6327" }, { "name": "JavaScript", "bytes": "107603" }, { "name": "Python", "bytes": "8408" } ], "symlink_target": "" }
"""Audio source and audio processing compoenents The two main base classes are :class:`AudioSource` which provides audio and :class:`AudioProcessor` which act as a pipeline processor on another :class:`AudioSource`. """ import asyncio import audioop import collections import time import wave import janus try: import pyaudio except ImportError: # This is a workaround for doc generation where pyaudio cannot be installed # TODO(greghaynes): Only fail open during doc gen pass class NoMoreChunksError(Exception): pass class NoDefaultInputDeviceError(Exception): def __init__(self): super(NoDefaultInputDeviceError, self).__init__( 'No default input device' ) # Using a namedtuple for audio chunks due to their lightweight nature AudioChunk = collections.namedtuple('AudioChunk', ['start_time', 'audio', 'width', 'freq']) """A sequence of audio samples. This is the low level structure used for passing audio. Typically these are obtained from iterating over an :class:`AudioBlock`. In order to make this object use minimal memory it is implemented as a namedtuple. :param start_time: Unix timestamp of the first sample. :type start_time: int :param audio: Bytes array of audio samples. :type audio: bytes :param width: Number of bytes per sample. :type width: int :param freq: Sampling frequency. :type freq: int """ class AudioBlock(object): """An iterator over :class:`AudioChunk`. Blocks are used to deliniate continuous chunks of audio. As an example, when using the :class:`SquelchedSource` audio source a consumer often would like to know at what points the squelch was triggered on and off. 
""" def __init__(self): self._stopped = asyncio.Event() def __aiter__(self): return self @property def ended(self): return self._stopped.is_set() def end(self): self._stopped.set() async def __anext__(self): if self._stopped.is_set(): raise StopAsyncIteration() chunk_task = asyncio.ensure_future(self._next_chunk()) stop_task = asyncio.ensure_future(self._stopped.wait()) try: done, pending = await asyncio.wait( [chunk_task, stop_task], return_when=asyncio.FIRST_COMPLETED ) for task in pending: task.cancel() if chunk_task.done(): try: return chunk_task.result() except StopAsyncIteration: self.end() raise else: raise StopAsyncIteration() finally: chunk_task.cancel() stop_task.cancel() class QueueAudioBlock(AudioBlock): def __init__(self, queue=None): self._q = queue or asyncio.Queue() super(QueueAudioBlock, self).__init__() async def _next_chunk(self): chunk = await self._q.get() if chunk is None: raise StopAsyncIteration('No more audio chunks') return chunk async def add_chunk(self, chunk): await self._q.put(chunk) def chunk_sample_cnt(chunk): """Number of samples which occured in an AudioChunk :param chunk: The chunk to examine. :type chink: AudioChunk """ return int(len(chunk.audio) / chunk.width) def merge_chunks(chunks): assert(len(chunks) > 0) audio = b''.join([x.audio for x in chunks]) return AudioChunk(chunks[0].start_time, audio, chunks[0].width, chunks[0].freq) def split_chunk(chunk, sample_offset): offset = int(sample_offset * chunk.width) first_audio = memoryview(chunk.audio)[:offset] second_audio = memoryview(chunk.audio)[offset:] first_chunk = AudioChunk( chunk.start_time, first_audio, chunk.width, chunk.freq ) second_chunk = AudioChunk( chunk.start_time, second_audio, chunk.width, chunk.freq ) return first_chunk, second_chunk class EvenChunkIterator(object): """Iterate over chunks from an audio source in even sized increments. :parameter iterator: Iterator over audio chunks. 
:type iterator: Iterator :parameter chunk_size: Number of samples in resulting chunks :type chunk_size: int """ def __init__(self, iterator, chunk_size): self._iterator = iterator self._chunk_size = chunk_size self._cur_chunk = None def __aiter__(self): return self async def __anext__(self): sample_queue = collections.deque() ret_chunk_size = 0 while ret_chunk_size < self._chunk_size: chunk = self._cur_chunk or await self._iterator.__anext__() self._cur_chunk = None cur_chunk_size = chunk_sample_cnt(chunk) ret_chunk_size += cur_chunk_size sample_queue.append(chunk) if ret_chunk_size > self._chunk_size: # We need to break up the chunk merged_chunk = merge_chunks(sample_queue) ret_chunk, leftover_chunk = split_chunk(merged_chunk, self._chunk_size) self._cur_chunk = leftover_chunk return ret_chunk return merge_chunks(sample_queue) class RememberingIterator(object): def __init__(self, iterator, memory_size): self._iterator = iterator self.memory_size = memory_size self._buff = collections.deque(maxlen=memory_size) def __aiter__(self): return self async def __anext__(self): ret = await self._iterator.__anext__() self._buff.append(ret) return ret def memory(self): return self._buff class _ListenCtxtMgr(object): def __init__(self, source): self._source = source async def __aenter__(self): await self._source.start() async def __aexit__(self, *args): await self._source.stop() class AudioSource(object): """Base class for providing audio. All classes which provide audio in some form implement this class. Audio is obtained by first entering the :func:`listen` context manager and then iterating over the :class:`AudioSource` to obtain :class:`AudioBlock`. """ def __init__(self): self.running = False self._last_block = None def listen(self): """Listen to the AudioSource. :ret: Async context manager which starts and stops the AudioSource. """ return _ListenCtxtMgr(self) async def start(self): """Start the audio source. 
This is where initialization / opening of audio devices should happen. """ self.running = True async def stop(self): """Stop the audio source. This is where deinitialization / closing of audio devices should happen. """ if self._last_block is not None: self._last_block.end() self.running = False def __aiter__(self): return self async def __anext__(self): self._last_block = await self._next_block() return self._last_block class SingleBlockAudioSource(AudioSource): def __init__(self): super(SingleBlockAudioSource, self).__init__() self._block_returned = False async def _next_block(self): if self._block_returned: raise StopAsyncIteration() else: self._block_returned = True return await self._get_block() class AudioSourceProcessor(AudioSource): """Base class for being a pipeline processor of an :class:`AudioSource` :parameter source: Input source :type source: AudioSource """ def __init__(self, source): super(AudioSourceProcessor, self).__init__() self._source = source async def start(self): """Start the input audio source. This is intended to be called from the base class, not directly. """ await super(AudioSourceProcessor, self).start() await self._source.start() async def stop(self): """Stop the input audio source. This is intended to be called from the base class, not directly. """ await self._source.stop() await super(AudioSourceProcessor, self).stop() class Microphone(AudioSource): """Use a local microphone as an audio source. :parameter audio_format: Sample format, default paInt16 :type audio: PyAudio format :parameter channels: Number of channels in microphone. 
:type channels: int :parameter rate: Sample frequency :type rate: int :parameter device_ndx: PyAudio device index :type device_ndx: int """ def __init__(self, audio_format=None, channels=1, rate=16000, device_ndx=0): super(Microphone, self).__init__() audio_format = audio_format or pyaudio.paInt16 self._format = audio_format self._channels = channels self._rate = rate self._device_ndx = device_ndx self._pyaudio = None self._stream = None self._stream_queue = None async def start(self): await super(Microphone, self).start() loop = asyncio.get_event_loop() self._stream_queue = janus.Queue(loop=loop) self._pyaudio = pyaudio.PyAudio() self._stream = self._pyaudio.open( input=True, format=self._format, channels=self._channels, rate=self._rate, input_device_index=self._device_ndx, stream_callback=self._stream_callback ) async def stop(self): await self._stream_queue.async_q.put(None) await super(Microphone, self).stop() self._stream.stop_stream() self._stream.close() self._pyaudio.terminate() async def _next_block(self): return QueueAudioBlock(self._stream_queue.async_q) def _stream_callback(self, in_data, frame_count, time_info, status_flags): chunk = AudioChunk(start_time=time_info['input_buffer_adc_time'], audio=in_data, freq=self._rate, width=2) self._stream_queue.sync_q.put(chunk) retflag = pyaudio.paContinue if self.running else pyaudio.paComplete return (None, retflag) class _WaveAudioBlock(AudioBlock): def __init__(self, wave_fp, nframes, samprate, sampwidth, n_channels): super(_WaveAudioBlock, self).__init__() self._wave_fp = wave_fp self._nframes = nframes self._sampwidth = sampwidth self._samprate = samprate self._n_channels = n_channels async def _next_chunk(self): frames = self._wave_fp.readframes(self._nframes) if self._n_channels == 2: frames = audioop.tomono(frames, self._sampwidth, .5, .5) if len(frames) == 0: raise StopAsyncIteration('No more frames in wav') chunk = AudioChunk(0, audio=frames, width=self._sampwidth, freq=self._samprate) return chunk 
class WaveSource(SingleBlockAudioSource): """Use a wave file as an audio source. :parameter wave_path: Path to wave file. :type wave_path: string :parameter chunk_frames: Chunk size to return from get_chunk :type chunk_frames: int """ def __init__(self, wave_path, chunk_frames=None): super(WaveSource, self).__init__() self._wave_path = wave_path self._chunk_frames = chunk_frames self._wave_fp = None self._width = None self._freq = None self._channels = None self._out_queue = None async def start(self): await super(WaveSource, self).start() self._wave_fp = wave.open(self._wave_path) self._width = self._wave_fp.getsampwidth() self._freq = self._wave_fp.getframerate() self._channels = self._wave_fp.getnchannels() self._out_queue = asyncio.Queue() self._returned_block = False assert(self._channels <= 2) async def stop(self): await self._out_queue.put(None) self._wave_fp.close() await super(WaveSource, self).stop() async def _get_block(self): frame_cnt = self._chunk_frames or self._wave_fp.getnframes() return _WaveAudioBlock(self._wave_fp, frame_cnt, self._freq, self._width, self._channels) class _RateConvertBlock(AudioBlock): def __init__(self, src_block, n_channels, out_rate): super(_RateConvertBlock, self).__init__() self._src_block = src_block self._n_channels = n_channels self._out_rate = out_rate self._state = None async def _next_chunk(self): chunk = await self._src_block.__anext__() new_aud, self._state = audioop.ratecv(chunk.audio, 2, self._n_channels, chunk.freq, self._out_rate, self._state) return AudioChunk(chunk.start_time, new_aud, 2, self._out_rate) class RateConvert(AudioSourceProcessor): def __init__(self, source, n_channels, out_rate): super(RateConvert, self).__init__(source) self._n_channels = n_channels self._out_rate = out_rate async def _next_block(self): src_block = await self._source.__anext__() return _RateConvertBlock(src_block, self._n_channels, self._out_rate) class SquelchedBlock(AudioBlock): def __init__(self, source, squelch_level): 
super(SquelchedBlock, self).__init__() self._source = source self.squelch_level = squelch_level self._sent_mem = False async def _next_chunk(self): if not self._sent_mem: self._sent_mem = True return merge_chunks(self._source.memory()) async for chunk in self._source: squelch_triggered = SquelchedSource.check_squelch( self.squelch_level, True, self._source.memory() ) if squelch_triggered: return chunk else: raise StopAsyncIteration() raise StopAsyncIteration() class SquelchedSource(AudioSourceProcessor): """Filter out samples below a volume level from an audio source. This is useful to prevent constant transcription attempts of background noise, and also to correctly create a 'trigger window' where transcription attempts are made. A sliding window of prefix_samples size is inspected. When the rms of prefix_samples * sample_size samples surpasses the squelch_level this source begins to emit audio. Once the rms of the sliding window passes below 80% of the squelch level this source stop emitting audio. :parameter source: Input source :type source: AudioSource :parameter sample_size: Size of each sample to inspect. 
:type sample_size: int :parameter squelch_level: RMS value to trigger squelch :type squelch_level: int :parameter prefix_samples: Number of samples of sample_size to check :type prefix_samples: int """ def __init__(self, source, sample_size=1600, squelch_level=None, prefix_samples=4): super(SquelchedSource, self).__init__(source) self._sample_size = sample_size self.squelch_level = squelch_level self._prefix_samples = prefix_samples self._sample_width = 2 self._src_block = None @staticmethod def check_squelch(level, is_triggered, chunks): rms_vals = [audioop.rms(x.audio, x.width) for x in chunks] median_rms = sorted(rms_vals)[int(len(rms_vals) * .5)] if is_triggered: if median_rms < (level * .8): return False else: return True else: if median_rms > level: return True else: return False async def detect_squelch_level(self, detect_time=10, threshold=.8): start_time = time.time() end_time = start_time + detect_time audio_chunks = collections.deque() async with self._source.listen(): async for block in self._source: if time.time() > end_time: break even_iter = EvenChunkIterator(block, self._sample_size) try: while time.time() < end_time: audio_chunks.append(await even_iter.__anext__()) except StopAsyncIteration: pass rms_vals = [audioop.rms(x.audio, self._sample_width) for x in audio_chunks if len(x.audio) == self._sample_size * self._sample_width] level = sorted(rms_vals)[int(threshold * len(rms_vals)):][0] self.squelch_level = level return level async def start(self): assert(self.squelch_level is not None) await super(SquelchedSource, self).start() async def _next_block(self): if self._src_block is None or self._src_block.ended: self._src_block = await self._source.__anext__() even_iter = EvenChunkIterator(self._src_block, self._sample_size) self._mem_iter = RememberingIterator(even_iter, self._prefix_samples) async for _ in self._mem_iter: # NOQA if SquelchedSource.check_squelch(self.squelch_level, False, self._mem_iter.memory()): return 
SquelchedBlock(self._mem_iter, self.squelch_level) raise StopAsyncIteration() class AudioPlayer(object): """Play audio from an audio source. This is not generally useful for transcription, but can be very useful in the development of :class:`AudioSource` or :class:`AudioProcessor` classes. :param source: Source to play. :type source: AudioSource :param width: Bytes per sample. :type width: int :param channels: Number of channels in output device. :type channels: int :param freq: Sampling frequency of output device. :type freq: int """ def __init__(self, source, width, channels, freq): self._source = source self._width = width self._channels = channels self._freq = freq async def play(self): """Play audio from source. This method will block until the source runs out of audio. """ p = pyaudio.PyAudio() stream = p.open(format=p.get_format_from_width(self._width), channels=self._channels, rate=self._freq, output=True) async with self._source.listen(): async for block in self._source: async for chunk in block: stream.write(chunk.audio) stream.stop_stream() stream.close() p.terminate()
{ "content_hash": "3a30f815fd112acc549ee5ca911ab918", "timestamp": "", "source": "github", "line_count": 602, "max_line_length": 79, "avg_line_length": 31.995016611295682, "alnum_prop": 0.5940501531592337, "repo_name": "ibm-dev/streamtotext", "id": "8f1cb55769810d5033061708300dc5d783edab91", "size": "19261", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "streamtotext/audio.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "46083" }, { "name": "Shell", "bytes": "215" } ], "symlink_target": "" }
"""One-line documentation for resource_registration module. A detailed description of resource_registration. """ from googlecloudsdk.core import resources def RegisterReleasedAPIs(): """Register all official versions of released Cloud APIs. """ # pylint:disable=g-import-not-at-top from googlecloudapis.bigquery import v2 as bigquery_v2 from googlecloudapis.compute import v1 as compute_v1 from googlecloudapis.developerprojects import v2beta1 as projects_v2beta1 from googlecloudapis.dns import v1beta1 as dns_v1beta1 from googlecloudapis.manager import v1beta2 as manager_v1beta2 from googlecloudapis.replicapool import v1beta1 as replicapool_v1beta1 from googlecloudapis.resourceviews import v1beta1 as resourceviews_v1beta1 from googlecloudapis.sqladmin import v1beta3 as sqladmin_v1beta3 resources.RegisterAPI(bigquery_v2.BigqueryV2(get_credentials=False)) resources.RegisterAPI(compute_v1.ComputeV1(get_credentials=False)) resources.RegisterAPI( projects_v2beta1.DeveloperprojectsV2beta1(get_credentials=False)) resources.RegisterAPI(dns_v1beta1.DnsV1beta1(get_credentials=False)) resources.RegisterAPI(manager_v1beta2.ManagerV1beta2(get_credentials=False)) resources.RegisterAPI( replicapool_v1beta1.ReplicapoolV1beta1(get_credentials=False)) resources.RegisterAPI( resourceviews_v1beta1.ResourceviewsV1beta1(get_credentials=False)) resources.RegisterAPI(sqladmin_v1beta3.SqladminV1beta3(get_credentials=False)) from googlecloudapis.autoscaler import v1beta2 as autoscaler_v1beta2 resources.RegisterAPI( autoscaler_v1beta2.AutoscalerV1beta2(get_credentials=False)) from googlecloudapis.replicapool import v1beta2 as replicapool_v1beta2 resources.RegisterAPI( replicapool_v1beta2.ReplicapoolV1beta2(get_credentials=False)) from googlecloudapis.replicapoolupdater import v1beta1 as updater_v1beta1 resources.RegisterAPI( updater_v1beta1.ReplicapoolupdaterV1beta1(get_credentials=False)) def RegisterUnreleasedAPIs(): pass
{ "content_hash": "4c1bad65bf0a3335b44f7ff4771f563d", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 80, "avg_line_length": 44.93333333333333, "alnum_prop": 0.820969337289812, "repo_name": "Plantain/sms-mailinglist", "id": "e1c3bb3127943273563d9b0e20b38ad1e4029b64", "size": "2073", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "lib/googlecloudsdk/core/util/resource_registration.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Erlang", "bytes": "1479" }, { "name": "Perl", "bytes": "6919" }, { "name": "Python", "bytes": "4968506" }, { "name": "R", "bytes": "274" }, { "name": "Shell", "bytes": "1540" } ], "symlink_target": "" }
from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_vpn_ipsec_forticlient short_description: Configure FortiClient policy realm in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify vpn_ipsec feature and forticlient category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. 
type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 state: description: - Indicates whether to create or remove the object. This attribute was present already in previous version in a deeper level. It has been moved out to this outer level. type: str required: false choices: - present - absent version_added: 2.9 vpn_ipsec_forticlient: description: - Configure FortiClient policy realm. default: null type: dict suboptions: state: description: - B(Deprecated) - Starting with Ansible 2.9 we recommend using the top-level 'state' parameter. - HORIZONTALLINE - Indicates whether to create or remove the object. type: str required: false choices: - present - absent phase2name: description: - Phase 2 tunnel name that you defined in the FortiClient dialup configuration. Source vpn.ipsec.phase2.name vpn.ipsec.phase2-interface .name. type: str realm: description: - FortiClient realm name. required: true type: str status: description: - Enable/disable this FortiClient configuration. type: str choices: - enable - disable usergroupname: description: - User group name for FortiClient users. Source user.group.name. type: str ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure FortiClient policy realm. 
fortios_vpn_ipsec_forticlient: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" vpn_ipsec_forticlient: phase2name: "<your_own_value> (source vpn.ipsec.phase2.name vpn.ipsec.phase2-interface.name)" realm: "<your_own_value>" status: "enable" usergroupname: "<your_own_value> (source user.group.name)" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: 
fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_vpn_ipsec_forticlient_data(json): option_list = ['phase2name', 'realm', 'status', 'usergroupname'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def vpn_ipsec_forticlient(data, fos): vdom = data['vdom'] if 'state' in data and data['state']: state = data['state'] elif 'state' in data['vpn_ipsec_forticlient'] and data['vpn_ipsec_forticlient']: state = data['vpn_ipsec_forticlient']['state'] else: state = True vpn_ipsec_forticlient_data = data['vpn_ipsec_forticlient'] filtered_data = underscore_to_hyphen(filter_vpn_ipsec_forticlient_data(vpn_ipsec_forticlient_data)) if state == "present": return fos.set('vpn.ipsec', 'forticlient', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('vpn.ipsec', 'forticlient', mkey=filtered_data['realm'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_vpn_ipsec(data, fos): if data['vpn_ipsec_forticlient']: resp = vpn_ipsec_forticlient(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": 
False, "type": "str", "choices": ["present", "absent"]}, "vpn_ipsec_forticlient": { "required": False, "type": "dict", "default": None, "options": { "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "phase2name": {"required": False, "type": "str"}, "realm": {"required": True, "type": "str"}, "status": {"required": False, "type": "str", "choices": ["enable", "disable"]}, "usergroupname": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) # legacy_mode refers to using fortiosapi instead of HTTPAPI legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
{ "content_hash": "66fa3f504ce1657798d2bdac6274d3a7", "timestamp": "", "source": "github", "line_count": 354, "max_line_length": 155, "avg_line_length": 31.51412429378531, "alnum_prop": 0.5928648261025458, "repo_name": "thaim/ansible", "id": "8d7f48f2f80a2b66dd5793963382897a41a38822", "size": "11174", "binary": false, "copies": "13", "ref": "refs/heads/fix-broken-link", "path": "lib/ansible/modules/network/fortios/fortios_vpn_ipsec_forticlient.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "7" }, { "name": "Shell", "bytes": "246" } ], "symlink_target": "" }
from nose.tools import ( assert_equals, assert_not_equals, assert_true ) from random import randint import os from rsk_mind.classifier import XGBoostClassifier from rsk_mind.dataset import ( Splitter, Dataset ) class TestXGBoostClassifier(object): @classmethod def setup_class(cls): cls.nr_columns = 11 cls.nr_rows = 100 header = [] for i in range(cls.nr_columns-1): header.append("f_{}".format(i)) header.append("target") rows = [] for row_idx in range(cls.nr_rows): row = [] for col_idx in range(cls.nr_columns): row.append(randint(0, 1)) rows.append(row) cls.original_dataset = Dataset(header, rows) cls.original_dataset.transformed_header = list(header) cls.original_dataset.transformed_rows = list(rows) _splitter = Splitter(cls.original_dataset) cls.training_dataset = _splitter.training_dataset cls.validation_dataset = _splitter.validation_dataset cls.test_dataset = _splitter.test_dataset cls.model_path = os.path.join(os.getcwd(),'xgb_model.bin') cls.test_classifier = XGBoostClassifier() @classmethod def teardown_class(cls): del cls.training_dataset del cls.validation_dataset del cls.test_dataset del cls.original_dataset if os.path.exists(cls.model_path): os.remove(cls.model_path) del cls.model_path del cls.test_classifier def test_01_empty_training_dataset_of_classifier(self): assert_equals(self.test_classifier.training_dataset, None) def test_02_set_training_dataset_to_classifier(self): self.test_classifier.training_dataset = self.training_dataset assert_not_equals(self.test_classifier.training_dataset, None) def test_03_empty_validation_dataset_of_classifier(self): assert_equals(self.test_classifier.validation_dataset, None) def test_04_set_validation_dataset_to_classifier(self): self.test_classifier.validation_dataset = self.validation_dataset assert_not_equals(self.test_classifier.validation_dataset, None) def test_05_empty_test_dataset_of_classifier(self): assert_equals(self.test_classifier.test_dataset, None) def test_06_set_test_dataset_to_classifier(self): 
self.test_classifier.test_dataset = self.test_dataset assert_not_equals(self.test_classifier.test_dataset, None) def test_07_train(self): self.test_classifier.train() assert_not_equals(self.test_classifier.model, None) def test_08_save_model(self): self.test_classifier.save_model(self.model_path) save_operation = os.path.exists(self.model_path) and os.path.isfile(self.model_path) assert_equals(save_operation, True) def test_09_evaluate(self): summary = self.test_classifier.evaluate(0.05) assert_not_equals(summary, None) def test_10_predict(self): instance = [] for i in range(self.nr_columns): instance.append(randint(0, 1)) score = self.test_classifier.predict(instance) assert_true(score >= 0 and score <= 1) def test_11_load_model(self): self.test_classifier.model = None assert_equals(self.test_classifier.model, None) self.test_classifier.load_model(self.model_path) assert_not_equals(self.test_classifier.model, None)
{ "content_hash": "491c84430d78aee94decfa0d8acb443e", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 92, "avg_line_length": 33.89320388349515, "alnum_prop": 0.6582641077055285, "repo_name": "rsk-mind/rsk-mind-framework", "id": "9252077d88cfe548a48c6f1e67e470cac72245ae", "size": "3491", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/classifier/test_xgboost_classifier.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "83888" } ], "symlink_target": "" }
"""Generic presubmit checks that can be reused by other presubmit checks.""" import os as _os _HERE = _os.path.dirname(_os.path.abspath(__file__)) # Justifications for each filter: # # - build/include : Too many; fix in the future. # - build/include_order : Not happening; #ifdefed includes. # - build/namespace : I'm surprised by how often we violate this rule. # - readability/casting : Mistakes a whole bunch of function pointer. # - runtime/int : Can be fixed long term; volume of errors too high # - runtime/virtual : Broken now, but can be fixed in the future? # - whitespace/braces : We have a lot of explicit scoping in chrome code. DEFAULT_LINT_FILTERS = [ '-build/include', '-build/include_order', '-build/namespace', '-readability/casting', '-runtime/int', '-runtime/virtual', '-whitespace/braces', ] # These filters will always be removed, even if the caller specifies a filter # set, as they are problematic or broken in some way. # # Justifications for each filter: # - build/c++11 : Rvalue ref checks are unreliable (false positives), # include file and feature blacklists are # google3-specific. 
BLACKLIST_LINT_FILTERS = [ '-build/c++11', ] ### Description checks def CheckChangeHasTestField(input_api, output_api): """Requires that the changelist have a TEST= field.""" if input_api.change.TEST: return [] else: return [output_api.PresubmitNotifyResult( 'If this change requires manual test instructions to QA team, add ' 'TEST=[instructions].')] def CheckChangeHasBugField(input_api, output_api): """Requires that the changelist have a BUG= field.""" if input_api.change.BUG: return [] else: return [output_api.PresubmitNotifyResult( 'If this change has an associated bug, add BUG=[bug number].')] def CheckChangeHasTestedField(input_api, output_api): """Requires that the changelist have a TESTED= field.""" if input_api.change.TESTED: return [] else: return [output_api.PresubmitError('Changelist must have a TESTED= field.')] def CheckChangeHasQaField(input_api, output_api): """Requires that the changelist have a QA= field.""" if input_api.change.QA: return [] else: return [output_api.PresubmitError('Changelist must have a QA= field.')] def CheckDoNotSubmitInDescription(input_api, output_api): """Checks that the user didn't add 'DO NOT ''SUBMIT' to the CL description. """ keyword = 'DO NOT ''SUBMIT' if keyword in input_api.change.DescriptionText(): return [output_api.PresubmitError( keyword + ' is present in the changelist description.')] else: return [] def CheckChangeHasDescription(input_api, output_api): """Checks the CL description is not empty.""" text = input_api.change.DescriptionText() if text.strip() == '': if input_api.is_committing: return [output_api.PresubmitError('Add a description to the CL.')] else: return [output_api.PresubmitNotifyResult('Add a description to the CL.')] return [] def CheckChangeWasUploaded(input_api, output_api): """Checks that the issue was uploaded before committing.""" if input_api.is_committing and not input_api.change.issue: return [output_api.PresubmitError( 'Issue wasn\'t uploaded. 
Please upload first.')] return [] ### Content checks def CheckDoNotSubmitInFiles(input_api, output_api): """Checks that the user didn't add 'DO NOT ''SUBMIT' to any files.""" # We want to check every text file, not just source files. file_filter = lambda x : x keyword = 'DO NOT ''SUBMIT' errors = _FindNewViolationsOfRule(lambda _, line : keyword not in line, input_api, file_filter) text = '\n'.join('Found %s in %s' % (keyword, loc) for loc in errors) if text: return [output_api.PresubmitError(text)] return [] def CheckChangeLintsClean(input_api, output_api, source_file_filter=None, lint_filters=None, verbose_level=None): """Checks that all '.cc' and '.h' files pass cpplint.py.""" _RE_IS_TEST = input_api.re.compile(r'.*tests?.(cc|h)$') result = [] cpplint = input_api.cpplint # Access to a protected member _XX of a client class # pylint: disable=W0212 cpplint._cpplint_state.ResetErrorCounts() lint_filters = lint_filters or DEFAULT_LINT_FILTERS lint_filters.extend(BLACKLIST_LINT_FILTERS) cpplint._SetFilters(','.join(lint_filters)) # We currently are more strict with normal code than unit tests; 4 and 5 are # the verbosity level that would normally be passed to cpplint.py through # --verbose=#. Hopefully, in the future, we can be more verbose. 
  files = [f.AbsoluteLocalPath() for f in
           input_api.AffectedSourceFiles(source_file_filter)]
  for file_name in files:
    # Unit tests get a more lenient lint verbosity level (5) than normal
    # sources (4); the regex distinguishing them is compiled above.
    if _RE_IS_TEST.match(file_name):
      level = 5
    else:
      level = 4

    # NOTE(review): `or` means the level chosen for the first file is reused
    # for every subsequent file once verbose_level becomes truthy — TODO
    # confirm this latching is intended rather than per-file selection.
    verbose_level = verbose_level or level
    cpplint.ProcessFile(file_name, verbose_level)

  if cpplint._cpplint_state.error_count > 0:
    # Errors block a commit but only warn during upload, so incomplete
    # patches can still be uploaded for review.
    if input_api.is_committing:
      res_type = output_api.PresubmitError
    else:
      res_type = output_api.PresubmitPromptWarning
    result = [res_type('Changelist failed cpplint.py check.')]

  return result


def CheckChangeHasNoCR(input_api, output_api, source_file_filter=None):
  """Checks no '\r' (CR) character is in any source files."""
  cr_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    # Read in binary mode so platform newline translation doesn't hide CRs.
    if '\r' in input_api.ReadFile(f, 'rb'):
      cr_files.append(f.LocalPath())
  if cr_files:
    return [output_api.PresubmitPromptWarning(
        'Found a CR character in these files:', items=cr_files)]
  return []


def CheckChangeHasOnlyOneEol(input_api, output_api, source_file_filter=None):
  """Checks the files ends with one and only one \n (LF)."""
  eof_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    # Binary read: newline translation would defeat the exact-byte check.
    contents = input_api.ReadFile(f, 'rb')
    # Check that the file ends in one and only one newline character.
    if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'):
      eof_files.append(f.LocalPath())
  if eof_files:
    return [output_api.PresubmitPromptWarning(
        'These files should end in one (and only one) newline character:',
        items=eof_files)]
  return []


def CheckChangeHasNoCrAndHasOnlyOneEol(input_api, output_api,
                                       source_file_filter=None):
  """Runs both CheckChangeHasNoCR and CheckChangeHasOnlyOneEOL in one pass.

  It is faster because it is reading the file only once.
  """
  cr_files = []
  eof_files = []
  for f in input_api.AffectedSourceFiles(source_file_filter):
    contents = input_api.ReadFile(f, 'rb')
    if '\r' in contents:
      cr_files.append(f.LocalPath())
    # Check that the file ends in one and only one newline character.
    if len(contents) > 1 and (contents[-1:] != '\n' or contents[-2:-1] == '\n'):
      eof_files.append(f.LocalPath())

  # Accumulate both warnings so a single run reports every problem at once.
  outputs = []
  if cr_files:
    outputs.append(output_api.PresubmitPromptWarning(
        'Found a CR character in these files:', items=cr_files))
  if eof_files:
    outputs.append(output_api.PresubmitPromptWarning(
        'These files should end in one (and only one) newline character:',
        items=eof_files))
  return outputs


def _ReportErrorFileAndLine(filename, line_num, dummy_line):
  """Default error formatter for _FindNewViolationsOfRule.

  Formats a violation location as 'filename:line'; the offending line text
  itself is deliberately ignored.
  """
  return '%s:%s' % (filename, line_num)


def _FindNewViolationsOfRule(callable_rule, input_api, source_file_filter=None,
                             error_formatter=_ReportErrorFileAndLine):
  """Find all newly introduced violations of a per-line rule (a callable).

  Arguments:
    callable_rule: a callable taking a file extension and line of input and
      returning True if the rule is satisfied and False if there was a problem.
    input_api: object to enumerate the affected files.
    source_file_filter: a filter to be passed to the input api.
    error_formatter: a callable taking (filename, line_number, line) and
      returning a formatted error string.

  Returns:
    A list of the newly-introduced violations reported by the rule.
  """
  errors = []
  for f in input_api.AffectedFiles(include_deletes=False,
                                   file_filter=source_file_filter):
    # For speed, we do two passes, checking first the full file.  Shelling out
    # to the SCM to determine the changed region can be quite expensive on
    # Win32.  Assuming that most files will be kept problem-free, we can
    # skip the SCM operations most of the time.
    # The extension is everything after the last '.'; for extensionless
    # names rsplit returns the whole name, which is passed through as-is.
    extension = str(f.LocalPath()).rsplit('.', 1)[-1]
    if all(callable_rule(extension, line) for line in f.NewContents()):
      continue  # No violation found in full text: can skip considering diff.
for line_num, line in f.ChangedContents(): if not callable_rule(extension, line): errors.append(error_formatter(f.LocalPath(), line_num, line)) return errors def CheckChangeHasNoTabs(input_api, output_api, source_file_filter=None): """Checks that there are no tab characters in any of the text files to be submitted. """ # In addition to the filter, make sure that makefiles are blacklisted. if not source_file_filter: # It's the default filter. source_file_filter = input_api.FilterSourceFile def filter_more(affected_file): basename = input_api.os_path.basename(affected_file.LocalPath()) return (not (basename in ('Makefile', 'makefile') or basename.endswith('.mk')) and source_file_filter(affected_file)) tabs = _FindNewViolationsOfRule(lambda _, line : '\t' not in line, input_api, filter_more) if tabs: return [output_api.PresubmitPromptWarning('Found a tab character in:', long_text='\n'.join(tabs))] return [] def CheckChangeTodoHasOwner(input_api, output_api, source_file_filter=None): """Checks that the user didn't add TODO(name) without an owner.""" unowned_todo = input_api.re.compile('TO''DO[^(]') errors = _FindNewViolationsOfRule(lambda _, x : not unowned_todo.search(x), input_api, source_file_filter) errors = ['Found TO''DO with no owner in ' + x for x in errors] if errors: return [output_api.PresubmitPromptWarning('\n'.join(errors))] return [] def CheckChangeHasNoStrayWhitespace(input_api, output_api, source_file_filter=None): """Checks that there is no stray whitespace at source lines end.""" errors = _FindNewViolationsOfRule(lambda _, line : line.rstrip() == line, input_api, source_file_filter) if errors: return [output_api.PresubmitPromptWarning( 'Found line ending with white spaces in:', long_text='\n'.join(errors))] return [] def CheckLongLines(input_api, output_api, maxlen, source_file_filter=None): """Checks that there aren't any lines longer than maxlen characters in any of the text files to be submitted. 
""" maxlens = { 'java': 100, # This is specifically for Android's handwritten makefiles (Android.mk). 'mk': 200, '': maxlen, } # Language specific exceptions to max line length. # '.h' is considered an obj-c file extension, since OBJC_EXCEPTIONS are a # superset of CPP_EXCEPTIONS. CPP_FILE_EXTS = ('c', 'cc') CPP_EXCEPTIONS = ('#define', '#endif', '#if', '#include', '#pragma') JAVA_FILE_EXTS = ('java',) JAVA_EXCEPTIONS = ('import ', 'package ') JS_FILE_EXTS = ('js',) JS_EXCEPTIONS = ("GEN('#include",) OBJC_FILE_EXTS = ('h', 'm', 'mm') OBJC_EXCEPTIONS = ('#define', '#endif', '#if', '#import', '#include', '#pragma') PY_FILE_EXTS = ('py',) PY_EXCEPTIONS = ('import', 'from') LANGUAGE_EXCEPTIONS = [ (CPP_FILE_EXTS, CPP_EXCEPTIONS), (JAVA_FILE_EXTS, JAVA_EXCEPTIONS), (JS_FILE_EXTS, JS_EXCEPTIONS), (OBJC_FILE_EXTS, OBJC_EXCEPTIONS), (PY_FILE_EXTS, PY_EXCEPTIONS), ] def no_long_lines(file_extension, line): # Check for language specific exceptions. if any(file_extension in exts and line.startswith(exceptions) for exts, exceptions in LANGUAGE_EXCEPTIONS): return True file_maxlen = maxlens.get(file_extension, maxlens['']) # Stupidly long symbols that needs to be worked around if takes 66% of line. long_symbol = file_maxlen * 2 / 3 # Hard line length limit at 50% more. extra_maxlen = file_maxlen * 3 / 2 line_len = len(line) if line_len <= file_maxlen: return True # Allow long URLs of any length. if any((url in line) for url in ('file://', 'http://', 'https://')): return True # If 'line-too-long' is explictly suppressed for the line, any length is # acceptable. 
if 'pylint: disable=line-too-long' in line and file_extension == 'py': return True if line_len > extra_maxlen: return False if 'url(' in line and file_extension == 'css': return True if '<include' in line and file_extension in ('css', 'html', 'js'): return True return input_api.re.match( r'.*[A-Za-z][A-Za-z_0-9]{%d,}.*' % long_symbol, line) def format_error(filename, line_num, line): return '%s, line %s, %s chars' % (filename, line_num, len(line)) errors = _FindNewViolationsOfRule(no_long_lines, input_api, source_file_filter, error_formatter=format_error) if errors: msg = 'Found lines longer than %s characters (first 5 shown).' % maxlen return [output_api.PresubmitPromptWarning(msg, items=errors[:5])] else: return [] def CheckLicense(input_api, output_api, license_re, source_file_filter=None, accept_empty_files=True): """Verifies the license header. """ license_re = input_api.re.compile(license_re, input_api.re.MULTILINE) bad_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') if accept_empty_files and not contents: continue if not license_re.search(contents): bad_files.append(f.LocalPath()) if bad_files: return [output_api.PresubmitPromptWarning( 'License must match:\n%s\n' % license_re.pattern + 'Found a bad license header in these files:', items=bad_files)] return [] def CheckChangeSvnEolStyle(input_api, output_api, source_file_filter=None): """Checks that the source files have svn:eol-style=LF.""" return CheckSvnProperty(input_api, output_api, 'svn:eol-style', 'LF', input_api.AffectedSourceFiles(source_file_filter)) def CheckSvnForCommonMimeTypes(input_api, output_api): """Checks that common binary file types have the correct svn:mime-type.""" output = [] files = input_api.AffectedFiles(include_deletes=False) def IsExts(x, exts): path = x.LocalPath() for extension in exts: if path.endswith(extension): return True return False def FilterFiles(extension): return filter(lambda x: IsExts(x, extension), 
files) def RunCheck(mime_type, files): output.extend(CheckSvnProperty(input_api, output_api, 'svn:mime-type', mime_type, files)) RunCheck('application/pdf', FilterFiles(['.pdf'])) RunCheck('image/bmp', FilterFiles(['.bmp'])) RunCheck('image/gif', FilterFiles(['.gif'])) RunCheck('image/png', FilterFiles(['.png'])) RunCheck('image/jpeg', FilterFiles(['.jpg', '.jpeg', '.jpe'])) RunCheck('image/vnd.microsoft.icon', FilterFiles(['.ico'])) return output def CheckSvnProperty(input_api, output_api, prop, expected, affected_files): """Checks that affected_files files have prop=expected.""" if input_api.change.scm != 'svn': return [] bad = filter(lambda f: f.Property(prop) != expected, affected_files) if bad: if input_api.is_committing: res_type = output_api.PresubmitError else: res_type = output_api.PresubmitNotifyResult message = 'Run the command: svn pset %s %s \\' % (prop, expected) return [res_type(message, items=bad)] return [] ### Other checks def CheckDoNotSubmit(input_api, output_api): return ( CheckDoNotSubmitInDescription(input_api, output_api) + CheckDoNotSubmitInFiles(input_api, output_api) ) def CheckTreeIsOpen(input_api, output_api, url=None, closed=None, json_url=None): """Check whether to allow commit without prompt. Supports two styles: 1. Checks that an url's content doesn't match a regexp that would mean that the tree is closed. (old) 2. Check the json_url to decide whether to allow commit without prompt. Args: input_api: input related apis. output_api: output related apis. url: url to use for regex based tree status. closed: regex to match for closed status. json_url: url to download json style status. 
""" if not input_api.is_committing: return [] try: if json_url: connection = input_api.urllib2.urlopen(json_url) status = input_api.json.loads(connection.read()) connection.close() if not status['can_commit_freely']: short_text = 'Tree state is: ' + status['general_state'] long_text = status['message'] + '\n' + json_url return [output_api.PresubmitError(short_text, long_text=long_text)] else: # TODO(bradnelson): drop this once all users are gone. connection = input_api.urllib2.urlopen(url) status = connection.read() connection.close() if input_api.re.match(closed, status): long_text = status + '\n' + url return [output_api.PresubmitError('The tree is closed.', long_text=long_text)] except IOError as e: return [output_api.PresubmitError('Error fetching tree status.', long_text=str(e))] return [] def GetUnitTestsInDirectory( input_api, output_api, directory, whitelist=None, blacklist=None, env=None): """Lists all files in a directory and runs them. Doesn't recurse. It's mainly a wrapper for RunUnitTests. Use whitelist and blacklist to filter tests accordingly. 
""" unit_tests = [] test_path = input_api.os_path.abspath( input_api.os_path.join(input_api.PresubmitLocalPath(), directory)) def check(filename, filters): return any(True for i in filters if input_api.re.match(i, filename)) to_run = found = 0 for filename in input_api.os_listdir(test_path): found += 1 fullpath = input_api.os_path.join(test_path, filename) if not input_api.os_path.isfile(fullpath): continue if whitelist and not check(filename, whitelist): continue if blacklist and check(filename, blacklist): continue unit_tests.append(input_api.os_path.join(directory, filename)) to_run += 1 input_api.logging.debug('Found %d files, running %d' % (found, to_run)) if not to_run: return [ output_api.PresubmitPromptWarning( 'Out of %d files, found none that matched w=%r, b=%r in directory %s' % (found, whitelist, blacklist, directory)) ] return GetUnitTests(input_api, output_api, unit_tests, env) def GetUnitTests(input_api, output_api, unit_tests, env=None): """Runs all unit tests in a directory. On Windows, sys.executable is used for unit tests ending with ".py". """ # We don't want to hinder users from uploading incomplete patches. if input_api.is_committing: message_type = output_api.PresubmitError else: message_type = output_api.PresubmitPromptWarning results = [] for unit_test in unit_tests: cmd = [] if input_api.platform == 'win32' and unit_test.endswith('.py'): # Windows needs some help. cmd = [input_api.python_executable] cmd.append(unit_test) if input_api.verbose: cmd.append('--verbose') kwargs = {'cwd': input_api.PresubmitLocalPath()} if env: kwargs['env'] = env results.append(input_api.Command( name=unit_test, cmd=cmd, kwargs=kwargs, message=message_type)) return results def GetUnitTestsRecursively(input_api, output_api, directory, whitelist, blacklist): """Gets all files in the directory tree (git repo) that match the whitelist. Restricts itself to only find files within the Change's source repo, not dependencies. 
""" def check(filename): return (any(input_api.re.match(f, filename) for f in whitelist) and not any(input_api.re.match(f, filename) for f in blacklist)) tests = [] to_run = found = 0 for filepath in input_api.change.AllFiles(directory): found += 1 if check(filepath): to_run += 1 tests.append(filepath) input_api.logging.debug('Found %d files, running %d' % (found, to_run)) if not to_run: return [ output_api.PresubmitPromptWarning( 'Out of %d files, found none that matched w=%r, b=%r in directory %s' % (found, whitelist, blacklist, directory)) ] return GetUnitTests(input_api, output_api, tests) def GetPythonUnitTests(input_api, output_api, unit_tests): """Run the unit tests out of process, capture the output and use the result code to determine success. DEPRECATED. """ # We don't want to hinder users from uploading incomplete patches. if input_api.is_committing: message_type = output_api.PresubmitError else: message_type = output_api.PresubmitNotifyResult results = [] for unit_test in unit_tests: # Run the unit tests out of process. This is because some unit tests # stub out base libraries and don't clean up their mess. It's too easy to # get subtle bugs. cwd = None env = None unit_test_name = unit_test # 'python -m test.unit_test' doesn't work. We need to change to the right # directory instead. if '.' in unit_test: # Tests imported in submodules (subdirectories) assume that the current # directory is in the PYTHONPATH. Manually fix that. unit_test = unit_test.replace('.', '/') cwd = input_api.os_path.dirname(unit_test) unit_test = input_api.os_path.basename(unit_test) env = input_api.environ.copy() # At least on Windows, it seems '.' 
must explicitly be in PYTHONPATH backpath = [ '.', input_api.os_path.pathsep.join(['..'] * (cwd.count('/') + 1)) ] if env.get('PYTHONPATH'): backpath.append(env.get('PYTHONPATH')) env['PYTHONPATH'] = input_api.os_path.pathsep.join((backpath)) cmd = [input_api.python_executable, '-m', '%s' % unit_test] results.append(input_api.Command( name=unit_test_name, cmd=cmd, kwargs={'env': env, 'cwd': cwd}, message=message_type)) return results def RunUnitTestsInDirectory(input_api, *args, **kwargs): """Run tests in a directory serially. For better performance, use GetUnitTestsInDirectory and then pass to input_api.RunTests. """ return input_api.RunTests( GetUnitTestsInDirectory(input_api, *args, **kwargs), False) def RunUnitTests(input_api, *args, **kwargs): """Run tests serially. For better performance, use GetUnitTests and then pass to input_api.RunTests. """ return input_api.RunTests(GetUnitTests(input_api, *args, **kwargs), False) def RunPythonUnitTests(input_api, *args, **kwargs): """Run python tests in a directory serially. DEPRECATED """ return input_api.RunTests( GetPythonUnitTests(input_api, *args, **kwargs), False) def _FetchAllFiles(input_api, white_list, black_list): """Hack to fetch all files.""" # We cannot use AffectedFiles here because we want to test every python # file on each single python change. It's because a change in a python file # can break another unmodified file. # Use code similar to InputApi.FilterSourceFile() def Find(filepath, filters): for item in filters: if input_api.re.match(item, filepath): return True return False files = [] path_len = len(input_api.PresubmitLocalPath()) for dirpath, dirnames, filenames in input_api.os_walk( input_api.PresubmitLocalPath()): # Passes dirnames in black list to speed up search. 
for item in dirnames[:]: filepath = input_api.os_path.join(dirpath, item)[path_len + 1:] if Find(filepath, black_list): dirnames.remove(item) for item in filenames: filepath = input_api.os_path.join(dirpath, item)[path_len + 1:] if Find(filepath, white_list) and not Find(filepath, black_list): files.append(filepath) return files def GetPylint(input_api, output_api, white_list=None, black_list=None, disabled_warnings=None, extra_paths_list=None, pylintrc=None): """Run pylint on python files. The default white_list enforces looking only at *.py files. """ white_list = tuple(white_list or ('.*\.py$',)) black_list = tuple(black_list or input_api.DEFAULT_BLACK_LIST) extra_paths_list = extra_paths_list or [] if input_api.is_committing: error_type = output_api.PresubmitError else: error_type = output_api.PresubmitPromptWarning # Only trigger if there is at least one python file affected. def rel_path(regex): """Modifies a regex for a subject to accept paths relative to root.""" def samefile(a, b): # Default implementation for platforms lacking os.path.samefile # (like Windows). 
return input_api.os_path.abspath(a) == input_api.os_path.abspath(b) samefile = getattr(input_api.os_path, 'samefile', samefile) if samefile(input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()): return regex prefix = input_api.os_path.join(input_api.os_path.relpath( input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()), '') return input_api.re.escape(prefix) + regex src_filter = lambda x: input_api.FilterSourceFile( x, map(rel_path, white_list), map(rel_path, black_list)) if not input_api.AffectedSourceFiles(src_filter): input_api.logging.info('Skipping pylint: no matching changes.') return [] if pylintrc is not None: pylintrc = input_api.os_path.join(input_api.PresubmitLocalPath(), pylintrc) else: pylintrc = input_api.os_path.join(_HERE, 'pylintrc') extra_args = ['--rcfile=%s' % pylintrc] if disabled_warnings: extra_args.extend(['-d', ','.join(disabled_warnings)]) files = _FetchAllFiles(input_api, white_list, black_list) if not files: return [] files.sort() input_api.logging.info('Running pylint on %d files', len(files)) input_api.logging.debug('Running pylint on: %s', files) # Copy the system path to the environment so pylint can find the right # imports. env = input_api.environ.copy() import sys env['PYTHONPATH'] = input_api.os_path.pathsep.join( extra_paths_list + sys.path).encode('utf8') def GetPylintCmd(flist, extra, parallel): # Windows needs help running python files so we explicitly specify # the interpreter to use. It also has limitations on the size of # the command-line, so we pass arguments via a pipe. 
cmd = [input_api.python_executable, input_api.os_path.join(_HERE, 'third_party', 'pylint.py'), '--args-on-stdin'] if len(flist) == 1: description = flist[0] else: description = '%s files' % len(flist) args = extra_args[:] if extra: args.extend(extra) description += ' using %s' % (extra,) if parallel: args.append('--jobs=%s' % input_api.cpu_count) description += ' on %d cores' % input_api.cpu_count return input_api.Command( name='Pylint (%s)' % description, cmd=cmd, kwargs={'env': env, 'stdin': '\n'.join(args + flist)}, message=error_type) # Always run pylint and pass it all the py files at once. # Passing py files one at time is slower and can produce # different results. input_api.verbose used to be used # to enable this behaviour but differing behaviour in # verbose mode is not desirable. # Leave this unreachable code in here so users can make # a quick local edit to diagnose pylint issues more # easily. if True: # pylint's cycle detection doesn't work in parallel, so spawn a second, # single-threaded job for just that check. # Some PRESUBMITs explicitly mention cycle detection. if not any('R0401' in a or 'cyclic-import' in a for a in extra_args): return [ GetPylintCmd(files, ["--disable=cyclic-import"], True), GetPylintCmd(files, ["--disable=all", "--enable=cyclic-import"], False) ] else: return [ GetPylintCmd(files, [], True) ] else: return map(lambda x: GetPylintCmd([x], [], 1), files) def RunPylint(input_api, *args, **kwargs): """Legacy presubmit function. For better performance, get all tests and then pass to input_api.RunTests. """ return input_api.RunTests(GetPylint(input_api, *args, **kwargs), False) # TODO(dpranke): Get the host_url from the input_api instead def CheckRietveldTryJobExecution(dummy_input_api, dummy_output_api, dummy_host_url, dummy_platforms, dummy_owner): # Temporarily 'fix' the check while the Rietveld API is being upgraded to # something sensible. 
return [] def CheckBuildbotPendingBuilds(input_api, output_api, url, max_pendings, ignored): try: connection = input_api.urllib2.urlopen(url) raw_data = connection.read() connection.close() except IOError: return [output_api.PresubmitNotifyResult('%s is not accessible' % url)] try: data = input_api.json.loads(raw_data) except ValueError: return [output_api.PresubmitNotifyResult('Received malformed json while ' 'looking up buildbot status')] out = [] for (builder_name, builder) in data.iteritems(): if builder_name in ignored: continue if builder.get('state', '') == 'offline': continue pending_builds_len = len(builder.get('pending_builds', [])) if pending_builds_len > max_pendings: out.append('%s has %d build(s) pending' % (builder_name, pending_builds_len)) if out: return [output_api.PresubmitPromptWarning( 'Build(s) pending. It is suggested to wait that no more than %d ' 'builds are pending.' % max_pendings, long_text='\n'.join(out))] return [] def CheckOwners(input_api, output_api, source_file_filter=None): if input_api.is_committing: if input_api.tbr: return [output_api.PresubmitNotifyResult( '--tbr was specified, skipping OWNERS check')] if input_api.change.issue: if _GetRietveldIssueProps(input_api, None).get('cq_dry_run', False): return [output_api.PresubmitNotifyResult( 'This is a CQ dry run, skipping OWNERS check')] else: return [output_api.PresubmitError("OWNERS check failed: this change has " "no Rietveld issue number, so we can't check it for approvals.")] needed = 'LGTM from an OWNER' output = output_api.PresubmitError else: needed = 'OWNER reviewers' output = output_api.PresubmitNotifyResult affected_files = set([f.LocalPath() for f in input_api.change.AffectedFiles(file_filter=source_file_filter)]) owners_db = input_api.owners_db owner_email, reviewers = _RietveldOwnerAndReviewers( input_api, owners_db.email_regexp, approval_needed=input_api.is_committing) owner_email = owner_email or input_api.change.author_email if owner_email: reviewers_plus_owner = 
set([owner_email]).union(reviewers) missing_files = owners_db.files_not_covered_by(affected_files, reviewers_plus_owner) else: missing_files = owners_db.files_not_covered_by(affected_files, reviewers) if missing_files: output_list = [ output('Missing %s for these files:\n %s' % (needed, '\n '.join(sorted(missing_files))))] if not input_api.is_committing: suggested_owners = owners_db.reviewers_for(missing_files, owner_email) output_list.append(output('Suggested OWNERS: ' + '(Use "git-cl owners" to interactively select owners.)\n %s' % ('\n '.join(suggested_owners or [])))) return output_list if input_api.is_committing and not reviewers: return [output('Missing LGTM from someone other than %s' % owner_email)] return [] def _GetRietveldIssueProps(input_api, messages): """Gets the issue properties from rietveld.""" issue = input_api.change.issue if issue and input_api.rietveld: return input_api.rietveld.get_issue_properties( issue=int(issue), messages=messages) def _ReviewersFromChange(change): """Return the reviewers specified in the |change|, if any.""" reviewers = set() if change.R: reviewers.update(set([r.strip() for r in change.R.split(',')])) if change.TBR: reviewers.update(set([r.strip() for r in change.TBR.split(',')])) # Drop reviewers that aren't specified in email address format. return set(reviewer for reviewer in reviewers if '@' in reviewer) def _RietveldOwnerAndReviewers(input_api, email_regexp, approval_needed=False): """Return the owner and reviewers of a change, if any. If approval_needed is True, only reviewers who have approved the change will be returned. 
""" issue_props = _GetRietveldIssueProps(input_api, True) if not issue_props: reviewers = set() if not approval_needed: reviewers = _ReviewersFromChange(input_api.change) return None, reviewers if not approval_needed: return issue_props['owner_email'], set(issue_props['reviewers']) owner_email = issue_props['owner_email'] def match_reviewer(r): return email_regexp.match(r) and r != owner_email messages = issue_props.get('messages', []) approvers = set( m['sender'] for m in messages if m.get('approval') and match_reviewer(m['sender'])) return owner_email, approvers def _CheckConstNSObject(input_api, output_api, source_file_filter): """Checks to make sure no objective-c files have |const NSSomeClass*|.""" pattern = input_api.re.compile( r'(?<!reinterpret_cast<)' r'const\s+NS(?!(Point|Range|Rect|Size)\s*\*)\w*\s*\*') def objective_c_filter(f): return (source_file_filter(f) and input_api.os_path.splitext(f.LocalPath())[1] in ('.h', '.m', '.mm')) files = [] for f in input_api.AffectedSourceFiles(objective_c_filter): contents = input_api.ReadFile(f) if pattern.search(contents): files.append(f) if files: if input_api.is_committing: res_type = output_api.PresubmitPromptWarning else: res_type = output_api.PresubmitNotifyResult return [ res_type('|const NSClass*| is wrong, see ' + 'http://dev.chromium.org/developers/clang-mac', files) ] return [] def CheckSingletonInHeaders(input_api, output_api, source_file_filter=None): """Deprecated, must be removed.""" return [ output_api.PresubmitNotifyResult( 'CheckSingletonInHeaders is deprecated, please remove it.') ] def PanProjectChecks(input_api, output_api, excluded_paths=None, text_files=None, license_header=None, project_name=None, owners_check=True, maxlen=80): """Checks that ALL chromium orbit projects should use. These are checks to be run on all Chromium orbit project, including: Chromium Native Client V8 When you update this function, please take this broad scope into account. 
Args: input_api: Bag of input related interfaces. output_api: Bag of output related interfaces. excluded_paths: Don't include these paths in common checks. text_files: Which file are to be treated as documentation text files. license_header: What license header should be on files. project_name: What is the name of the project as it appears in the license. Returns: A list of warning or error objects. """ excluded_paths = tuple(excluded_paths or []) text_files = tuple(text_files or ( r'.+\.txt$', r'.+\.json$', )) project_name = project_name or 'Chromium' # Accept any year number from 2006 to the current year, or the special # 2006-20xx string used on the oldest files. 2006-20xx is deprecated, but # tolerated on old files. current_year = int(input_api.time.strftime('%Y')) allowed_years = (str(s) for s in reversed(xrange(2006, current_year + 1))) years_re = '(' + '|'.join(allowed_years) + '|2006-2008|2006-2009|2006-2010)' # The (c) is deprecated, but tolerate it until it's removed from all files. license_header = license_header or ( r'.*? Copyright (\(c\) )?%(year)s The %(project)s Authors\. ' r'All rights reserved\.\n' r'.*? Use of this source code is governed by a BSD-style license that ' r'can be\n' r'.*? found in the LICENSE file\.(?: \*/)?\n' ) % { 'year': years_re, 'project': project_name, } results = [] # This code loads the default black list (e.g. third_party, experimental, etc) # and add our black list (breakpad, skia and v8 are still not following # google style and are not really living this repository). # See presubmit_support.py InputApi.FilterSourceFile for the (simple) usage. 
black_list = input_api.DEFAULT_BLACK_LIST + excluded_paths white_list = input_api.DEFAULT_WHITE_LIST + text_files sources = lambda x: input_api.FilterSourceFile(x, black_list=black_list) text_files = lambda x: input_api.FilterSourceFile( x, black_list=black_list, white_list=white_list) snapshot_memory = [] def snapshot(msg): """Measures & prints performance warning if a rule is running slow.""" dt2 = input_api.time.clock() if snapshot_memory: delta_ms = int(1000*(dt2 - snapshot_memory[0])) if delta_ms > 500: print " %s took a long time: %dms" % (snapshot_memory[1], delta_ms) snapshot_memory[:] = (dt2, msg) if owners_check: snapshot("checking owners") results.extend(input_api.canned_checks.CheckOwners( input_api, output_api, source_file_filter=None)) snapshot("checking long lines") results.extend(input_api.canned_checks.CheckLongLines( input_api, output_api, maxlen, source_file_filter=sources)) snapshot( "checking tabs") results.extend(input_api.canned_checks.CheckChangeHasNoTabs( input_api, output_api, source_file_filter=sources)) snapshot( "checking stray whitespace") results.extend(input_api.canned_checks.CheckChangeHasNoStrayWhitespace( input_api, output_api, source_file_filter=sources)) snapshot("checking nsobjects") results.extend(_CheckConstNSObject( input_api, output_api, source_file_filter=sources)) snapshot("checking eol style") results.extend(input_api.canned_checks.CheckChangeSvnEolStyle( input_api, output_api, source_file_filter=text_files)) snapshot("checking license") results.extend(input_api.canned_checks.CheckLicense( input_api, output_api, license_header, source_file_filter=sources)) if input_api.is_committing: snapshot("checking svn mime types") results.extend(input_api.canned_checks.CheckSvnForCommonMimeTypes( input_api, output_api)) snapshot("checking was uploaded") results.extend(input_api.canned_checks.CheckChangeWasUploaded( input_api, output_api)) snapshot("checking description") 
results.extend(input_api.canned_checks.CheckChangeHasDescription( input_api, output_api)) results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription( input_api, output_api)) snapshot("checking do not submit in files") results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles( input_api, output_api)) snapshot("done") return results def CheckPatchFormatted(input_api, output_api): import git_cl cmd = ['cl', 'format', '--dry-run', input_api.PresubmitLocalPath()] code, _ = git_cl.RunGitWithCode(cmd, suppress_stderr=True) if code == 2: short_path = input_api.basename(input_api.PresubmitLocalPath()) full_path = input_api.os_path.relpath(input_api.PresubmitLocalPath(), input_api.change.RepositoryRoot()) return [output_api.PresubmitPromptWarning( 'The %s directory requires source formatting. ' 'Please run git cl format %s' % (short_path, full_path))] # As this is just a warning, ignore all other errors if the user # happens to have a broken clang-format, doesn't use git, etc etc. return [] def CheckGNFormatted(input_api, output_api): import gn affected_files = input_api.AffectedFiles( include_deletes=False, file_filter=lambda x: x.LocalPath().endswith('.gn') or x.LocalPath().endswith('.gni')) warnings = [] for f in affected_files: cmd = ['gn', 'format', '--dry-run', f.AbsoluteLocalPath()] rc = gn.main(cmd) if rc == 2: warnings.append(output_api.PresubmitPromptWarning( '%s requires formatting. Please run `gn format --in-place %s`.' % ( f.AbsoluteLocalPath(), f.LocalPath()))) # It's just a warning, so ignore other types of failures assuming they'll be # caught elsewhere. return warnings
{ "content_hash": "39042c187efdee2a72eaede3970a86f1", "timestamp": "", "source": "github", "line_count": 1132, "max_line_length": 80, "avg_line_length": 36.62720848056537, "alnum_prop": 0.6637161738459312, "repo_name": "junhuac/MQUIC", "id": "96bd094c226a8ea0156a0e3b4a240105050e07f9", "size": "41629", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "depot_tools/presubmit_canned_checks.py", "mode": "33188", "license": "mit", "language": [ { "name": "ApacheConf", "bytes": "25707" }, { "name": "Assembly", "bytes": "5386" }, { "name": "Batchfile", "bytes": "42909" }, { "name": "C", "bytes": "1168925" }, { "name": "C#", "bytes": "81308" }, { "name": "C++", "bytes": "43919800" }, { "name": "CMake", "bytes": "46379" }, { "name": "CSS", "bytes": "19668" }, { "name": "Emacs Lisp", "bytes": "32613" }, { "name": "Go", "bytes": "7247" }, { "name": "Groff", "bytes": "127224" }, { "name": "HTML", "bytes": "2548385" }, { "name": "Java", "bytes": "1332462" }, { "name": "JavaScript", "bytes": "851006" }, { "name": "M4", "bytes": "29823" }, { "name": "Makefile", "bytes": "459525" }, { "name": "Objective-C", "bytes": "120158" }, { "name": "Objective-C++", "bytes": "330017" }, { "name": "PHP", "bytes": "11283" }, { "name": "Protocol Buffer", "bytes": "2991" }, { "name": "Python", "bytes": "16872234" }, { "name": "R", "bytes": "1842" }, { "name": "Ruby", "bytes": "937" }, { "name": "Shell", "bytes": "764509" }, { "name": "Swift", "bytes": "116" }, { "name": "VimL", "bytes": "12288" }, { "name": "nesC", "bytes": "14779" } ], "symlink_target": "" }